<filename>streams/blog/migrations/0012_auto_20200928_1212.py
# Generated by Django 3.1.1 on 2020-09-28 12:12
import datetime

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0011_blogpostpage_featured'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='blogpostpage',
            name='date',
        ),
        migrations.AddField(
            model_name='blogpostpage',
            name='created',
            field=models.DateTimeField(blank=True, default=datetime.datetime.now),
        ),
    ]
<filename>opac/webapp/main/views.py
# coding: utf-8
import logging
import requests
import mimetypes
from io import BytesIO
from urllib.parse import urlparse
from datetime import datetime, timedelta
from collections import OrderedDict
from flask_babelex import gettext as _
from flask import (
render_template,
abort,
current_app,
request,
session,
redirect,
jsonify,
url_for,
Response,
send_from_directory,
g,
make_response,
)
from werkzeug.contrib.atom import AtomFeed
from urllib.parse import urljoin
from legendarium.formatter import descriptive_short_format
from . import main
from webapp import babel
from webapp import cache
from webapp import controllers
from webapp.choices import STUDY_AREAS
from webapp.utils import utils
from webapp.utils.caching import cache_key_with_lang, cache_key_with_lang_with_qs
from webapp import forms
from webapp.config.lang_names import display_original_lang_name
from opac_schema.v1.models import Journal, Issue, Article, Collection
from lxml import etree
from packtools import HTMLGenerator
logger = logging.getLogger(__name__)
JOURNAL_UNPUBLISH = _("O periódico está indisponível por motivo de: ")
ISSUE_UNPUBLISH = _("O número está indisponível por motivo de: ")
ARTICLE_UNPUBLISH = _("O artigo está indisponível por motivo de: ")
IAHX_LANGS = dict(
p='pt',
e='es',
i='en',
)
def url_external(endpoint, **kwargs):
url = url_for(endpoint, **kwargs)
return urljoin(request.url_root, url)
class RetryableError(Exception):
"""Erro recuperável sem que seja necessário modificar o estado dos dados
na parte cliente, e.g., timeouts, erros advindos de particionamento de rede
etc.
"""
class NonRetryableError(Exception):
"""Erro do qual não pode ser recuperado sem modificar o estado dos dados
na parte cliente, e.g., recurso solicitado não exite, URI inválida etc.
"""
def fetch_data(url: str, timeout: float = 2) -> bytes:
try:
response = requests.get(url, timeout=timeout)
except (requests.ConnectionError, requests.Timeout) as exc:
raise RetryableError(exc) from exc
except (requests.InvalidSchema, requests.MissingSchema, requests.InvalidURL) as exc:
raise NonRetryableError(exc) from exc
else:
try:
response.raise_for_status()
except requests.HTTPError as exc:
if 400 <= exc.response.status_code < 500:
raise NonRetryableError(exc) from exc
elif 500 <= exc.response.status_code < 600:
raise RetryableError(exc) from exc
else:
raise
return response.content
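# Illustrative usage sketch (not part of the original module; the URL below is
# a hypothetical placeholder). It shows how callers of ``fetch_data`` are
# expected to map the two error classes to HTTP responses, mirroring what
# ``get_pdf_content`` and ``get_content_from_ssm`` do further down:
#
#     try:
#         content = fetch_data("https://example.org/article.xml")
#     except NonRetryableError:
#         abort(404)  # resource does not exist or the URI is invalid
#     except RetryableError:
#         abort(500)  # transient failure; retrying may succeed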
@main.before_app_request
def add_collection_to_g():
if not hasattr(g, 'collection'):
try:
collection = controllers.get_current_collection()
setattr(g, 'collection', collection)
except Exception:
# to be discussed: what to do here
setattr(g, 'collection', {})
@main.after_request
def add_header(response):
response.headers['x-content-type-options'] = 'nosniff'
return response
@main.after_request
def add_language_code(response):
language = session.get('lang', get_locale())
response.set_cookie('language', language)
return response
@main.before_app_request
def add_forms_to_g():
setattr(g, 'email_share', forms.EmailShareForm())
setattr(g, 'email_contact', forms.ContactForm())
setattr(g, 'error', forms.ErrorForm())
@main.before_app_request
def add_scielo_org_config_to_g():
language = session.get('lang', get_locale())
scielo_org_links = {
key: url[language]
for key, url in current_app.config.get('SCIELO_ORG_URIS', {}).items()
}
setattr(g, 'scielo_org', scielo_org_links)
@babel.localeselector
def get_locale():
langs = current_app.config.get('LANGUAGES')
lang_from_headers = request.accept_languages.best_match(list(langs.keys()))
if 'lang' not in list(session.keys()):
session['lang'] = lang_from_headers
if not lang_from_headers and not session['lang']:
# If the language cannot be detected and there is no 'lang' key in the
# session, fall back to the default locale.
session['lang'] = current_app.config.get('BABEL_DEFAULT_LOCALE')
return session['lang']
@main.route('/set_locale/<string:lang_code>/')
def set_locale(lang_code):
langs = current_app.config.get('LANGUAGES')
if lang_code not in list(langs.keys()):
abort(400, _('Código de idioma inválido'))
referrer = request.referrer
hash = request.args.get('hash')
if hash:
referrer += "#" + hash
# store the language code in the session
session['lang'] = lang_code
return redirect(referrer)
def get_lang_from_session():
"""
Try to return the language stored in the session; if that is not possible,
return BABEL_DEFAULT_LOCALE.
"""
try:
return session['lang']
except KeyError:
return current_app.config.get('BABEL_DEFAULT_LOCALE')
@main.route('/')
@cache.cached(key_prefix=cache_key_with_lang)
def index():
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
tweets = controllers.get_collection_tweets()
press_releases = controllers.get_press_releases({'language': language})
urls = {
'downloads': '{0}/w/accesses?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'references': '{0}/w/publication/size?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'other': '{0}/?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION'])
}
if (
g.collection is not None
and isinstance(g.collection, Collection)
and g.collection.metrics is not None
and current_app.config['USE_HOME_METRICS']
):
g.collection.metrics.total_journal = Journal.objects.filter(
is_public=True, current_status="current"
).count()
g.collection.metrics.total_article = Article.objects.filter(
is_public=True
).count()
context = {
'news': news,
'urls': urls,
'tweets': tweets,
'press_releases': press_releases,
}
return render_template("collection/index.html", **context)
# ##################################Collection###################################
@main.route('/journals/alpha')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list():
allowed_filters = ["current", "no-current", ""]
query_filter = request.args.get("status", "")
if query_filter not in allowed_filters:
query_filter = ""
journals_list = [
controllers.get_journal_json_data(journal)
for journal in controllers.get_journals(query_filter=query_filter)
]
return render_template("collection/list_journal.html",
**{'journals_list': journals_list, 'query_filter': query_filter})
@main.route("/journals/thematic")
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_thematic():
allowed_query_filters = ["current", "no-current", ""]
allowed_thematic_filters = ["areas", "wos", "publisher"]
thematic_table = {
"areas": "study_areas",
"wos": "subject_categories",
"publisher": "publisher_name",
}
query_filter = request.args.get("status", "")
title_query = request.args.get("query", "")
thematic_filter = request.args.get("filter", "areas")
if query_filter not in allowed_query_filters:
query_filter = ""
if thematic_filter not in allowed_thematic_filters:
thematic_filter = "areas"
lang = get_lang_from_session()[:2].lower()
objects = controllers.get_journals_grouped_by(
thematic_table[thematic_filter],
title_query,
query_filter=query_filter,
lang=lang,
)
return render_template(
"collection/list_thematic.html",
**{"objects": objects, "query_filter": query_filter, "filter": thematic_filter}
)
@main.route('/journals/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_feed():
language = session.get('lang', get_locale())
collection = controllers.get_current_collection()
title = 'SciELO - %s - %s' % (collection.name, _('Últimos periódicos inseridos na coleção'))
subtitle = _('10 últimos periódicos inseridos na coleção %s' % collection.name)
feed = AtomFeed(title,
subtitle=subtitle,
feed_url=request.url, url=request.url_root)
journals = controllers.get_journals_paginated(
title_query='', page=1, order_by='-created', per_page=10)
if not journals.items:
feed.add('Nenhum periódico encontrado',
url=request.url,
updated=datetime.now())
for journal in journals.items:
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = []
if last_issue:
articles = controllers.get_articles_by_iid(last_issue.iid,
is_public=True)
result_dict = OrderedDict()
for article in articles:
section = article.get_section_by_lang(language[:2])
result_dict.setdefault(section, [])
result_dict[section].append(article)
context = {
'journal': journal,
'articles': result_dict,
'language': language,
'last_issue': last_issue
}
feed.add(journal.title,
render_template("collection/list_feed_content.html", **context),
content_type='html',
author=journal.publisher_name,
url=url_external('main.journal_detail', url_seg=journal.url_segment),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/about/", methods=['GET'])
@main.route('/about/<string:slug_name>', methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def about_collection(slug_name=None):
language = session.get('lang', get_locale())
context = {}
page = None
if slug_name:
# a specific page was requested
page = controllers.get_page_by_slug_name(slug_name, language)
if not page:
abort(404, _('Página não encontrada'))
context['page'] = page
else:
# no specific page requested, so render the list of pages
pages = controllers.get_pages_by_lang(language)
context['pages'] = pages
return render_template("collection/about.html", **context)
# ###################################Journal#####################################
@main.route('/scielo.php/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy():
script_php = request.args.get('script', None)
pid = request.args.get('pid', None)
tlng = request.args.get('tlng', None)
allowed_scripts = [
'sci_serial', 'sci_issuetoc', 'sci_arttext', 'sci_abstract', 'sci_issues', 'sci_pdf'
]
if (script_php is not None) and (script_php in allowed_scripts) and not pid:
# a known script was given but no pid was provided
abort(400, _(u'Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
elif script_php and pid:
if script_php == 'sci_serial':
# pid = issn
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.journal_detail',
url_seg=journal.url_segment), code=301)
elif script_php == 'sci_issuetoc':
issue = controllers.get_issue_by_pid(pid)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
if not issue.journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))
if issue.url_segment and "ahead" in issue.url_segment:
return redirect(
url_for('main.aop_toc', url_seg=issue.journal.url_segment), code=301)
return redirect(
url_for(
"main.issue_toc",
url_seg=issue.journal.url_segment,
url_seg_issue=issue.url_segment),
301
)
elif script_php == 'sci_arttext' or script_php == 'sci_abstract':
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
# 'abstract' or None (not False, because False would be converted to the string 'False')
part = (script_php == 'sci_abstract' and 'abstract') or None
if tlng not in article.languages:
tlng = article.original_language
return redirect(url_for('main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
part=part,
lang=tlng),
code=301)
elif script_php == 'sci_issues':
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.issue_grid',
url_seg=journal.url_segment), 301)
elif script_php == 'sci_pdf':
# access to the article PDF:
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
format='pdf',
),
code=301
)
else:
abort(400, _(u'Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
else:
return redirect('/')
@main.route('/<string:journal_seg>')
@main.route('/journal/<string:journal_seg>')
def journal_detail_legacy_url(journal_seg):
return redirect(url_for('main.journal_detail',
url_seg=journal_seg), code=301)
@main.route('/j/<string:url_seg>/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_detail(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
utils.fix_journal_last_issue(journal)
# TODO: restrict to news related to this journal only
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
# Press releases
press_releases = controllers.get_press_releases({
'journal': journal,
'language': language})
# List of sections
# Always keep the sections in English on the journal home page
if journal.last_issue and journal.current_status == "current":
sections = [section for section in journal.last_issue.sections if section.language == 'en']
recent_articles = controllers.get_recent_articles_of_issue(journal.last_issue.iid, is_public=True)
else:
sections = []
recent_articles = []
latest_issue = journal.last_issue
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = ''
journal_metrics = controllers.get_journal_metrics(journal)
context = {
'journal': journal,
'press_releases': press_releases,
'recent_articles': recent_articles,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
# the first item of the list is the latest issue.
# conditional to check whether issues contains items
'last_issue': latest_issue,
'latest_issue_legend': latest_issue_legend,
'sections': sections if sections else None,
'news': news,
'journal_metrics': journal_metrics
}
return render_template("journal/detail.html", **context)
@main.route('/journal/<string:url_seg>/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_feed(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = controllers.get_articles_by_iid(last_issue.iid, is_public=True)
feed = AtomFeed(journal.title,
feed_url=request.url,
url=request.url_root,
subtitle=utils.get_label_issue(last_issue))
feed_language = session.get('lang', get_locale())
feed_language = feed_language[:2].lower()
for article in articles:
# ######### TODO: Review #########
article_lang = feed_language
if feed_language not in article.languages:
article_lang = article.original_language
feed.add(article.title or _('Artigo sem título'),
render_template("issue/feed_content.html", article=article),
content_type='html',
id=article.doi or article.pid,
author=article.authors,
url=url_external('main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
lang=article_lang),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/journal/<string:url_seg>/about/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang)
def about_journal(url_seg):
language = session.get('lang', get_locale())
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
latest_issue = utils.fix_journal_last_issue(journal)
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = None
page = controllers.get_page_by_journal_acron_lang(journal.acronym, language)
context = {
'journal': journal,
'latest_issue_legend': latest_issue_legend,
'last_issue': latest_issue,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
}
if page:
context['content'] = page.content
if page.updated_at:
context['page_updated_at'] = page.updated_at
return render_template("journal/about.html", **context)
@main.route("/journals/search/alpha/ajax/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_alpha_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
page = request.args.get('page', 1, type=int)
lang = get_lang_from_session()[:2].lower()
response_data = controllers.get_alpha_list_from_paginated_journals(
title_query=query,
query_filter=query_filter,
page=page,
lang=lang)
return jsonify(response_data)
@main.route("/journals/search/group/by/filter/ajax/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_by_theme_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
filter = request.args.get('filter', 'areas', type=str)
lang = get_lang_from_session()[:2].lower()
if filter == 'areas':
objects = controllers.get_journals_grouped_by('study_areas', query, query_filter=query_filter, lang=lang)
elif filter == 'wos':
objects = controllers.get_journals_grouped_by('subject_categories', query, query_filter=query_filter, lang=lang)
elif filter == 'publisher':
objects = controllers.get_journals_grouped_by('publisher_name', query, query_filter=query_filter, lang=lang)
else:
return jsonify({
'error': 401,
'message': _('Parâmetro "filter" é inválido, deve ser "areas", "wos" ou "publisher".')
})
return jsonify(objects)
@main.route("/journals/download/<string:list_type>/<string:extension>/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def download_journal_list(list_type, extension):
if extension.lower() not in ['csv', 'xls']:
abort(401, _('Parámetro "extension" é inválido, deve ser "csv" ou "xls".'))
elif list_type.lower() not in ['alpha', 'areas', 'wos', 'publisher']:
abort(401, _('Parámetro "list_type" é inválido, deve ser: "alpha", "areas", "wos" ou "publisher".'))
else:
if extension.lower() == 'xls':
mimetype = 'application/vnd.ms-excel'
else:
mimetype = 'text/csv'
query = request.args.get('query', '', type=str)
data = controllers.get_journal_generator_for_csv(list_type=list_type,
title_query=query,
extension=extension.lower())
timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = 'journals_%s_%s.%s' % (list_type, timestamp, extension)
response = Response(data, mimetype=mimetype)
response.headers['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
@main.route("/<string:url_seg>/contact", methods=['POST'])
def contact(url_seg):
if not request.is_xhr:
abort(403, _('Requisição inválida, deve ser ajax.'))
if utils.is_recaptcha_valid(request):
form = forms.ContactForm(request.form)
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal.enable_contact:
abort(403, _('Periódico não permite envio de email.'))
recipients = journal.editor_email
if form.validate():
sent, message = controllers.send_email_contact(recipients,
form.data['name'],
form.data['your_email'],
form.data['message'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
else:
abort(400, _('Requisição inválida, captcha inválido.'))
@main.route("/form_contact/<string:url_seg>/", methods=['GET'])
def form_contact(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
context = {
'journal': journal
}
return render_template("journal/includes/contact_form.html", **context)
# ###################################Issue#######################################
@main.route('/grid/<string:url_seg>/')
def issue_grid_legacy(url_seg):
return redirect(url_for('main.issue_grid', url_seg=url_seg), 301)
@main.route('/j/<string:url_seg>/grid')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_grid(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
# session language
language = session.get('lang', get_locale())
# The default ordering of ``get_issues_by_jid`` is: "-year", "-volume", "-order"
issues_data = controllers.get_issues_for_grid_by_jid(journal.id, is_public=True)
latest_issue = issues_data['last_issue']
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = None
context = {
'journal': journal,
'last_issue': issues_data['last_issue'],
'latest_issue_legend': latest_issue_legend,
'volume_issue': issues_data['volume_issue'],
'ahead': issues_data['ahead'],
'result_dict': issues_data['ordered_for_grid'],
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
}
return render_template("issue/grid.html", **context)
@main.route('/toc/<string:url_seg>/<string:url_seg_issue>/')
def issue_toc_legacy(url_seg, url_seg_issue):
if url_seg_issue and "ahead" in url_seg_issue:
return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)
return redirect(
url_for('main.issue_toc',
url_seg=url_seg,
url_seg_issue=url_seg_issue),
code=301)
@main.route('/j/<string:url_seg>/i/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def issue_toc(url_seg, url_seg_issue):
section_filter = None
goto = request.args.get("goto", None, type=str)
if goto not in ("previous", "next"):
goto = None
if goto in (None, "next") and "ahead" in url_seg_issue:
# redirect to `aop_toc`
return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)
# session language
language = session.get('lang', get_locale())
if current_app.config["FILTER_SECTION_ENABLE"]:
# documents section, if one was selected
section_filter = request.args.get('section', '', type=str).upper()
# fetch the issue
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
# fetch the journal
journal = issue.journal
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
# fill in the url_segment of last_issue
utils.fix_journal_last_issue(journal)
# goto_next_or_previous_issue (redirects)
goto_url = goto_next_or_previous_issue(
issue, request.args.get('goto', None, type=str))
if goto_url:
return redirect(goto_url, code=301)
# fetch the documents
articles = controllers.get_articles_by_iid(issue.iid, is_public=True)
if articles:
# collect ALL the sections of the documents in this table of contents
sections = sorted({a.section.upper() for a in articles if a.section})
else:
# no documents, so there are no sections
sections = []
if current_app.config["FILTER_SECTION_ENABLE"] and section_filter != '':
# keep only the documents of the selected section
articles = [a for a in articles if a.section.upper() == section_filter]
# collect the PDF and TEXT versions of each document
has_math_content = False
for article in articles:
article_text_languages = [doc['lang'] for doc in article.htmls]
article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
setattr(article, "article_text_languages", article_text_languages)
setattr(article, "article_pdf_languages", article_pdf_languages)
if 'mml:' in article.title:
has_math_content = True
# build the bibliographic strip
issue_bibliographic_strip = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(issue.year), volume=issue.volume, number=issue.number,
suppl=issue.suppl_text, language=language[:2].lower())
context = {
'this_page_url': url_for(
'main.issue_toc',
url_seg=url_seg,
url_seg_issue=url_seg_issue),
'has_math_content': has_math_content,
'journal': journal,
'issue': issue,
'issue_bibliographic_strip': issue_bibliographic_strip,
'articles': articles,
'sections': sections,
'section_filter': section_filter,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
'last_issue': journal.last_issue
}
return render_template("issue/toc.html", **context)
def goto_next_or_previous_issue(current_issue, goto_param):
if goto_param not in ["next", "previous"]:
return None
all_issues = list(
controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
if goto_param == "next":
selected_issue = utils.get_next_issue(all_issues, current_issue)
elif goto_param == "previous":
selected_issue = utils.get_prev_issue(all_issues, current_issue)
if selected_issue in (None, current_issue):
# no need to redirect
return None
try:
url_seg_issue = selected_issue.url_segment
except AttributeError:
return None
else:
return url_for('main.issue_toc',
url_seg=selected_issue.journal.url_segment,
url_seg_issue=url_seg_issue)
def get_next_or_previous_issue(current_issue, goto_param):
if goto_param not in ["next", "previous"]:
return current_issue
all_issues = list(
controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
if goto_param == "next":
return utils.get_next_issue(all_issues, current_issue)
return utils.get_prev_issue(all_issues, current_issue)
@main.route('/j/<string:url_seg>/aop')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def aop_toc(url_seg):
section_filter = request.args.get('section', '', type=str).upper()
aop_issues = controllers.get_aop_issues(url_seg) or []
if not aop_issues:
abort(404, _('Artigos ahead of print não encontrados'))
goto = request.args.get("goto", None, type=str)
if goto == "previous":
url = goto_next_or_previous_issue(aop_issues[-1], goto)
if url:
return redirect(url, code=301)
journal = aop_issues[0].journal
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
utils.fix_journal_last_issue(journal)
articles = []
for aop_issue in aop_issues:
_articles = controllers.get_articles_by_iid(
aop_issue.iid, is_public=True)
if _articles:
articles.extend(_articles)
if not articles:
abort(404, _('Artigos ahead of print não encontrados'))
sections = sorted({a.section.upper() for a in articles if a.section})
if section_filter != '':
articles = [a for a in articles if a.section.upper() == section_filter]
for article in articles:
article_text_languages = [doc['lang'] for doc in article.htmls]
article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
setattr(article, "article_text_languages", article_text_languages)
setattr(article, "article_pdf_languages", article_pdf_languages)
context = {
'this_page_url': url_for("main.aop_toc", url_seg=url_seg),
'journal': journal,
'issue': aop_issues[0],
'issue_bibliographic_strip': "ahead of print",
'articles': articles,
'sections': sections,
'section_filter': section_filter,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper())
for study_area in journal.study_areas
],
# the first item of the list is the latest issue.
'last_issue': journal.last_issue
}
return render_template("issue/toc.html", **context)
@main.route('/feed/<string:url_seg>/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_feed(url_seg, url_seg_issue):
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
if not issue.journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))
journal = issue.journal
articles = controllers.get_articles_by_iid(issue.iid, is_public=True)
feed = AtomFeed(journal.title or "",
feed_url=request.url,
url=request.url_root,
subtitle=utils.get_label_issue(issue))
feed_language = session.get('lang', get_locale())
for article in articles:
# ######### TODO: Review #########
article_lang = feed_language
if feed_language not in article.languages:
article_lang = article.original_language
feed.add(article.title or 'Unknown title',
render_template("issue/feed_content.html", article=article),
content_type='html',
author=article.authors,
id=article.doi or article.pid,
url=url_external('main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
lang=article_lang),
updated=journal.updated,
published=journal.created)
return feed.get_response()
# ##################################Article######################################
@main.route('/article/<regex("S\d{4}-\d{3}[0-9xX][0-2][0-9]{3}\d{4}\d{5}"):pid>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_pid(pid):
article = controllers.get_article_by_pid(pid)
if not article:
article = controllers.get_article_by_oap_pid(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(url_for('main.article_detail_v3',
url_seg=article.journal.acronym,
article_pid_v3=article.aid))
def render_html_from_xml(article, lang, gs_abstract=False):
logger.debug("Get XML: %s", article.xml)
if current_app.config["SSM_XML_URL_REWRITE"]:
result = fetch_data(use_ssm_url(article.xml))
else:
result = fetch_data(article.xml)
xml = etree.parse(BytesIO(result))
generator = HTMLGenerator.parse(
xml, valid_only=False, gs_abstract=gs_abstract, output_style="website")
return generator.generate(lang), generator.languages
def render_html_from_html(article, lang):
html_url = [html
for html in article.htmls
if html['lang'] == lang]
try:
html_url = html_url[0]['url']
except IndexError:
raise ValueError('Artigo não encontrado') from None
result = fetch_data(use_ssm_url(html_url))
html = result.decode('utf8')
text_languages = [html['lang'] for html in article.htmls]
return html, text_languages
def render_html_abstract(article, lang):
abstract_text = ''
for abstract in article.abstracts:
if abstract['language'] == lang:
abstract_text = abstract["text"]
break
return abstract_text, article.abstract_languages
def render_html(article, lang, gs_abstract=False):
if article.xml:
return render_html_from_xml(article, lang, gs_abstract)
elif article.htmls:
if gs_abstract:
return render_html_abstract(article, lang)
return render_html_from_html(article, lang)
else:
# TODO: Fix the tests that expect the ``htmls`` attribute to exist.
# Ideally this would raise a ValueError.
return '', []
# TODO: Remove as soon as the Article.xml value is consistent in the
# database
def use_ssm_url(url):
"""Normaliza a string `url` de acordo com os valores das diretivas de
configuração OPAC_SSM_SCHEME, OPAC_SSM_DOMAIN e OPAC_SSM_PORT.
A normalização busca obter uma URL absoluta em função de uma relativa, ou
uma absoluta em função de uma absoluta, mas com as partes *scheme* e
*authority* trocadas pelas definidas nas diretivas citadas anteriormente.
Este código deve ser removido assim que o valor de Article.xml estiver
consistente, i.e., todos os registros possuirem apenas URLs absolutas.
"""
if url.startswith("http"):
parsed_url = urlparse(url)
return current_app.config["SSM_BASE_URI"] + parsed_url.path
else:
return current_app.config["SSM_BASE_URI"] + url
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail(url_seg, url_seg_issue, url_seg_article, lang_code=''):
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Issue não encontrado'))
article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article)
if article is None:
article = controllers.get_article_by_aop_url_segs(
issue.journal, url_seg_issue, url_seg_article
)
if article is None:
abort(404, _('Artigo não encontrado'))
req_params = {
"url_seg": article.journal.acronym,
"article_pid_v3": article.aid,
}
if lang_code:
req_params["lang"] = lang_code
return redirect(url_for('main.article_detail_v3', **req_params))
@main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/')
@main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/<string:part>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_v3(url_seg, article_pid_v3, part=None):
qs_lang = request.args.get('lang', type=str) or None
qs_goto = request.args.get('goto', type=str) or None
qs_stop = request.args.get('stop', type=str) or None
qs_format = request.args.get('format', 'html', type=str)
gs_abstract = (part == "abstract")
if part and not gs_abstract:
abort(404,
_("Não existe '{}'. No seu lugar use '{}'"
).format(part, 'abstract'))
try:
qs_lang, article = controllers.get_article(
article_pid_v3, url_seg, qs_lang, gs_abstract, qs_goto)
if qs_goto:
return redirect(
url_for(
'main.article_detail_v3',
url_seg=url_seg,
article_pid_v3=article.aid,
part=part,
format=qs_format,
lang=qs_lang,
stop=getattr(article, 'stop', None),
),
code=301
)
except (controllers.PreviousOrNextArticleNotFoundError) as e:
if gs_abstract:
abort(404, _('Resumo inexistente'))
abort(404, _('Artigo inexistente'))
except (controllers.ArticleNotFoundError,
controllers.ArticleJournalNotFoundError):
abort(404, _('Artigo não encontrado'))
except controllers.ArticleLangNotFoundError:
return redirect(
url_for(
'main.article_detail_v3',
url_seg=url_seg,
article_pid_v3=article_pid_v3,
format=qs_format,
),
code=301
)
except controllers.ArticleAbstractNotFoundError:
abort(404, _('Recurso não encontrado'))
except controllers.ArticleIsNotPublishedError as e:
abort(404, "{}{}".format(ARTICLE_UNPUBLISH, e))
except controllers.IssueIsNotPublishedError as e:
abort(404, "{}{}".format(ISSUE_UNPUBLISH, e))
except controllers.JournalIsNotPublishedError as e:
abort(404, "{}{}".format(JOURNAL_UNPUBLISH, e))
except ValueError as e:
abort(404, str(e))
def _handle_html():
citation_pdf_url = None
for pdf_data in article.pdfs:
if pdf_data.get("lang") == qs_lang:
citation_pdf_url = url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
lang=qs_lang,
format="pdf",
)
break
website = request.url
if website:
parsed_url = urlparse(request.url)
if current_app.config["FORCE_USE_HTTPS_GOOGLE_TAGS"]:
website = "{}://{}".format('https', parsed_url.netloc)
else:
website = "{}://{}".format(parsed_url.scheme, parsed_url.netloc)
if citation_pdf_url:
citation_pdf_url = "{}{}".format(website, citation_pdf_url)
try:
html, text_languages = render_html(article, qs_lang, gs_abstract)
except (ValueError, NonRetryableError):
abort(404, _('HTML do Artigo não encontrado ou indisponível'))
except RetryableError:
abort(500, _('Erro inesperado'))
text_versions = sorted(
[
(
lang,
display_original_lang_name(lang),
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
lang=lang
)
)
for lang in text_languages
]
)
citation_xml_url = "{}{}".format(
website,
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
format="xml",
lang=article.original_language,
)
)
context = {
'next_article': qs_stop != 'next',
'previous_article': qs_stop != 'previous',
'article': article,
'journal': article.journal,
'issue': article.issue,
'html': html,
'citation_pdf_url': citation_pdf_url,
'citation_xml_url': citation_xml_url,
'article_lang': qs_lang,
'text_versions': text_versions,
'related_links': controllers.related_links(article),
'gs_abstract': gs_abstract,
'part': part,
}
return render_template("article/detail.html", **context)
def _handle_pdf():
if not article.pdfs:
abort(404, _('PDF do Artigo não encontrado'))
pdf_info = [pdf for pdf in article.pdfs if pdf['lang'] == qs_lang]
if len(pdf_info) != 1:
abort(404, _('PDF do Artigo não encontrado'))
try:
pdf_url = pdf_info[0]['url']
except (IndexError, KeyError, ValueError, TypeError):
abort(404, _('PDF do Artigo não encontrado'))
if pdf_url:
return get_pdf_content(pdf_url)
abort(404, _('Recurso do Artigo não encontrado. Caminho inválido!'))
def _handle_xml():
if current_app.config["SSM_XML_URL_REWRITE"]:
result = fetch_data(use_ssm_url(article.xml))
else:
result = fetch_data(article.xml)
response = make_response(result)
response.headers['Content-Type'] = 'application/xml'
return response
if 'html' == qs_format:
return _handle_html()
elif 'pdf' == qs_format:
return _handle_pdf()
elif 'xml' == qs_format:
return _handle_xml()
else:
abort(400, _('Formato não suportado'))
@main.route('/readcube/epdf/')
@main.route('/readcube/epdf.php')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def article_epdf():
doi = request.args.get('doi', None, type=str)
pid = request.args.get('pid', None, type=str)
pdf_path = request.args.get('pdf_path', None, type=str)
lang = request.args.get('lang', None, type=str)
if not all([doi, pid, pdf_path, lang]):
abort(400, _('Parâmetros insuficientes para obter o EPDF do artigo'))
else:
context = {
'doi': doi,
'pid': pid,
'pdf_path': pdf_path,
'lang': lang,
}
return render_template("article/epdf.html", **context)
def get_pdf_content(url):
logger.debug("Get PDF: %s", url)
if current_app.config["SSM_ARTICLE_ASSETS_OR_RENDITIONS_URL_REWRITE"]:
url = use_ssm_url(url)
try:
response = fetch_data(url)
except NonRetryableError:
abort(404, _('PDF não encontrado'))
except RetryableError:
abort(500, _('Erro inesperado'))
else:
mimetype, __ = mimetypes.guess_type(url)
return Response(response, mimetype=mimetype)
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def get_content_from_ssm(resource_ssm_media_path):
resource_ssm_full_url = current_app.config['SSM_BASE_URI'] + resource_ssm_media_path
url = resource_ssm_full_url.strip()
mimetype, __ = mimetypes.guess_type(url)
try:
ssm_response = fetch_data(url)
except NonRetryableError:
abort(404, _('Recurso não encontrado'))
except RetryableError:
abort(500, _('Erro inesperado'))
else:
return Response(ssm_response, mimetype=mimetype)
@main.route('/media/assets/<regex("(.*)"):relative_media_path>')
@cache.cached(key_prefix=cache_key_with_lang)
def media_assets_proxy(relative_media_path):
resource_ssm_path = '{ssm_media_path}{resource_path}'.format(
ssm_media_path=current_app.config['SSM_MEDIA_PATH'],
resource_path=relative_media_path)
return get_content_from_ssm(resource_ssm_path)
@main.route('/article/ssm/content/raw/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def article_ssm_content_raw():
resource_ssm_path = request.args.get('resource_ssm_path', None)
if not resource_ssm_path:
abort(404, _('Recurso do Artigo não encontrado. Caminho inválido!'))
else:
return get_content_from_ssm(resource_ssm_path)
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_pdf(url_seg, url_seg_issue, url_seg_article, lang_code=''):
"""
Expected URL patterns:
`/pdf/csc/2021.v26suppl1/2557-2558`
`/pdf/csc/2021.v26suppl1/2557-2558/en`
"""
if not lang_code and "." not in url_seg_issue:
return router_legacy_pdf(url_seg, url_seg_issue, url_seg_article)
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Issue não encontrado'))
article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article)
if not article:
abort(404, _('Artigo não encontrado'))
req_params = {
'url_seg': article.journal.url_segment,
'article_pid_v3': article.aid,
'format': 'pdf',
}
if lang_code:
req_params['lang'] = lang_code
return redirect(url_for('main.article_detail_v3', **req_params), code=301)
@main.route('/pdf/<string:journal_acron>/<string:issue_info>/<string:pdf_filename>.pdf')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy_pdf(journal_acron, issue_info, pdf_filename):
pdf_filename = '%s.pdf' % pdf_filename
journal = controllers.get_journal_by_url_seg(journal_acron)
if not journal:
abort(404, _('Este PDF não existe em http://www.scielo.br. Consulte http://search.scielo.org'))
article = controllers.get_article_by_pdf_filename(
journal_acron, issue_info, pdf_filename)
if not article:
abort(404, _('PDF do artigo não foi encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
format='pdf',
lang=article._pdf_lang,
),
code=301
)
@main.route('/cgi-bin/fbpe/<string:text_or_abstract>/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy_article(text_or_abstract):
pid = request.args.get('pid', None)
lng = request.args.get('lng', None)
if not (text_or_abstract in ['fbtext', 'fbabs'] and pid):
# requires both a known script (fbtext/fbabs) and a pid
abort(400, _('Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
article = controllers.get_article_by_pid_v1(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
),
code=301
)
# ###############################E-mail share##################################
@main.route("/email_share_ajax/", methods=['POST'])
def email_share_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida.'))
form = forms.EmailShareForm(request.form)
if form.validate():
recipients = [email.strip() for email in form.data['recipients'].split(';') if email.strip() != '']
sent, message = controllers.send_email_share(form.data['your_email'],
recipients,
form.data['share_url'],
form.data['subject'],
form.data['comment'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
@main.route("/form_mail/", methods=['GET'])
def email_form():
context = {'url': request.args.get('url')}
return render_template("email/email_form.html", **context)
@main.route("/email_error_ajax/", methods=['POST'])
def email_error_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida.'))
form = forms.ErrorForm(request.form)
if form.validate():
recipients = [email.strip() for email in current_app.config.get('EMAIL_ACCOUNTS_RECEIVE_ERRORS') if email.strip() != '']
sent, message = controllers.send_email_error(form.data['name'],
form.data['your_email'],
recipients,
form.data['url'],
form.data['error_type'],
form.data['message'],
form.data['page_title'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
@main.route("/error_mail/", methods=['GET'])
def error_form():
context = {'url': request.args.get('url')}
return render_template("includes/error_form.html", **context)
# ###############################Others########################################
@main.route("/media/<path:filename>/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang)
def download_file_by_filename(filename):
media_root = current_app.config['MEDIA_ROOT']
return send_from_directory(media_root, filename)
@main.route("/img/scielo.gif", methods=['GET'])
def full_text_image():
return send_from_directory('static', 'img/full_text_scielo_img.gif')
@main.route("/robots.txt", methods=['GET'])
def get_robots_txt_file():
return send_from_directory('static', 'robots.txt')
@main.route("/revistas/<path:journal_seg>/<string:page>.htm", methods=['GET'])
def router_legacy_info_pages(journal_seg, page):
"""
This view function redirects the legacy URLs to the new URLs.
It keeps a dictionary mapping the legacy page names to anchors:
Page -> anchor
[iaboutj.htm, eaboutj.htm, paboutj.htm] -> #about
[iedboard.htm, eedboard.htm, pedboard.htm] -> #editors
[iinstruc.htm, einstruc.htm, pinstruc.htm] -> #instructions
isubscrp.htm -> no anchor
"""
page_anchor = {
'iaboutj': '#about',
'eaboutj': '#about',
'paboutj': '#about',
'eedboard': '#editors',
'iedboard': '#editors',
'pedboard': '#editors',
'iinstruc': '#instructions',
'pinstruc': '#instructions',
'einstruc': '#instructions'
}
return redirect('%s%s' % (url_for('main.about_journal',
url_seg=journal_seg), page_anchor.get(page, '')), code=301)
@main.route("/api/v1/counter_dict", methods=['GET'])
def router_counter_dicts():
"""
This view function returns a JSON dictionary that maps PIDs to the data
required by the Matomo, COUNTER and SUSHI applications.
"""
end_date = request.args.get('end_date', '', type=str)
try:
end_date = datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
end_date = datetime.now()
begin_date = end_date - timedelta(days=30)
page = request.args.get('page', type=int)
if not page:
page = 1
limit = request.args.get('limit', type=int)
if not limit or limit > 100 or limit < 0:
limit = 100
results = {'dictionary_date': end_date,
'end_date': end_date.strftime('%Y-%m-%d %H-%M-%S'),
'begin_date': begin_date.strftime('%Y-%m-%d %H-%M-%S'),
'documents': {},
'collection': current_app.config['OPAC_COLLECTION']}
articles = controllers.get_articles_by_date_range(begin_date, end_date, page, limit)
for a in articles.items:
results['documents'].update(get_article_counter_data(a))
results['total'] = articles.total
results['pages'] = articles.pages
results['limit'] = articles.per_page
results['page'] = articles.page
return jsonify(results)
def get_article_counter_data(article):
return {
article.aid: {
"journal_acronym": article.journal.acronym,
"pid": article.pid if article.pid else '',
"aop_pid": article.aop_pid if article.aop_pid else '',
"pid_v1": article.scielo_pids.get('v1', ''),
"pid_v2": article.scielo_pids.get('v2', ''),
"pid_v3": article.scielo_pids.get('v3', ''),
"publication_date": article.publication_date,
"default_language": article.original_language,
"create": article.created,
"update": article.updated
}
}
@main.route('/cgi-bin/wxis.exe/iah/')
def author_production():
# http://www.scielo.br/cgi-bin/wxis.exe/iah/
# ?IsisScript=iah/iah.xis&base=article%5Edlibrary&format=iso.pft&
# lang=p&nextAction=lnk&
# indexSearch=AU&exprSearch=MEIERHOFFER,+LILIAN+KOZSLOWSKI
# ->
# //search.scielo.org/?lang=pt&q=au:MEIERHOFFER,+LILIAN+KOZSLOWSKI
search_url = current_app.config.get('URL_SEARCH')
if not search_url:
abort(404, "URL_SEARCH: {}".format(_('Página não encontrada')))
qs_exprSearch = request.args.get('exprSearch', type=str) or ''
qs_indexSearch = request.args.get('indexSearch', type=str) or ''
qs_lang = request.args.get('lang', type=str) or ''
_lang = IAHX_LANGS.get(qs_lang) or ''
_lang = _lang and "lang={}".format(_lang)
_expr = "{}{}".format(
qs_indexSearch == "AU" and "au:" or '', qs_exprSearch)
_expr = _expr and "q={}".format(_expr.replace(" ", "+"))
_and = _lang and _expr and "&" or ''
_question_mark = (_lang or _expr) and "?" or ""
if search_url.startswith("//"):
protocol = "https:"
elif search_url.startswith("http"):
protocol = ""
else:
protocol = "https://"
url = "{}{}{}{}{}{}".format(
protocol, search_url, _question_mark, _lang, _and, _expr)
return redirect(url, code=301)
| # coding: utf-8
import logging
import requests
import mimetypes
from io import BytesIO
from urllib.parse import urlparse
from datetime import datetime, timedelta
from collections import OrderedDict
from flask_babelex import gettext as _
from flask import (
render_template,
abort,
current_app,
request,
session,
redirect,
jsonify,
url_for,
Response,
send_from_directory,
g,
make_response,
)
from werkzeug.contrib.atom import AtomFeed
from urllib.parse import urljoin
from legendarium.formatter import descriptive_short_format
from . import main
from webapp import babel
from webapp import cache
from webapp import controllers
from webapp.choices import STUDY_AREAS
from webapp.utils import utils
from webapp.utils.caching import cache_key_with_lang, cache_key_with_lang_with_qs
from webapp import forms
from webapp.config.lang_names import display_original_lang_name
from opac_schema.v1.models import Journal, Issue, Article, Collection
from lxml import etree
from packtools import HTMLGenerator
logger = logging.getLogger(__name__)
JOURNAL_UNPUBLISH = _("O periódico está indisponível por motivo de: ")
ISSUE_UNPUBLISH = _("O número está indisponível por motivo de: ")
ARTICLE_UNPUBLISH = _("O artigo está indisponível por motivo de: ")
IAHX_LANGS = dict(
p='pt',
e='es',
i='en',
)
def url_external(endpoint, **kwargs):
url = url_for(endpoint, **kwargs)
return urljoin(request.url_root, url)
class RetryableError(Exception):
"""Erro recuperável sem que seja necessário modificar o estado dos dados
na parte cliente, e.g., timeouts, erros advindos de particionamento de rede
etc.
"""
class NonRetryableError(Exception):
"""Erro do qual não pode ser recuperado sem modificar o estado dos dados
na parte cliente, e.g., recurso solicitado não exite, URI inválida etc.
"""
def fetch_data(url: str, timeout: float = 2) -> bytes:
try:
response = requests.get(url, timeout=timeout)
except (requests.ConnectionError, requests.Timeout) as exc:
raise RetryableError(exc) from exc
except (requests.InvalidSchema, requests.MissingSchema, requests.InvalidURL) as exc:
raise NonRetryableError(exc) from exc
else:
try:
response.raise_for_status()
except requests.HTTPError as exc:
if 400 <= exc.response.status_code < 500:
raise NonRetryableError(exc) from exc
elif 500 <= exc.response.status_code < 600:
raise RetryableError(exc) from exc
else:
raise
return response.content
@main.before_app_request
def add_collection_to_g():
if not hasattr(g, 'collection'):
try:
collection = controllers.get_current_collection()
setattr(g, 'collection', collection)
except Exception:
# discutir o que fazer aqui
setattr(g, 'collection', {})
@main.after_request
def add_header(response):
response.headers['x-content-type-options'] = 'nosniff'
return response
@main.after_request
def add_language_code(response):
language = session.get('lang', get_locale())
response.set_cookie('language', language)
return response
@main.before_app_request
def add_forms_to_g():
setattr(g, 'email_share', forms.EmailShareForm())
setattr(g, 'email_contact', forms.ContactForm())
setattr(g, 'error', forms.ErrorForm())
@main.before_app_request
def add_scielo_org_config_to_g():
language = session.get('lang', get_locale())
scielo_org_links = {
key: url[language]
for key, url in current_app.config.get('SCIELO_ORG_URIS', {}).items()
}
setattr(g, 'scielo_org', scielo_org_links)
@babel.localeselector
def get_locale():
langs = current_app.config.get('LANGUAGES')
lang_from_headers = request.accept_languages.best_match(list(langs.keys()))
if 'lang' not in list(session.keys()):
session['lang'] = lang_from_headers
if not lang_from_headers and not session['lang']:
# Caso não seja possível detectar o idioma e não tenhamos a chave lang
# no seção, fixamos o idioma padrão.
session['lang'] = current_app.config.get('BABEL_DEFAULT_LOCALE')
return session['lang']
@main.route('/set_locale/<string:lang_code>/')
def set_locale(lang_code):
langs = current_app.config.get('LANGUAGES')
if lang_code not in list(langs.keys()):
abort(400, _('Código de idioma inválido'))
referrer = request.referrer
hash = request.args.get('hash')
if hash:
referrer += "#" + hash
# salvar o lang code na sessão
session['lang'] = lang_code
return redirect(referrer)
def get_lang_from_session():
"""
Tenta retornar o idioma da seção, caso não consiga retorna
BABEL_DEFAULT_LOCALE.
"""
try:
return session['lang']
except KeyError:
return current_app.config.get('BABEL_DEFAULT_LOCALE')
@main.route('/')
@cache.cached(key_prefix=cache_key_with_lang)
def index():
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
tweets = controllers.get_collection_tweets()
press_releases = controllers.get_press_releases({'language': language})
urls = {
'downloads': '{0}/w/accesses?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'references': '{0}/w/publication/size?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION']),
'other': '{0}/?collection={1}'.format(
current_app.config['METRICS_URL'],
current_app.config['OPAC_COLLECTION'])
}
if (
g.collection is not None
and isinstance(g.collection, Collection)
and g.collection.metrics is not None
and current_app.config['USE_HOME_METRICS']
):
g.collection.metrics.total_journal = Journal.objects.filter(
is_public=True, current_status="current"
).count()
g.collection.metrics.total_article = Article.objects.filter(
is_public=True
).count()
context = {
'news': news,
'urls': urls,
'tweets': tweets,
'press_releases': press_releases,
}
return render_template("collection/index.html", **context)
# ##################################Collection###################################
@main.route('/journals/alpha')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list():
allowed_filters = ["current", "no-current", ""]
query_filter = request.args.get("status", "")
if not query_filter in allowed_filters:
query_filter = ""
journals_list = [
controllers.get_journal_json_data(journal)
for journal in controllers.get_journals(query_filter=query_filter)
]
return render_template("collection/list_journal.html",
**{'journals_list': journals_list, 'query_filter': query_filter})
@main.route("/journals/thematic")
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_thematic():
allowed_query_filters = ["current", "no-current", ""]
allowed_thematic_filters = ["areas", "wos", "publisher"]
thematic_table = {
"areas": "study_areas",
"wos": "subject_categories",
"publisher": "publisher_name",
}
query_filter = request.args.get("status", "")
title_query = request.args.get("query", "")
thematic_filter = request.args.get("filter", "areas")
if not query_filter in allowed_query_filters:
query_filter = ""
if not thematic_filter in allowed_thematic_filters:
thematic_filter = "areas"
lang = get_lang_from_session()[:2].lower()
objects = controllers.get_journals_grouped_by(
thematic_table[thematic_filter],
title_query,
query_filter=query_filter,
lang=lang,
)
return render_template(
"collection/list_thematic.html",
**{"objects": objects, "query_filter": query_filter, "filter": thematic_filter}
)
@main.route('/journals/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def collection_list_feed():
language = session.get('lang', get_locale())
collection = controllers.get_current_collection()
title = 'SciELO - %s - %s' % (collection.name, _('Últimos periódicos inseridos na coleção'))
subtitle = _('10 últimos periódicos inseridos na coleção %s' % collection.name)
feed = AtomFeed(title,
subtitle=subtitle,
feed_url=request.url, url=request.url_root)
journals = controllers.get_journals_paginated(
title_query='', page=1, order_by='-created', per_page=10)
if not journals.items:
feed.add('Nenhum periódico encontrado',
url=request.url,
updated=datetime.now())
for journal in journals.items:
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = []
if last_issue:
articles = controllers.get_articles_by_iid(last_issue.iid,
is_public=True)
result_dict = OrderedDict()
for article in articles:
section = article.get_section_by_lang(language[:2])
result_dict.setdefault(section, [])
result_dict[section].append(article)
context = {
'journal': journal,
'articles': result_dict,
'language': language,
'last_issue': last_issue
}
feed.add(journal.title,
render_template("collection/list_feed_content.html", **context),
content_type='html',
author=journal.publisher_name,
url=url_external('main.journal_detail', url_seg=journal.url_segment),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/about/", methods=['GET'])
@main.route('/about/<string:slug_name>', methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def about_collection(slug_name=None):
language = session.get('lang', get_locale())
context = {}
page = None
if slug_name:
        # a single page was requested
page = controllers.get_page_by_slug_name(slug_name, language)
if not page:
abort(404, _('Página não encontrada'))
context['page'] = page
else:
        # no slug given: list all pages for the language
pages = controllers.get_pages_by_lang(language)
context['pages'] = pages
return render_template("collection/about.html", **context)
# ###################################Journal#####################################
@main.route('/scielo.php/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy():
script_php = request.args.get('script', None)
pid = request.args.get('pid', None)
tlng = request.args.get('tlng', None)
allowed_scripts = [
'sci_serial', 'sci_issuetoc', 'sci_arttext', 'sci_abstract', 'sci_issues', 'sci_pdf'
]
if (script_php is not None) and (script_php in allowed_scripts) and not pid:
        # a valid script was given but no pid: bad request
        abort(400, _(u'Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
elif script_php and pid:
if script_php == 'sci_serial':
            # here the pid is the journal ISSN
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.journal_detail',
url_seg=journal.url_segment), code=301)
elif script_php == 'sci_issuetoc':
issue = controllers.get_issue_by_pid(pid)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
if not issue.journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))
if issue.url_segment and "ahead" in issue.url_segment:
                return redirect(
                    url_for('main.aop_toc', url_seg=issue.journal.url_segment), code=301)
return redirect(
url_for(
"main.issue_toc",
url_seg=issue.journal.url_segment,
url_seg_issue=issue.url_segment),
301
)
elif script_php == 'sci_arttext' or script_php == 'sci_abstract':
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
            # 'abstract' or None (not False, because False would be converted to the string 'False')
part = (script_php == 'sci_abstract' and 'abstract') or None
if tlng not in article.languages:
tlng = article.original_language
return redirect(url_for('main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
part=part,
lang=tlng),
code=301)
elif script_php == 'sci_issues':
journal = controllers.get_journal_by_issn(pid)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
return redirect(url_for('main.issue_grid',
url_seg=journal.url_segment), 301)
elif script_php == 'sci_pdf':
            # access the article PDF:
article = controllers.get_article_by_pid_v2(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
format='pdf',
),
code=301
)
else:
            abort(400, _(u'Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
else:
return redirect('/')
@main.route('/<string:journal_seg>')
@main.route('/journal/<string:journal_seg>')
def journal_detail_legacy_url(journal_seg):
return redirect(url_for('main.journal_detail',
url_seg=journal_seg), code=301)
@main.route('/j/<string:url_seg>/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_detail(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
utils.fix_journal_last_issue(journal)
    # TODO: show only news related to this journal
language = session.get('lang', get_locale())
news = controllers.get_latest_news_by_lang(language)
# Press releases
press_releases = controllers.get_press_releases({
'journal': journal,
'language': language})
    # Section list
    # Sections shown on the journal home page are always kept in English
if journal.last_issue and journal.current_status == "current":
sections = [section for section in journal.last_issue.sections if section.language == 'en']
recent_articles = controllers.get_recent_articles_of_issue(journal.last_issue.iid, is_public=True)
else:
sections = []
recent_articles = []
latest_issue = journal.last_issue
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = ''
journal_metrics = controllers.get_journal_metrics(journal)
context = {
'journal': journal,
'press_releases': press_releases,
'recent_articles': recent_articles,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
        # the first item in the list is the latest issue.
        # the conditional above checks whether there are any issues
'last_issue': latest_issue,
'latest_issue_legend': latest_issue_legend,
'sections': sections if sections else None,
'news': news,
'journal_metrics': journal_metrics
}
return render_template("journal/detail.html", **context)
@main.route('/journal/<string:url_seg>/feed/')
@cache.cached(key_prefix=cache_key_with_lang)
def journal_feed(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
issues = controllers.get_issues_by_jid(journal.jid, is_public=True)
last_issue = issues[0] if issues else None
articles = controllers.get_articles_by_iid(last_issue.iid, is_public=True)
feed = AtomFeed(journal.title,
feed_url=request.url,
url=request.url_root,
subtitle=utils.get_label_issue(last_issue))
feed_language = session.get('lang', get_locale())
feed_language = feed_language[:2].lower()
for article in articles:
        # ######### TODO: review this language fallback #########
article_lang = feed_language
if feed_language not in article.languages:
article_lang = article.original_language
feed.add(article.title or _('Artigo sem título'),
render_template("issue/feed_content.html", article=article),
content_type='html',
id=article.doi or article.pid,
author=article.authors,
url=url_external('main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
lang=article_lang),
updated=journal.updated,
published=journal.created)
return feed.get_response()
@main.route("/journal/<string:url_seg>/about/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang)
def about_journal(url_seg):
language = session.get('lang', get_locale())
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
latest_issue = utils.fix_journal_last_issue(journal)
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = None
page = controllers.get_page_by_journal_acron_lang(journal.acronym, language)
context = {
'journal': journal,
'latest_issue_legend': latest_issue_legend,
'last_issue': latest_issue,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
}
if page:
context['content'] = page.content
if page.updated_at:
context['page_updated_at'] = page.updated_at
return render_template("journal/about.html", **context)
@main.route("/journals/search/alpha/ajax/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_alpha_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
page = request.args.get('page', 1, type=int)
lang = get_lang_from_session()[:2].lower()
response_data = controllers.get_alpha_list_from_paginated_journals(
title_query=query,
query_filter=query_filter,
page=page,
lang=lang)
return jsonify(response_data)
@main.route("/journals/search/group/by/filter/ajax/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def journals_search_by_theme_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida. Deve ser por ajax'))
query = request.args.get('query', '', type=str)
query_filter = request.args.get('query_filter', '', type=str)
filter = request.args.get('filter', 'areas', type=str)
lang = get_lang_from_session()[:2].lower()
if filter == 'areas':
objects = controllers.get_journals_grouped_by('study_areas', query, query_filter=query_filter, lang=lang)
elif filter == 'wos':
objects = controllers.get_journals_grouped_by('subject_categories', query, query_filter=query_filter, lang=lang)
elif filter == 'publisher':
objects = controllers.get_journals_grouped_by('publisher_name', query, query_filter=query_filter, lang=lang)
else:
return jsonify({
'error': 401,
            'message': _('Parâmetro "filter" é inválido, deve ser "areas", "wos" ou "publisher".')
})
return jsonify(objects)
@main.route("/journals/download/<string:list_type>/<string:extension>/", methods=['GET', ])
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def download_journal_list(list_type, extension):
if extension.lower() not in ['csv', 'xls']:
abort(401, _('Parámetro "extension" é inválido, deve ser "csv" ou "xls".'))
elif list_type.lower() not in ['alpha', 'areas', 'wos', 'publisher']:
abort(401, _('Parámetro "list_type" é inválido, deve ser: "alpha", "areas", "wos" ou "publisher".'))
else:
if extension.lower() == 'xls':
mimetype = 'application/vnd.ms-excel'
else:
mimetype = 'text/csv'
query = request.args.get('query', '', type=str)
data = controllers.get_journal_generator_for_csv(list_type=list_type,
title_query=query,
extension=extension.lower())
timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = 'journals_%s_%s.%s' % (list_type, timestamp, extension)
response = Response(data, mimetype=mimetype)
response.headers['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
@main.route("/<string:url_seg>/contact", methods=['POST'])
def contact(url_seg):
if not request.is_xhr:
abort(403, _('Requisição inválida, deve ser ajax.'))
if utils.is_recaptcha_valid(request):
form = forms.ContactForm(request.form)
        journal = controllers.get_journal_by_url_seg(url_seg)
        if not journal:
            abort(404, _('Periódico não encontrado'))
        if not journal.enable_contact:
abort(403, _('Periódico não permite envio de email.'))
recipients = journal.editor_email
if form.validate():
sent, message = controllers.send_email_contact(recipients,
form.data['name'],
form.data['your_email'],
form.data['message'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
else:
abort(400, _('Requisição inválida, captcha inválido.'))
@main.route("/form_contact/<string:url_seg>/", methods=['GET'])
def form_contact(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
context = {
'journal': journal
}
return render_template("journal/includes/contact_form.html", **context)
# ###################################Issue#######################################
@main.route('/grid/<string:url_seg>/')
def issue_grid_legacy(url_seg):
return redirect(url_for('main.issue_grid', url_seg=url_seg), 301)
@main.route('/j/<string:url_seg>/grid')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_grid(url_seg):
journal = controllers.get_journal_by_url_seg(url_seg)
if not journal:
abort(404, _('Periódico não encontrado'))
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
    # session language
language = session.get('lang', get_locale())
    # default ordering used by ``get_issues_by_jid``: "-year", "-volume", "-order"
issues_data = controllers.get_issues_for_grid_by_jid(journal.id, is_public=True)
latest_issue = issues_data['last_issue']
if latest_issue:
latest_issue_legend = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(latest_issue.year), volume=latest_issue.volume, number=latest_issue.number,
suppl=latest_issue.suppl_text, language=language[:2].lower())
else:
latest_issue_legend = None
context = {
'journal': journal,
'last_issue': issues_data['last_issue'],
'latest_issue_legend': latest_issue_legend,
'volume_issue': issues_data['volume_issue'],
'ahead': issues_data['ahead'],
'result_dict': issues_data['ordered_for_grid'],
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
}
return render_template("issue/grid.html", **context)
@main.route('/toc/<string:url_seg>/<string:url_seg_issue>/')
def issue_toc_legacy(url_seg, url_seg_issue):
if url_seg_issue and "ahead" in url_seg_issue:
return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)
return redirect(
url_for('main.issue_toc',
url_seg=url_seg,
url_seg_issue=url_seg_issue),
code=301)
@main.route('/j/<string:url_seg>/i/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def issue_toc(url_seg, url_seg_issue):
section_filter = None
goto = request.args.get("goto", None, type=str)
if goto not in ("previous", "next"):
goto = None
if goto in (None, "next") and "ahead" in url_seg_issue:
        # redirect to `aop_toc`
return redirect(url_for('main.aop_toc', url_seg=url_seg), code=301)
    # session language
language = session.get('lang', get_locale())
if current_app.config["FILTER_SECTION_ENABLE"]:
        # documents' section filter, if one was selected
section_filter = request.args.get('section', '', type=str).upper()
    # fetch the issue
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
    # fetch the journal
journal = issue.journal
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
    # fill in the url_segment of the journal's last_issue
utils.fix_journal_last_issue(journal)
    # next/previous issue navigation (redirects when requested)
goto_url = goto_next_or_previous_issue(
issue, request.args.get('goto', None, type=str))
if goto_url:
return redirect(goto_url, code=301)
    # fetch the documents
articles = controllers.get_articles_by_iid(issue.iid, is_public=True)
if articles:
        # collect ALL sections present in this table of contents
sections = sorted({a.section.upper() for a in articles if a.section})
else:
        # no documents, so there are no sections
sections = []
if current_app.config["FILTER_SECTION_ENABLE"] and section_filter != '':
        # keep only the documents of the selected section
articles = [a for a in articles if a.section.upper() == section_filter]
    # collect PDF and full-text languages of each document
has_math_content = False
for article in articles:
article_text_languages = [doc['lang'] for doc in article.htmls]
article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
setattr(article, "article_text_languages", article_text_languages)
setattr(article, "article_pdf_languages", article_pdf_languages)
if 'mml:' in article.title:
has_math_content = True
    # build the bibliographic strip
issue_bibliographic_strip = descriptive_short_format(
title=journal.title, short_title=journal.short_title,
pubdate=str(issue.year), volume=issue.volume, number=issue.number,
suppl=issue.suppl_text, language=language[:2].lower())
context = {
'this_page_url': url_for(
'main.issue_toc',
url_seg=url_seg,
url_seg_issue=url_seg_issue),
'has_math_content': has_math_content,
'journal': journal,
'issue': issue,
'issue_bibliographic_strip': issue_bibliographic_strip,
'articles': articles,
'sections': sections,
'section_filter': section_filter,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper()) for study_area in journal.study_areas
],
'last_issue': journal.last_issue
}
return render_template("issue/toc.html", **context)
def goto_next_or_previous_issue(current_issue, goto_param):
if goto_param not in ["next", "previous"]:
return None
all_issues = list(
controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
if goto_param == "next":
selected_issue = utils.get_next_issue(all_issues, current_issue)
elif goto_param == "previous":
selected_issue = utils.get_prev_issue(all_issues, current_issue)
if selected_issue in (None, current_issue):
        # no redirect needed
return None
try:
url_seg_issue = selected_issue.url_segment
except AttributeError:
return None
else:
return url_for('main.issue_toc',
url_seg=selected_issue.journal.url_segment,
url_seg_issue=url_seg_issue)
def get_next_or_previous_issue(current_issue, goto_param):
if goto_param not in ["next", "previous"]:
return current_issue
all_issues = list(
controllers.get_issues_by_jid(current_issue.journal.id, is_public=True))
if goto_param == "next":
return utils.get_next_issue(all_issues, current_issue)
return utils.get_prev_issue(all_issues, current_issue)
@main.route('/j/<string:url_seg>/aop')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def aop_toc(url_seg):
section_filter = request.args.get('section', '', type=str).upper()
aop_issues = controllers.get_aop_issues(url_seg) or []
if not aop_issues:
abort(404, _('Artigos ahead of print não encontrados'))
goto = request.args.get("goto", None, type=str)
if goto == "previous":
url = goto_next_or_previous_issue(aop_issues[-1], goto)
if url:
            return redirect(url, code=301)
journal = aop_issues[0].journal
if not journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(journal.unpublish_reason))
utils.fix_journal_last_issue(journal)
articles = []
for aop_issue in aop_issues:
_articles = controllers.get_articles_by_iid(
aop_issue.iid, is_public=True)
if _articles:
articles.extend(_articles)
if not articles:
abort(404, _('Artigos ahead of print não encontrados'))
sections = sorted({a.section.upper() for a in articles if a.section})
if section_filter != '':
articles = [a for a in articles if a.section.upper() == section_filter]
for article in articles:
article_text_languages = [doc['lang'] for doc in article.htmls]
article_pdf_languages = [(doc['lang'], doc['url']) for doc in article.pdfs]
setattr(article, "article_text_languages", article_text_languages)
setattr(article, "article_pdf_languages", article_pdf_languages)
context = {
'this_page_url': url_for("main.aop_toc", url_seg=url_seg),
'journal': journal,
'issue': aop_issues[0],
'issue_bibliographic_strip': "ahead of print",
'articles': articles,
'sections': sections,
'section_filter': section_filter,
'journal_study_areas': [
STUDY_AREAS.get(study_area.upper())
for study_area in journal.study_areas
],
        # the first item in the list is the latest issue.
'last_issue': journal.last_issue
}
return render_template("issue/toc.html", **context)
@main.route('/feed/<string:url_seg>/<string:url_seg_issue>/')
@cache.cached(key_prefix=cache_key_with_lang)
def issue_feed(url_seg, url_seg_issue):
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Número não encontrado'))
if not issue.is_public:
abort(404, ISSUE_UNPUBLISH + _(issue.unpublish_reason))
if not issue.journal.is_public:
abort(404, JOURNAL_UNPUBLISH + _(issue.journal.unpublish_reason))
journal = issue.journal
articles = controllers.get_articles_by_iid(issue.iid, is_public=True)
feed = AtomFeed(journal.title or "",
feed_url=request.url,
url=request.url_root,
subtitle=utils.get_label_issue(issue))
feed_language = session.get('lang', get_locale())
for article in articles:
        # ######### TODO: review this language fallback #########
article_lang = feed_language
if feed_language not in article.languages:
article_lang = article.original_language
        feed.add(article.title or _('Artigo sem título'),
render_template("issue/feed_content.html", article=article),
content_type='html',
author=article.authors,
id=article.doi or article.pid,
url=url_external('main.article_detail_v3',
url_seg=journal.url_segment,
article_pid_v3=article.aid,
lang=article_lang),
updated=journal.updated,
published=journal.created)
return feed.get_response()
# ##################################Article######################################
@main.route('/article/<regex("S\d{4}-\d{3}[0-9xX][0-2][0-9]{3}\d{4}\d{5}"):pid>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_pid(pid):
article = controllers.get_article_by_pid(pid)
if not article:
article = controllers.get_article_by_oap_pid(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(url_for('main.article_detail_v3',
url_seg=article.journal.acronym,
article_pid_v3=article.aid))
def render_html_from_xml(article, lang, gs_abstract=False):
logger.debug("Get XML: %s", article.xml)
if current_app.config["SSM_XML_URL_REWRITE"]:
result = fetch_data(use_ssm_url(article.xml))
else:
result = fetch_data(article.xml)
xml = etree.parse(BytesIO(result))
generator = HTMLGenerator.parse(
xml, valid_only=False, gs_abstract=gs_abstract, output_style="website")
return generator.generate(lang), generator.languages
def render_html_from_html(article, lang):
html_url = [html
for html in article.htmls
if html['lang'] == lang]
try:
html_url = html_url[0]['url']
except IndexError:
raise ValueError('Artigo não encontrado') from None
result = fetch_data(use_ssm_url(html_url))
html = result.decode('utf8')
text_languages = [html['lang'] for html in article.htmls]
return html, text_languages
def render_html_abstract(article, lang):
abstract_text = ''
for abstract in article.abstracts:
if abstract['language'] == lang:
abstract_text = abstract["text"]
break
return abstract_text, article.abstract_languages
def render_html(article, lang, gs_abstract=False):
if article.xml:
return render_html_from_xml(article, lang, gs_abstract)
elif article.htmls:
if gs_abstract:
return render_html_abstract(article, lang)
return render_html_from_html(article, lang)
else:
        # TODO: fix the tests that expect the ``htmls`` attribute to exist.
        # Ideally this should raise a ValueError.
return '', []
# TODO: remove as soon as the Article.xml values are consistent in the
# database
def use_ssm_url(url):
"""Normaliza a string `url` de acordo com os valores das diretivas de
configuração OPAC_SSM_SCHEME, OPAC_SSM_DOMAIN e OPAC_SSM_PORT.
A normalização busca obter uma URL absoluta em função de uma relativa, ou
uma absoluta em função de uma absoluta, mas com as partes *scheme* e
*authority* trocadas pelas definidas nas diretivas citadas anteriormente.
Este código deve ser removido assim que o valor de Article.xml estiver
consistente, i.e., todos os registros possuirem apenas URLs absolutas.
"""
if url.startswith("http"):
parsed_url = urlparse(url)
return current_app.config["SSM_BASE_URI"] + parsed_url.path
else:
return current_app.config["SSM_BASE_URI"] + url
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/')
@main.route('/article/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail(url_seg, url_seg_issue, url_seg_article, lang_code=''):
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Issue não encontrado'))
article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article)
if article is None:
article = controllers.get_article_by_aop_url_segs(
issue.journal, url_seg_issue, url_seg_article
)
if article is None:
abort(404, _('Artigo não encontrado'))
req_params = {
"url_seg": article.journal.acronym,
"article_pid_v3": article.aid,
}
if lang_code:
req_params["lang"] = lang_code
return redirect(url_for('main.article_detail_v3', **req_params))
@main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/')
@main.route('/j/<string:url_seg>/a/<string:article_pid_v3>/<string:part>/')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_v3(url_seg, article_pid_v3, part=None):
qs_lang = request.args.get('lang', type=str) or None
qs_goto = request.args.get('goto', type=str) or None
qs_stop = request.args.get('stop', type=str) or None
qs_format = request.args.get('format', 'html', type=str)
gs_abstract = (part == "abstract")
if part and not gs_abstract:
abort(404,
_("Não existe '{}'. No seu lugar use '{}'"
).format(part, 'abstract'))
try:
qs_lang, article = controllers.get_article(
article_pid_v3, url_seg, qs_lang, gs_abstract, qs_goto)
if qs_goto:
return redirect(
url_for(
'main.article_detail_v3',
url_seg=url_seg,
article_pid_v3=article.aid,
part=part,
format=qs_format,
lang=qs_lang,
stop=getattr(article, 'stop', None),
),
code=301
)
    except controllers.PreviousOrNextArticleNotFoundError:
if gs_abstract:
abort(404, _('Resumo inexistente'))
abort(404, _('Artigo inexistente'))
except (controllers.ArticleNotFoundError,
controllers.ArticleJournalNotFoundError):
abort(404, _('Artigo não encontrado'))
except controllers.ArticleLangNotFoundError:
return redirect(
url_for(
'main.article_detail_v3',
url_seg=url_seg,
article_pid_v3=article_pid_v3,
format=qs_format,
),
code=301
)
except controllers.ArticleAbstractNotFoundError:
abort(404, _('Recurso não encontrado'))
except controllers.ArticleIsNotPublishedError as e:
abort(404, "{}{}".format(ARTICLE_UNPUBLISH, e))
except controllers.IssueIsNotPublishedError as e:
abort(404, "{}{}".format(ISSUE_UNPUBLISH, e))
except controllers.JournalIsNotPublishedError as e:
abort(404, "{}{}".format(JOURNAL_UNPUBLISH, e))
except ValueError as e:
abort(404, str(e))
def _handle_html():
citation_pdf_url = None
for pdf_data in article.pdfs:
if pdf_data.get("lang") == qs_lang:
citation_pdf_url = url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
lang=qs_lang,
format="pdf",
)
break
website = request.url
if website:
parsed_url = urlparse(request.url)
if current_app.config["FORCE_USE_HTTPS_GOOGLE_TAGS"]:
website = "{}://{}".format('https', parsed_url.netloc)
else:
website = "{}://{}".format(parsed_url.scheme, parsed_url.netloc)
if citation_pdf_url:
citation_pdf_url = "{}{}".format(website, citation_pdf_url)
try:
html, text_languages = render_html(article, qs_lang, gs_abstract)
except (ValueError, NonRetryableError):
abort(404, _('HTML do Artigo não encontrado ou indisponível'))
except RetryableError:
abort(500, _('Erro inesperado'))
text_versions = sorted(
[
(
lang,
display_original_lang_name(lang),
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
lang=lang
)
)
for lang in text_languages
]
)
citation_xml_url = "{}{}".format(
website,
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article_pid_v3,
format="xml",
lang=article.original_language,
)
)
context = {
'next_article': qs_stop != 'next',
'previous_article': qs_stop != 'previous',
'article': article,
'journal': article.journal,
'issue': article.issue,
'html': html,
'citation_pdf_url': citation_pdf_url,
'citation_xml_url': citation_xml_url,
'article_lang': qs_lang,
'text_versions': text_versions,
'related_links': controllers.related_links(article),
'gs_abstract': gs_abstract,
'part': part,
}
return render_template("article/detail.html", **context)
def _handle_pdf():
if not article.pdfs:
abort(404, _('PDF do Artigo não encontrado'))
pdf_info = [pdf for pdf in article.pdfs if pdf['lang'] == qs_lang]
if len(pdf_info) != 1:
abort(404, _('PDF do Artigo não encontrado'))
try:
pdf_url = pdf_info[0]['url']
except (IndexError, KeyError, ValueError, TypeError):
abort(404, _('PDF do Artigo não encontrado'))
if pdf_url:
return get_pdf_content(pdf_url)
        abort(404, _('Recurso do Artigo não encontrado. Caminho inválido!'))
def _handle_xml():
if current_app.config["SSM_XML_URL_REWRITE"]:
result = fetch_data(use_ssm_url(article.xml))
else:
result = fetch_data(article.xml)
response = make_response(result)
response.headers['Content-Type'] = 'application/xml'
return response
if 'html' == qs_format:
return _handle_html()
elif 'pdf' == qs_format:
return _handle_pdf()
elif 'xml' == qs_format:
return _handle_xml()
else:
abort(400, _('Formato não suportado'))
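# The same document is therefore served in three renditions through the
# `format` query string parameter, e.g. (journal acronym and pid below are
# hypothetical):
#   /j/abc/a/XyZ3kP9?format=html&lang=pt  -> rendered HTML page
#   /j/abc/a/XyZ3kP9?format=pdf&lang=pt   -> PDF rendition
#   /j/abc/a/XyZ3kP9?format=xml           -> SciELO PS XML file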
@main.route('/readcube/epdf/')
@main.route('/readcube/epdf.php')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def article_epdf():
doi = request.args.get('doi', None, type=str)
pid = request.args.get('pid', None, type=str)
pdf_path = request.args.get('pdf_path', None, type=str)
lang = request.args.get('lang', None, type=str)
if not all([doi, pid, pdf_path, lang]):
abort(400, _('Parâmetros insuficientes para obter o EPDF do artigo'))
else:
context = {
'doi': doi,
'pid': pid,
'pdf_path': pdf_path,
'lang': lang,
}
return render_template("article/epdf.html", **context)
def get_pdf_content(url):
logger.debug("Get PDF: %s", url)
if current_app.config["SSM_ARTICLE_ASSETS_OR_RENDITIONS_URL_REWRITE"]:
url = use_ssm_url(url)
try:
response = fetch_data(url)
except NonRetryableError:
abort(404, _('PDF não encontrado'))
except RetryableError:
abort(500, _('Erro inesperado'))
else:
mimetype, __ = mimetypes.guess_type(url)
return Response(response, mimetype=mimetype)
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def get_content_from_ssm(resource_ssm_media_path):
resource_ssm_full_url = current_app.config['SSM_BASE_URI'] + resource_ssm_media_path
url = resource_ssm_full_url.strip()
mimetype, __ = mimetypes.guess_type(url)
try:
ssm_response = fetch_data(url)
except NonRetryableError:
abort(404, _('Recurso não encontrado'))
except RetryableError:
abort(500, _('Erro inesperado'))
else:
return Response(ssm_response, mimetype=mimetype)
@main.route('/media/assets/<regex("(.*)"):relative_media_path>')
@cache.cached(key_prefix=cache_key_with_lang)
def media_assets_proxy(relative_media_path):
resource_ssm_path = '{ssm_media_path}{resource_path}'.format(
ssm_media_path=current_app.config['SSM_MEDIA_PATH'],
resource_path=relative_media_path)
return get_content_from_ssm(resource_ssm_path)
@main.route('/article/ssm/content/raw/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def article_ssm_content_raw():
resource_ssm_path = request.args.get('resource_ssm_path', None)
if not resource_ssm_path:
        abort(404, _('Recurso do Artigo não encontrado. Caminho inválido!'))
else:
return get_content_from_ssm(resource_ssm_path)
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<string:url_seg_article>/<regex("(?:\w{2})"):lang_code>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>')
@main.route('/pdf/<string:url_seg>/<string:url_seg_issue>/<regex("(.*)"):url_seg_article>/<regex("(?:\w{2})"):lang_code>')
@cache.cached(key_prefix=cache_key_with_lang)
def article_detail_pdf(url_seg, url_seg_issue, url_seg_article, lang_code=''):
"""
    Expected URL patterns:
`/pdf/csc/2021.v26suppl1/2557-2558`
`/pdf/csc/2021.v26suppl1/2557-2558/en`
"""
if not lang_code and "." not in url_seg_issue:
return router_legacy_pdf(url_seg, url_seg_issue, url_seg_article)
issue = controllers.get_issue_by_url_seg(url_seg, url_seg_issue)
if not issue:
abort(404, _('Issue não encontrado'))
article = controllers.get_article_by_issue_article_seg(issue.iid, url_seg_article)
if not article:
abort(404, _('Artigo não encontrado'))
req_params = {
'url_seg': article.journal.url_segment,
'article_pid_v3': article.aid,
'format': 'pdf',
}
if lang_code:
req_params['lang'] = lang_code
return redirect(url_for('main.article_detail_v3', **req_params), code=301)
@main.route('/pdf/<string:journal_acron>/<string:issue_info>/<string:pdf_filename>.pdf')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy_pdf(journal_acron, issue_info, pdf_filename):
pdf_filename = '%s.pdf' % pdf_filename
journal = controllers.get_journal_by_url_seg(journal_acron)
if not journal:
abort(404, _('Este PDF não existe em http://www.scielo.br. Consulte http://search.scielo.org'))
article = controllers.get_article_by_pdf_filename(
journal_acron, issue_info, pdf_filename)
if not article:
abort(404, _('PDF do artigo não foi encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
format='pdf',
lang=article._pdf_lang,
),
code=301
)
@main.route('/cgi-bin/fbpe/<string:text_or_abstract>/')
@cache.cached(key_prefix=cache_key_with_lang_with_qs)
def router_legacy_article(text_or_abstract):
pid = request.args.get('pid', None)
lng = request.args.get('lng', None)
if not (text_or_abstract in ['fbtext', 'fbabs'] and pid):
        # not a supported script, or no pid given: bad request
        abort(400, _('Requisição inválida ao tentar acessar o artigo com pid: %s' % pid))
article = controllers.get_article_by_pid_v1(pid)
if not article:
abort(404, _('Artigo não encontrado'))
return redirect(
url_for(
'main.article_detail_v3',
url_seg=article.journal.url_segment,
article_pid_v3=article.aid,
),
code=301
)
# ###############################E-mail share##################################
@main.route("/email_share_ajax/", methods=['POST'])
def email_share_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida.'))
form = forms.EmailShareForm(request.form)
if form.validate():
recipients = [email.strip() for email in form.data['recipients'].split(';') if email.strip() != '']
sent, message = controllers.send_email_share(form.data['your_email'],
recipients,
form.data['share_url'],
form.data['subject'],
form.data['comment'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
@main.route("/form_mail/", methods=['GET'])
def email_form():
context = {'url': request.args.get('url')}
return render_template("email/email_form.html", **context)
@main.route("/email_error_ajax/", methods=['POST'])
def email_error_ajax():
if not request.is_xhr:
abort(400, _('Requisição inválida.'))
form = forms.ErrorForm(request.form)
if form.validate():
recipients = [email.strip() for email in current_app.config.get('EMAIL_ACCOUNTS_RECEIVE_ERRORS') if email.strip() != '']
sent, message = controllers.send_email_error(form.data['name'],
form.data['your_email'],
recipients,
form.data['url'],
form.data['error_type'],
form.data['message'],
form.data['page_title'])
return jsonify({'sent': sent, 'message': str(message),
'fields': [key for key in form.data.keys()]})
else:
return jsonify({'sent': False, 'message': form.errors,
'fields': [key for key in form.data.keys()]})
@main.route("/error_mail/", methods=['GET'])
def error_form():
context = {'url': request.args.get('url')}
return render_template("includes/error_form.html", **context)
# ###############################Others########################################
@main.route("/media/<path:filename>/", methods=['GET'])
@cache.cached(key_prefix=cache_key_with_lang)
def download_file_by_filename(filename):
media_root = current_app.config['MEDIA_ROOT']
return send_from_directory(media_root, filename)
@main.route("/img/scielo.gif", methods=['GET'])
def full_text_image():
return send_from_directory('static', 'img/full_text_scielo_img.gif')
@main.route("/robots.txt", methods=['GET'])
def get_robots_txt_file():
return send_from_directory('static', 'robots.txt')
@main.route("/revistas/<path:journal_seg>/<string:page>.htm", methods=['GET'])
def router_legacy_info_pages(journal_seg, page):
"""
    This view function redirects the legacy URLs to the new URLs.
    It keeps a dictionary as a mapping table between the legacy page names
    and the anchors on the new about page:
    [iaboutj.htm, eaboutj.htm, paboutj.htm] -> #about
    [iedboard.htm, eedboard.htm, pedboard.htm] -> #editors
    [iinstruc.htm, einstruc.htm, pinstruc.htm] -> #instructions
    isubscrp.htm -> no anchor
"""
page_anchor = {
'iaboutj': '#about',
'eaboutj': '#about',
'paboutj': '#about',
'eedboard': '#editors',
'iedboard': '#editors',
'pedboard': '#editors',
'iinstruc': '#instructions',
'pinstruc': '#instructions',
'einstruc': '#instructions'
}
return redirect('%s%s' % (url_for('main.about_journal',
url_seg=journal_seg), page_anchor.get(page, '')), code=301)
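# For example (hypothetical journal acronym), a legacy request such as
#   /revistas/abc/iedboard.htm
# is redirected with a 301 to the new about page with the matching anchor:
#   /journal/abc/about/#editors
# and pages without a mapping (e.g. isubscrp.htm) go to the about page with
# no anchor.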
@main.route("/api/v1/counter_dict", methods=['GET'])
def router_counter_dicts():
"""
    This view function returns a JSON dictionary that maps PIDs to the data
    required by the Matomo, COUNTER and SUSHI applications.
"""
end_date = request.args.get('end_date', '', type=str)
try:
end_date = datetime.strptime(end_date, '%Y-%m-%d')
except ValueError:
end_date = datetime.now()
begin_date = end_date - timedelta(days=30)
page = request.args.get('page', type=int)
if not page:
page = 1
limit = request.args.get('limit', type=int)
if not limit or limit > 100 or limit < 0:
limit = 100
results = {'dictionary_date': end_date,
'end_date': end_date.strftime('%Y-%m-%d %H-%M-%S'),
'begin_date': begin_date.strftime('%Y-%m-%d %H-%M-%S'),
'documents': {},
'collection': current_app.config['OPAC_COLLECTION']}
articles = controllers.get_articles_by_date_range(begin_date, end_date, page, limit)
for a in articles.items:
results['documents'].update(get_article_counter_data(a))
results['total'] = articles.total
results['pages'] = articles.pages
results['limit'] = articles.per_page
results['page'] = articles.page
return jsonify(results)
def get_article_counter_data(article):
return {
article.aid: {
"journal_acronym": article.journal.acronym,
"pid": article.pid if article.pid else '',
"aop_pid": article.aop_pid if article.aop_pid else '',
"pid_v1": article.scielo_pids.get('v1', ''),
"pid_v2": article.scielo_pids.get('v2', ''),
"pid_v3": article.scielo_pids.get('v3', ''),
"publication_date": article.publication_date,
"default_language": article.original_language,
"create": article.created,
"update": article.updated
}
}
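# Shape of the JSON returned by /api/v1/counter_dict (illustrative values only,
# keys follow the code above):
# {
#   "collection": "<OPAC_COLLECTION>",
#   "dictionary_date": "...", "begin_date": "...", "end_date": "...",
#   "documents": {
#     "<pid_v3>": {"journal_acronym": "...", "pid": "...", "aop_pid": "...",
#                  "pid_v1": "...", "pid_v2": "...", "pid_v3": "...",
#                  "publication_date": "...", "default_language": "...",
#                  "create": "...", "update": "..."}
#   },
#   "total": 0, "pages": 0, "limit": 100, "page": 1
# }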
@main.route('/cgi-bin/wxis.exe/iah/')
def author_production():
# http://www.scielo.br/cgi-bin/wxis.exe/iah/
# ?IsisScript=iah/iah.xis&base=article%5Edlibrary&format=iso.pft&
# lang=p&nextAction=lnk&
# indexSearch=AU&exprSearch=MEIERHOFFER,+LILIAN+KOZSLOWSKI
# ->
# //search.scielo.org/?lang=pt&q=au:MEIERHOFFER,+LILIAN+KOZSLOWSKI
search_url = current_app.config.get('URL_SEARCH')
if not search_url:
abort(404, "URL_SEARCH: {}".format(_('Página não encontrada')))
qs_exprSearch = request.args.get('exprSearch', type=str) or ''
qs_indexSearch = request.args.get('indexSearch', type=str) or ''
qs_lang = request.args.get('lang', type=str) or ''
_lang = IAHX_LANGS.get(qs_lang) or ''
_lang = _lang and "lang={}".format(_lang)
_expr = "{}{}".format(
qs_indexSearch == "AU" and "au:" or '', qs_exprSearch)
_expr = _expr and "q={}".format(_expr.replace(" ", "+"))
_and = _lang and _expr and "&" or ''
_question_mark = (_lang or _expr) and "?" or ""
if search_url.startswith("//"):
protocol = "https:"
elif search_url.startswith("http"):
protocol = ""
else:
protocol = "https://"
url = "{}{}{}{}{}{}".format(
protocol, search_url, _question_mark, _lang, _and, _expr)
return redirect(url, code=301)
create_read_write_1/Writing/to_csv.py | CodeXfull/Pandas | 0 | 9302 | """
Convert a DataFrame to CSV
"""
import pandas as pd
dataset = pd.DataFrame({'Frutas': ["Abacaxi", "Mamão"],
"Nomes": ["Éverton", "Márcia"]},
index=["Linha 1", "Linha 2"])
dataset.to_csv("dataset.csv") | """
venv/Lib/site-packages/pygsheets/client.py | 13rilliant/Python-CMS | 1 | 9303 | <filename>venv/Lib/site-packages/pygsheets/client.py
# -*- coding: utf-8 -*-.
import re
import warnings
import os
import logging
from pygsheets.drive import DriveAPIWrapper
from pygsheets.sheet import SheetAPIWrapper
from pygsheets.spreadsheet import Spreadsheet
from pygsheets.exceptions import SpreadsheetNotFound, NoValidUrlKeyFound
from pygsheets.custom_types import ValueRenderOption, DateTimeRenderOption
from google_auth_httplib2 import AuthorizedHttp
GOOGLE_SHEET_CELL_UPDATES_LIMIT = 50000
_url_key_re_v1 = re.compile(r'key=([^&#]+)')
_url_key_re_v2 = re.compile(r"/spreadsheets/d/([a-zA-Z0-9-_]+)")
_email_patttern = re.compile(r"\"?([-a-zA-Z0-9.`?{}]+@[-a-zA-Z0-9.]+\.\w+)\"?")
# _domain_pattern = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
_deprecated_keyword_mapping = {
'parent_id': 'folder',
}
class Client(object):
"""Create or access Google spreadsheets.
Exposes members to create new spreadsheets or open existing ones. Use `authorize` to instantiate an instance of this
class.
>>> import pygsheets
>>> c = pygsheets.authorize()
The sheet API service object is stored in the sheet property and the drive API service object in the drive property.
>>> c.sheet.get('<SPREADSHEET ID>')
>>> c.drive.delete('<FILE ID>')
:param credentials: The credentials object returned by google-auth or google-auth-oauthlib.
:param retries: (Optional) Number of times to retry a connection before raising a TimeOut error.
Default: 3
:param http: The underlying HTTP object to use to make requests. If not specified, a
:class:`httplib2.Http` instance will be constructed.
"""
spreadsheet_cls = Spreadsheet
def __init__(self, credentials, retries=3, http=None):
self.oauth = credentials
self.logger = logging.getLogger(__name__)
http = AuthorizedHttp(credentials, http=http)
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
self.sheet = SheetAPIWrapper(http, data_path, retries=retries)
self.drive = DriveAPIWrapper(http, data_path)
@property
def teamDriveId(self):
""" Enable team drive support
Deprecated: use client.drive.enable_team_drive(team_drive_id=?)
"""
return self.drive.team_drive_id
@teamDriveId.setter
def teamDriveId(self, value):
warnings.warn("Depricated please use drive.enable_team_drive")
self.drive.enable_team_drive(value)
def spreadsheet_ids(self, query=None):
"""Get a list of all spreadsheet ids present in the Google Drive or TeamDrive accessed."""
return [x['id'] for x in self.drive.spreadsheet_metadata(query)]
def spreadsheet_titles(self, query=None):
"""Get a list of all spreadsheet titles present in the Google Drive or TeamDrive accessed."""
return [x['name'] for x in self.drive.spreadsheet_metadata(query)]
def create(self, title, template=None, folder=None, **kwargs):
"""Create a new spreadsheet.
The title will always be set to the given value (even overwriting the templates title). The template
can either be a `spreadsheet resource <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#resource-spreadsheet>`_
or an instance of :class:`~pygsheets.Spreadsheet`. In both cases undefined values will be ignored.
:param title: Title of the new spreadsheet.
:param template: A template to create the new spreadsheet from.
:param folder: The Id of the folder this sheet will be stored in.
:param kwargs: Standard parameters (see reference for details).
:return: :class:`~pygsheets.Spreadsheet`
"""
result = self.sheet.create(title, template=template, **kwargs)
if folder:
self.drive.move_file(result['spreadsheetId'],
old_folder=self.drive.spreadsheet_metadata(query="name = '" + title + "'")[0]['parents'][0],
new_folder=folder)
return self.spreadsheet_cls(self, jsonsheet=result)
def open(self, title):
"""Open a spreadsheet by title.
In a case where there are several sheets with the same title, the first one found is returned.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open('TestSheet')
:param title: A title of a spreadsheet.
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: No spreadsheet with the given title was found.
"""
try:
spreadsheet = list(filter(lambda x: x['name'] == title, self.drive.spreadsheet_metadata()))[0]
return self.open_by_key(spreadsheet['id'])
except (KeyError, IndexError):
raise SpreadsheetNotFound('Could not find a spreadsheet with title %s.' % title)
def open_by_key(self, key):
"""Open a spreadsheet by key.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open_by_key('<KEY>')
:param key: The key of a spreadsheet. (can be found in the sheet URL)
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: The given spreadsheet ID was not found.
"""
response = self.sheet.get(key,
fields='properties,sheets/properties,spreadsheetId,namedRanges',
includeGridData=False)
return self.spreadsheet_cls(self, response)
def open_by_url(self, url):
"""Open a spreadsheet by URL.
>>> import pygsheets
>>> c = pygsheets.authorize()
>>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl')
:param url: URL of a spreadsheet as it appears in a browser.
:returns: :class:`~pygsheets.Spreadsheet`
:raises pygsheets.SpreadsheetNotFound: No spreadsheet was found with the given URL.
"""
m1 = _url_key_re_v1.search(url)
if m1:
return self.open_by_key(m1.group(1))
else:
m2 = _url_key_re_v2.search(url)
if m2:
return self.open_by_key(m2.group(1))
else:
raise NoValidUrlKeyFound
def open_all(self, query=''):
"""Opens all available spreadsheets.
Result can be filtered when specifying the query parameter. On the details on how to form the query:
`Reference <https://developers.google.com/drive/v3/web/search-parameters>`_
:param query: (Optional) Can be used to filter the returned metadata.
:returns: A list of :class:`~pygsheets.Spreadsheet`.
"""
return [self.open_by_key(key) for key in self.spreadsheet_ids(query=query)]
def open_as_json(self, key):
"""Return a json representation of the spreadsheet.
See `Reference <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet>`__ for details.
"""
return self.sheet.get(key, fields='properties,sheets/properties,sheets/protectedRanges,'
'spreadsheetId,namedRanges',
includeGridData=False)
def get_range(self, spreadsheet_id,
value_range,
major_dimension='ROWS',
value_render_option=ValueRenderOption.FORMATTED_VALUE,
date_time_render_option=DateTimeRenderOption.SERIAL_NUMBER):
"""Returns a range of values from a spreadsheet. The caller must specify the spreadsheet ID and a range.
Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get>`__
:param spreadsheet_id: The ID of the spreadsheet to retrieve data from.
:param value_range: The A1 notation of the values to retrieve.
:param major_dimension: The major dimension that results should use.
For example, if the spreadsheet data is: A1=1,B1=2,A2=3,B2=4, then
requesting range=A1:B2,majorDimension=ROWS will return [[1,2],[3,4]],
whereas requesting range=A1:B2,majorDimension=COLUMNS will return
[[1,3],[2,4]].
:param value_render_option: How values should be represented in the output. The default
render option is `ValueRenderOption.FORMATTED_VALUE`.
:param date_time_render_option: How dates, times, and durations should be represented in the output.
This is ignored if `valueRenderOption` is `FORMATTED_VALUE`. The default
dateTime render option is [`DateTimeRenderOption.SERIAL_NUMBER`].
:return: An array of arrays with the values fetched. Returns an empty array if no
values were fetched. Values are dynamically typed as int, float or string.
"""
result = self.sheet.values_get(spreadsheet_id, value_range, major_dimension, value_render_option,
date_time_render_option)
try:
return result['values']
except KeyError:
            return [['']]
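# Usage sketch (spreadsheet id and values below are placeholders): fetch a raw
# range of values with an authorized client.
# >>> import pygsheets
# >>> c = pygsheets.authorize()
# >>> c.get_range('<SPREADSHEET ID>', 'Sheet1!A1:B2', major_dimension='ROWS')
# [[1, 2], [3, 4]]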
dateTime render option is [`DateTimeRenderOption.SERIAL_NUMBER`].
:return: An array of arrays with the values fetched. Returns an empty array if no
values were fetched. Values are dynamically typed as int, float or string.
"""
result = self.sheet.values_get(spreadsheet_id, value_range, major_dimension, value_render_option,
date_time_render_option)
try:
return result['values']
except KeyError:
return [['']] | en | 0.695434 | # -*- coding: utf-8 -*-. #]+)') # _domain_pattern = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) Create or access Google spreadsheets. Exposes members to create new spreadsheets or open existing ones. Use `authorize` to instantiate an instance of this class. >>> import pygsheets >>> c = pygsheets.authorize() The sheet API service object is stored in the sheet property and the drive API service object in the drive property. >>> c.sheet.get('<SPREADSHEET ID>') >>> c.drive.delete('<FILE ID>') :param credentials: The credentials object returned by google-auth or google-auth-oauthlib. :param retries: (Optional) Number of times to retry a connection before raising a TimeOut error. Default: 3 :param http: The underlying HTTP object to use to make requests. If not specified, a :class:`httplib2.Http` instance will be constructed. Enable team drive support Deprecated: use client.drive.enable_team_drive(team_drive_id=?) Get a list of all spreadsheet ids present in the Google Drive or TeamDrive accessed. Get a list of all spreadsheet titles present in the Google Drive or TeamDrive accessed. Create a new spreadsheet. The title will always be set to the given value (even overwriting the templates title). The template can either be a `spreadsheet resource <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#resource-spreadsheet>`_ or an instance of :class:`~pygsheets.Spreadsheet`. In both cases undefined values will be ignored. :param title: Title of the new spreadsheet. :param template: A template to create the new spreadsheet from. :param folder: The Id of the folder this sheet will be stored in. :param kwargs: Standard parameters (see reference for details). :return: :class:`~pygsheets.Spreadsheet` Open a spreadsheet by title. In a case where there are several sheets with the same title, the first one found is returned. >>> import pygsheets >>> c = pygsheets.authorize() >>> c.open('TestSheet') :param title: A title of a spreadsheet. :returns: :class:`~pygsheets.Spreadsheet` :raises pygsheets.SpreadsheetNotFound: No spreadsheet with the given title was found. Open a spreadsheet by key. >>> import pygsheets >>> c = pygsheets.authorize() >>> c.open_by_key('<KEY>') :param key: The key of a spreadsheet. (can be found in the sheet URL) :returns: :class:`~pygsheets.Spreadsheet` :raises pygsheets.SpreadsheetNotFound: The given spreadsheet ID was not found. Open a spreadsheet by URL. >>> import pygsheets >>> c = pygsheets.authorize() >>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl') :param url: URL of a spreadsheet as it appears in a browser. :returns: :class:`~pygsheets.Spreadsheet` :raises pygsheets.SpreadsheetNotFound: No spreadsheet was found with the given URL. Opens all available spreadsheets. Result can be filtered when specifying the query parameter. On the details on how to form the query: `Reference <https://developers.google.com/drive/v3/web/search-parameters>`_ :param query: (Optional) Can be used to filter the returned metadata. :returns: A list of :class:`~pygsheets.Spreadsheet`. Return a json representation of the spreadsheet. See `Reference <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet>`__ for details. Returns a range of values from a spreadsheet. The caller must specify the spreadsheet ID and a range. 
Reference: `request <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get>`__ :param spreadsheet_id: The ID of the spreadsheet to retrieve data from. :param value_range: The A1 notation of the values to retrieve. :param major_dimension: The major dimension that results should use. For example, if the spreadsheet data is: A1=1,B1=2,A2=3,B2=4, then requesting range=A1:B2,majorDimension=ROWS will return [[1,2],[3,4]], whereas requesting range=A1:B2,majorDimension=COLUMNS will return [[1,3],[2,4]]. :param value_render_option: How values should be represented in the output. The default render option is `ValueRenderOption.FORMATTED_VALUE`. :param date_time_render_option: How dates, times, and durations should be represented in the output. This is ignored if `valueRenderOption` is `FORMATTED_VALUE`. The default dateTime render option is [`DateTimeRenderOption.SERIAL_NUMBER`]. :return: An array of arrays with the values fetched. Returns an empty array if no values were fetched. Values are dynamically typed as int, float or string. | 2.441757 | 2 |
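A minimal usage sketch for the Client class defined above. The service-account file, scopes, spreadsheet key, and range are placeholders rather than values from this record, and the import path assumes the file above lives at pygsheets/client.py as in the released package.

from google.oauth2.service_account import Credentials
from pygsheets.client import Client

# Placeholder credentials; both Sheets and Drive scopes are needed because
# Client wraps both APIs (self.sheet and self.drive).
SCOPES = ('https://www.googleapis.com/auth/spreadsheets',
          'https://www.googleapis.com/auth/drive')
creds = Credentials.from_service_account_file('service_account.json', scopes=SCOPES)

client = Client(creds)

key = '1AbC-placeholder-spreadsheet-key'
spreadsheet = client.open_by_key(key)           # Spreadsheet wrapper object
values = client.get_range(key, 'Sheet1!A1:B2')  # list of rows, e.g. [[1, 2], [3, 4]]
print(spreadsheet.title, values)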
model/group_contact.py | NatalyAristova/Training_python | 0 | 9304 | from sys import maxsize
class Group_contact:
def __init__(self,firstname=None, middlename=None, lastname=None, nickname=None, title=None, company=None,
address=None, home=None, mobile=None, work=None, fax=None, email=None, email2=None, email3=None, byear=None,
address2=None, phone2=None, notes=None, all_phones_from_home_page=None, id=None, all_emails_from_home_page=None):
self.firstname=firstname
self.middlename=middlename
self.lastname=lastname
self.nickname=nickname
self.title=title
self.company=company
self.address=address
self.home=home
self.mobile=mobile
self.work=work
self.fax=fax
self.email=email
self.email2 = email2
self.email3 = email3
self.byear=byear
self.address2=address2
self.phone2=phone2
self.notes=notes
self.id = id
self.all_phones_from_home_page=all_phones_from_home_page
self.all_emails_from_home_page = all_emails_from_home_page
def __repr__(self):
return "%s:%s:%s:%s:%s:%s" % (self.id, self.lastname, self.firstname, self.middlename, self.nickname, self.title)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and (self.lastname, self.firstname) == (other.lastname, other.firstname)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize | from sys import maxsize
class Group_contact:
def __init__(self,firstname=None, middlename=None, lastname=None, nickname=None, title=None, company=None,
address=None, home=None, mobile=None, work=None, fax=None, email=None, email2=None, email3=None, byear=None,
address2=None, phone2=None, notes=None, all_phones_from_home_page=None, id=None, all_emails_from_home_page=None):
self.firstname=firstname
self.middlename=middlename
self.lastname=lastname
self.nickname=nickname
self.title=title
self.company=company
self.address=address
self.home=home
self.mobile=mobile
self.work=work
self.fax=fax
self.email=email
self.email2 = email2
self.email3 = email3
self.byear=byear
self.address2=address2
self.phone2=phone2
self.notes=notes
self.id = id
self.all_phones_from_home_page=all_phones_from_home_page
self.all_emails_from_home_page = all_emails_from_home_page
def __repr__(self):
return "%s:%s:%s:%s:%s:%s" % (self.id, self.lastname, self.firstname, self.middlename, self.nickname, self.title)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and (self.lastname, self.firstname) == (other.lastname, other.firstname)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize | none | 1 | 2.966517 | 3 |
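A short, self-contained sketch of how the model above can be used; the names and ids are invented, and the import path follows the file location shown for this record (model/group_contact.py).

from model.group_contact import Group_contact

ui_rows = [
    Group_contact(firstname="Anna", lastname="Smith"),   # id not visible in the UI
    Group_contact(firstname="Bob", lastname="Jones"),
]
db_rows = [
    Group_contact(id="12", firstname="Bob", lastname="Jones"),
    Group_contact(id="3", firstname="Anna", lastname="Smith"),
]

# id_or_max makes contacts without an id sort after every contact that has one.
print(sorted(db_rows, key=Group_contact.id_or_max))   # id 3 first, then id 12

# __eq__ ignores a missing id, so UI rows can be matched to DB rows by name.
print(ui_rows[0] == db_rows[1])                        # True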
|
test/manual/documents/test_iter_documents.py | membranepotential/mendeley-python-sdk | 103 | 9305 | <gh_stars>100-1000
from itertools import islice
from test import get_user_session, cassette
from test.resources.documents import delete_all_documents, create_document
def test_should_iterate_through_documents():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/documents/iter_documents/iterate_through_documents.yaml'):
create_document(session, 'title 1')
create_document(session, 'title 2')
create_document(session, 'title 3')
docs = list(islice(session.documents.iter(page_size=2), 3))
assert len(docs) == 3
assert docs[0].title == 'title 1'
assert docs[1].title == 'title 2'
assert docs[2].title == 'title 3'
| from itertools import islice
from test import get_user_session, cassette
from test.resources.documents import delete_all_documents, create_document
def test_should_iterate_through_documents():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/documents/iter_documents/iterate_through_documents.yaml'):
create_document(session, 'title 1')
create_document(session, 'title 2')
create_document(session, 'title 3')
docs = list(islice(session.documents.iter(page_size=2), 3))
assert len(docs) == 3
assert docs[0].title == 'title 1'
assert docs[1].title == 'title 2'
assert docs[2].title == 'title 3' | none | 1 | 2.289405 | 2 |
|
demo.py | cbsudux/minimal-hand | 0 | 9306 | <reponame>cbsudux/minimal-hand<filename>demo.py
import argparse
import cv2
import keyboard
import numpy as np
import open3d as o3d
import os
import os.path as osp          # path helpers used below (osp.join, osp.basename)
import subprocess              # ffmpeg call inside video_to_images
import pygame
from transforms3d.axangles import axangle2mat
import config
from hand_mesh import HandMesh
from kinematics import mpii_to_mano
from utils import OneEuroFilter, imresize
from wrappers import ModelPipeline
from utils import *
def video_to_images(vid_file, img_folder=None, return_info=False):
if img_folder is None:
img_folder = osp.join('/tmp', osp.basename(vid_file).replace('.', '_'))
os.makedirs(img_folder, exist_ok=True)
command = ['ffmpeg',
'-i', vid_file,
'-f', 'image2',
'-v', 'error',
f'{img_folder}/%06d.png']
print(f'Running \"{" ".join(command)}\"')
subprocess.call(command)
print(f'Images saved to \"{img_folder}\"')
img_shape = cv2.imread(osp.join(img_folder, '000001.png')).shape
if return_info:
return img_folder, len(os.listdir(img_folder)), img_shape
else:
return img_folder
def run(args):
############ output visualization ############
# view_mat = axangle2mat([1, 0, 0], np.pi) # align different coordinate systems
# window_size = 1080
# hand_mesh = HandMesh(config.HAND_MESH_MODEL_PATH)
# mesh = o3d.geometry.TriangleMesh()
# mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces)
# mesh.vertices = \
# o3d.utility.Vector3dVector(np.matmul(view_mat, hand_mesh.verts.T).T * 1000)
# mesh.compute_vertex_normals()
# viewer = o3d.visualization.Visualizer()
# viewer.create_window(
# width=window_size + 1, height=window_size + 1,
# window_name='Minimal Hand - output'
# )
# viewer.add_geometry(mesh)
# view_control = viewer.get_view_control()
# cam_params = view_control.convert_to_pinhole_camera_parameters()
# extrinsic = cam_params.extrinsic.copy()
# extrinsic[0:3, 3] = 0
# cam_params.extrinsic = extrinsic
# cam_params.intrinsic.set_intrinsics(
# window_size + 1, window_size + 1, config.CAM_FX, config.CAM_FY,
# window_size // 2, window_size // 2
# )
# view_control.convert_from_pinhole_camera_parameters(cam_params)
# view_control.set_constant_z_far(1000)
# render_option = viewer.get_render_option()
# render_option.load_from_json('./render_option.json')
# viewer.update_renderer()
# ############ input visualization ############
# pygame.init()
# display = pygame.display.set_mode((window_size, window_size))
# pygame.display.set_caption('Minimal Hand - input')
# ############ misc ############
# mesh_smoother = OneEuroFilter(4.0, 0.0)
# clock = pygame.time.Clock()
############ Move all of above code to local to render ###########
video_file = args.vid_file
if not os.path.isfile(video_file):
exit(f'Input video \"{video_file}\" does not exist!')
output_path = os.path.join(args.output_folder, os.path.basename(video_file).replace('.mp4', ''))
os.makedirs(output_path, exist_ok=True)
image_folder, num_frames, img_shape = video_to_images(video_file, return_info=True)
print(f'Input video number of frames {num_frames}')
orig_height, orig_width = img_shape[:2]
# total_time = time.time()
import pdb; pdb.set_trace()
image_file_names = [
osp.join(image_folder, x)
for x in os.listdir(image_folder)
if x.endswith('.png') or x.endswith('.jpg')
]
model = ModelPipeline()
for i in image_file_names:
# What do all these conditions check for?
        frame_large = cv2.imread(i)  # load the current frame (BGR) from disk
if frame_large is None:
continue
if frame_large.shape[0] > frame_large.shape[1]:
margin = int((frame_large.shape[0] - frame_large.shape[1]) / 2)
frame_large = frame_large[margin:-margin]
else:
margin = int((frame_large.shape[1] - frame_large.shape[0]) / 2)
frame_large = frame_large[:, margin:-margin]
frame_large = np.flip(frame_large, axis=1).copy() # why? Camera flip?
frame = imresize(frame_large, (128, 128)) # needed
######## Golden lines, run this here #########
_, theta_mpii = model.process(frame)
theta_mano = mpii_to_mano(theta_mpii)
######## Save theta_mano and pass as input to local ########
v = hand_mesh.set_abs_quat(theta_mano)
v *= 2 # for better visualization
v = v * 1000 + np.array([0, 0, 400])
v = mesh_smoother.process(v)
mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces)
mesh.vertices = o3d.utility.Vector3dVector(np.matmul(view_mat, v.T).T)
mesh.paint_uniform_color(config.HAND_COLOR)
mesh.compute_triangle_normals()
mesh.compute_vertex_normals()
# for some version of open3d you may need `viewer.update_geometry(mesh)`
viewer.update_geometry()
viewer.poll_events()
display.blit(
pygame.surfarray.make_surface(
np.transpose(
imresize(frame_large, (window_size, window_size)
), (1, 0, 2))
),
(0, 0)
)
pygame.display.update()
if keyboard.is_pressed("esc"):
break
clock.tick(30) # What's this do? If it adds delay remove it
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--vid_file', type=str,
                        help='input video path or youtube link')
    parser.add_argument('--output_folder', type=str, default='output',
                        help='folder where per-video results are written')
args = parser.parse_args()
run(args)
| import argparse
import cv2
import keyboard
import numpy as np
import open3d as o3d
import os
import pygame
from transforms3d.axangles import axangle2mat
import config
from hand_mesh import HandMesh
from kinematics import mpii_to_mano
from utils import OneEuroFilter, imresize
from wrappers import ModelPipeline
from utils import *
def video_to_images(vid_file, img_folder=None, return_info=False):
if img_folder is None:
img_folder = osp.join('/tmp', osp.basename(vid_file).replace('.', '_'))
os.makedirs(img_folder, exist_ok=True)
command = ['ffmpeg',
'-i', vid_file,
'-f', 'image2',
'-v', 'error',
f'{img_folder}/%06d.png']
print(f'Running \"{" ".join(command)}\"')
subprocess.call(command)
print(f'Images saved to \"{img_folder}\"')
img_shape = cv2.imread(osp.join(img_folder, '000001.png')).shape
if return_info:
return img_folder, len(os.listdir(img_folder)), img_shape
else:
return img_folder
def run(args):
############ output visualization ############
# view_mat = axangle2mat([1, 0, 0], np.pi) # align different coordinate systems
# window_size = 1080
# hand_mesh = HandMesh(config.HAND_MESH_MODEL_PATH)
# mesh = o3d.geometry.TriangleMesh()
# mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces)
# mesh.vertices = \
# o3d.utility.Vector3dVector(np.matmul(view_mat, hand_mesh.verts.T).T * 1000)
# mesh.compute_vertex_normals()
# viewer = o3d.visualization.Visualizer()
# viewer.create_window(
# width=window_size + 1, height=window_size + 1,
# window_name='Minimal Hand - output'
# )
# viewer.add_geometry(mesh)
# view_control = viewer.get_view_control()
# cam_params = view_control.convert_to_pinhole_camera_parameters()
# extrinsic = cam_params.extrinsic.copy()
# extrinsic[0:3, 3] = 0
# cam_params.extrinsic = extrinsic
# cam_params.intrinsic.set_intrinsics(
# window_size + 1, window_size + 1, config.CAM_FX, config.CAM_FY,
# window_size // 2, window_size // 2
# )
# view_control.convert_from_pinhole_camera_parameters(cam_params)
# view_control.set_constant_z_far(1000)
# render_option = viewer.get_render_option()
# render_option.load_from_json('./render_option.json')
# viewer.update_renderer()
# ############ input visualization ############
# pygame.init()
# display = pygame.display.set_mode((window_size, window_size))
# pygame.display.set_caption('Minimal Hand - input')
# ############ misc ############
# mesh_smoother = OneEuroFilter(4.0, 0.0)
# clock = pygame.time.Clock()
############ Move all of above code to local to render ###########
video_file = args.vid_file
if not os.path.isfile(video_file):
exit(f'Input video \"{video_file}\" does not exist!')
output_path = os.path.join(args.output_folder, os.path.basename(video_file).replace('.mp4', ''))
os.makedirs(output_path, exist_ok=True)
image_folder, num_frames, img_shape = video_to_images(video_file, return_info=True)
print(f'Input video number of frames {num_frames}')
orig_height, orig_width = img_shape[:2]
# total_time = time.time()
import pdb; pdb.set_trace()
image_file_names = [
osp.join(image_folder, x)
for x in os.listdir(image_folder)
if x.endswith('.png') or x.endswith('.jpg')
]
model = ModelPipeline()
for i in image_file_names:
# What do all these conditions check for?
        frame_large = cv2.imread(i)  # load the current frame (BGR) from disk
if frame_large is None:
continue
if frame_large.shape[0] > frame_large.shape[1]:
margin = int((frame_large.shape[0] - frame_large.shape[1]) / 2)
frame_large = frame_large[margin:-margin]
else:
margin = int((frame_large.shape[1] - frame_large.shape[0]) / 2)
frame_large = frame_large[:, margin:-margin]
frame_large = np.flip(frame_large, axis=1).copy() # why? Camera flip?
frame = imresize(frame_large, (128, 128)) # needed
######## Golden lines, run this here #########
_, theta_mpii = model.process(frame)
theta_mano = mpii_to_mano(theta_mpii)
######## Save theta_mano and pass as input to local ########
v = hand_mesh.set_abs_quat(theta_mano)
v *= 2 # for better visualization
v = v * 1000 + np.array([0, 0, 400])
v = mesh_smoother.process(v)
mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces)
mesh.vertices = o3d.utility.Vector3dVector(np.matmul(view_mat, v.T).T)
mesh.paint_uniform_color(config.HAND_COLOR)
mesh.compute_triangle_normals()
mesh.compute_vertex_normals()
# for some version of open3d you may need `viewer.update_geometry(mesh)`
viewer.update_geometry()
viewer.poll_events()
display.blit(
pygame.surfarray.make_surface(
np.transpose(
imresize(frame_large, (window_size, window_size)
), (1, 0, 2))
),
(0, 0)
)
pygame.display.update()
if keyboard.is_pressed("esc"):
break
clock.tick(30) # What's this do? If it adds delay remove it
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--vid_file', type=str,
                        help='input video path or youtube link')
    parser.add_argument('--output_folder', type=str, default='output',
                        help='folder where per-video results are written')
args = parser.parse_args()
run(args) | en | 0.344005 | ############ output visualization ############ # view_mat = axangle2mat([1, 0, 0], np.pi) # align different coordinate systems # window_size = 1080 # hand_mesh = HandMesh(config.HAND_MESH_MODEL_PATH) # mesh = o3d.geometry.TriangleMesh() # mesh.triangles = o3d.utility.Vector3iVector(hand_mesh.faces) # mesh.vertices = \ # o3d.utility.Vector3dVector(np.matmul(view_mat, hand_mesh.verts.T).T * 1000) # mesh.compute_vertex_normals() # viewer = o3d.visualization.Visualizer() # viewer.create_window( # width=window_size + 1, height=window_size + 1, # window_name='Minimal Hand - output' # ) # viewer.add_geometry(mesh) # view_control = viewer.get_view_control() # cam_params = view_control.convert_to_pinhole_camera_parameters() # extrinsic = cam_params.extrinsic.copy() # extrinsic[0:3, 3] = 0 # cam_params.extrinsic = extrinsic # cam_params.intrinsic.set_intrinsics( # window_size + 1, window_size + 1, config.CAM_FX, config.CAM_FY, # window_size // 2, window_size // 2 # ) # view_control.convert_from_pinhole_camera_parameters(cam_params) # view_control.set_constant_z_far(1000) # render_option = viewer.get_render_option() # render_option.load_from_json('./render_option.json') # viewer.update_renderer() # ############ input visualization ############ # pygame.init() # display = pygame.display.set_mode((window_size, window_size)) # pygame.display.set_caption('Minimal Hand - input') # ############ misc ############ # mesh_smoother = OneEuroFilter(4.0, 0.0) # clock = pygame.time.Clock() ############ Move all of above code to local to render ########### # total_time = time.time() # What do all these conditions check for? # why? Camera flip? # needed ######## Golden lines, run this here ######### ######## Save theta_mano and pass as input to local ######## # for better visualization # for some version of open3d you may need `viewer.update_geometry(mesh)` # What's this do? If it adds delay remove it | 2.273765 | 2 |
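The per-frame preprocessing inside run() above (square center-crop, horizontal flip, resize to 128x128) is the reusable piece; below is a self-contained sketch of that step using cv2.resize in place of the repo's imresize helper, fed with a synthetic frame instead of video data.

import cv2
import numpy as np

def preprocess_frame(frame_large, size=128):
    """Center-crop to a square, mirror horizontally, resize to size x size."""
    h, w = frame_large.shape[:2]
    if h > w:
        margin = (h - w) // 2
        frame_large = frame_large[margin:h - margin]
    elif w > h:
        margin = (w - h) // 2
        frame_large = frame_large[:, margin:w - margin]
    frame_large = np.flip(frame_large, axis=1).copy()  # mirror, as the demo does
    return cv2.resize(frame_large, (size, size))

dummy = np.zeros((480, 640, 3), dtype=np.uint8)  # stands in for a decoded video frame
print(preprocess_frame(dummy).shape)             # (128, 128, 3)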
test_project/settings.py | incuna/incuna-groups | 1 | 9307 | import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
ALLOWED_HOSTS = []
ROOT_URLCONF = 'groups.tests.urls'
STATIC_URL = '/static/'
SECRET_KEY = '<KEY>'
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
DATABASES = {
'default': dj_database_url.config(default='postgres://localhost/groups')
}
DEFAULT_FILE_STORAGE = 'inmemorystorage.InMemoryStorage'
INSTALLED_APPS = (
'groups',
'crispy_forms',
'pagination',
'polymorphic',
# Put contenttypes before auth to work around test issue.
# See: https://code.djangoproject.com/ticket/10827#comment:12
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'groups', 'tests', 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
TEST_RUNNER = 'test_project.test_runner.Runner'
| import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
ALLOWED_HOSTS = []
ROOT_URLCONF = 'groups.tests.urls'
STATIC_URL = '/static/'
SECRET_KEY = '<KEY>'
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
DATABASES = {
'default': dj_database_url.config(default='postgres://localhost/groups')
}
DEFAULT_FILE_STORAGE = 'inmemorystorage.InMemoryStorage'
INSTALLED_APPS = (
'groups',
'crispy_forms',
'pagination',
'polymorphic',
# Put contenttypes before auth to work around test issue.
# See: https://code.djangoproject.com/ticket/10827#comment:12
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'groups', 'tests', 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
CRISPY_TEMPLATE_PACK = 'bootstrap3'
TEST_RUNNER = 'test_project.test_runner.Runner'
| en | 0.846381 | # Put contenttypes before auth to work around test issue. # See: https://code.djangoproject.com/ticket/10827#comment:12 | 1.743116 | 2 |
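The DATABASES entry above delegates URL parsing to dj-database-url; a quick sketch of what that resolves to, using a made-up connection URL rather than the project's real one.

import dj_database_url

cfg = dj_database_url.parse('postgres://groups_user:secret@localhost:5432/groups')
print(cfg['NAME'], cfg['HOST'], cfg['PORT'])  # groups localhost 5432
print(cfg['ENGINE'])  # a Django postgresql backend (exact name depends on the package version)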
tests/test_akismet.py | cclauss/akismet | 9 | 9308 | <gh_stars>1-10
import datetime
import os
import sys
import unittest
from unittest import mock
import akismet
class AkismetTests(unittest.TestCase):
api_key = os.getenv("TEST_AKISMET_API_KEY")
blog_url = os.getenv("TEST_AKISMET_BLOG_URL")
api_key_env_var = "PYTHON_AKISMET_API_KEY"
blog_url_env_var = "PYTHON_AKISMET_BLOG_URL"
def setUp(self):
self.api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
class AkismetConfigurationTests(AkismetTests):
"""
Tests configuration of the Akismet class.
"""
def test_config_from_args(self):
"""
Configuring via explicit arguments succeeds.
"""
api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
self.assertEqual(self.api_key, api.api_key)
self.assertEqual(self.blog_url, api.blog_url)
def test_bad_config_args(self):
"""
Configuring with bad arguments fails.
"""
with self.assertRaises(akismet.APIKeyError):
akismet.Akismet(key="invalid", blog_url="http://invalid")
def test_config_from_env(self):
"""
Configuring via environment variables succeeds.
"""
try:
os.environ[self.api_key_env_var] = self.api_key
os.environ[self.blog_url_env_var] = self.blog_url
api = akismet.Akismet(key=None, blog_url=None)
self.assertEqual(self.api_key, api.api_key)
self.assertEqual(self.blog_url, api.blog_url)
api = akismet.Akismet()
self.assertEqual(self.api_key, api.api_key)
self.assertEqual(self.blog_url, api.blog_url)
finally:
os.environ[self.api_key_env_var] = ""
os.environ[self.blog_url_env_var] = ""
def test_bad_config_env(self):
"""
Configuring with bad environment variables fails.
"""
try:
os.environ[self.api_key_env_var] = "invalid"
os.environ[self.blog_url_env_var] = "http://invalid"
with self.assertRaises(akismet.APIKeyError):
akismet.Akismet()
finally:
os.environ[self.api_key_env_var] = ""
os.environ[self.blog_url_env_var] = ""
def test_bad_url(self):
"""
Configuring with a bad URL fails.
"""
bad_urls = (
"example.com",
"ftp://example.com",
"www.example.com",
"http//example.com",
"https//example.com",
)
for url in bad_urls:
with self.assertRaises(akismet.ConfigurationError):
akismet.Akismet(key=self.api_key, blog_url=url)
def test_missing_config(self):
"""
Instantiating without any configuration fails.
"""
with self.assertRaises(akismet.ConfigurationError):
akismet.Akismet(key=None, blog_url=None)
with self.assertRaises(akismet.ConfigurationError):
akismet.Akismet()
def test_user_agent(self):
"""
The Akismet class creates the correct user-agent string.
"""
api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
expected_agent = "Python/{} | akismet.py/{}".format(
"{}.{}".format(*sys.version_info[:2]), akismet.__version__
)
self.assertEqual(expected_agent, api.user_agent_header["User-Agent"])
class AkismetAPITests(AkismetTests):
"""
Tests implementation of the Akismet API.
"""
base_kwargs = {
"user_ip": "127.0.0.1",
"user_agent": "Mozilla",
# Always send this when testing; Akismet recognizes it as a
# test query and does not train/learn from it.
"is_test": 1,
}
def test_verify_key_valid(self):
"""
The verify_key operation succeeds with a valid key and URL.
"""
self.assertTrue(akismet.Akismet.verify_key(self.api_key, self.blog_url))
def test_verify_key_invalid(self):
"""
The verify_key operation fails with an invalid key and URL.
"""
self.assertFalse(akismet.Akismet.verify_key("invalid", "http://invalid"))
def test_comment_check_spam(self):
"""
The comment_check method correctly identifies spam.
"""
check_kwargs = {
# Akismet guarantees this will be classified spam.
"comment_author": "<PASSWORD>",
**self.base_kwargs,
}
self.assertTrue(self.api.comment_check(**check_kwargs))
def test_comment_check_not_spam(self):
"""
The comment_check method correctly identifies non-spam.
"""
check_kwargs = {
# Akismet guarantees this will not be classified spam.
"user_role": "administrator",
**self.base_kwargs,
}
self.assertFalse(self.api.comment_check(**check_kwargs))
def test_submit_spam(self):
"""
The submit_spam method succeeds.
"""
spam_kwargs = {
"comment_type": "comment",
"comment_author": "<PASSWORD>",
"comment_content": "<PASSWORD>",
**self.base_kwargs,
}
self.assertTrue(self.api.submit_spam(**spam_kwargs))
def test_submit_ham(self):
"""
The submit_ham method succeeds.
"""
ham_kwargs = {
"comment_type": "comment",
"comment_author": "Legitimate Author",
"comment_content": "This is a legitimate comment.",
"user_role": "administrator",
**self.base_kwargs,
}
self.assertTrue(self.api.submit_ham(**ham_kwargs))
def test_unexpected_verify_key_response(self):
"""
Unexpected verify_key API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
akismet.Akismet.verify_key(self.api_key, self.blog_url)
def test_unexpected_comment_check_response(self):
"""
Unexpected comment_check API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
check_kwargs = {"comment_author": "<PASSWORD>", **self.base_kwargs}
self.api.comment_check(**check_kwargs)
def test_unexpected_submit_spam_response(self):
"""
Unexpected submit_spam API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
spam_kwargs = {
"comment_type": "comment",
"comment_author": "<PASSWORD>",
"comment_content": "viagra-test-123",
**self.base_kwargs,
}
self.api.submit_spam(**spam_kwargs)
def test_unexpected_submit_ham_response(self):
"""
Unexpected submit_ham API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
ham_kwargs = {
"comment_type": "comment",
"comment_author": "Legitimate Author",
"comment_content": "This is a legitimate comment.",
"user_role": "administrator",
**self.base_kwargs,
}
self.api.submit_ham(**ham_kwargs)
class AkismetRequestTests(AkismetTests):
"""
Tests the requests constructed by the Akismet class.
"""
def _get_mock(self, text):
"""
Create a mock for requests.post() returning expected text.
"""
post_mock = mock.MagicMock()
post_mock.return_value.text = text
return post_mock
def _mock_request(self, method, endpoint, text, method_kwargs):
"""
Issue a mocked request and verify requests.post() was called
with the correct arguments.
"""
method_kwargs.update(user_ip="127.0.0.1", user_agent="Mozilla", is_test=1)
expected_kwargs = {"blog": self.blog_url, **method_kwargs}
post_mock = self._get_mock(text)
with mock.patch("requests.post", post_mock):
getattr(self.api, method)(**method_kwargs)
post_mock.assert_called_with(
endpoint.format(self.api_key),
data=expected_kwargs,
headers=akismet.Akismet.user_agent_header,
)
def test_verify_key(self):
"""
The request issued by verify_key() is correct.
"""
post_mock = self._get_mock("valid")
with mock.patch("requests.post", post_mock):
akismet.Akismet.verify_key(self.api_key, self.blog_url)
post_mock.assert_called_with(
akismet.Akismet.VERIFY_KEY_URL,
data={"key": self.api_key, "blog": self.blog_url},
headers=akismet.Akismet.user_agent_header,
)
def test_comment_check(self):
"""
The request issued by comment_check() is correct.
"""
self._mock_request(
"comment_check",
akismet.Akismet.COMMENT_CHECK_URL,
"true",
{"comment_author": "<PASSWORD>"},
)
def test_submit_spam(self):
"""
The request issued by submit_spam() is correct.
"""
self._mock_request(
"submit_spam",
akismet.Akismet.SUBMIT_SPAM_URL,
akismet.Akismet.SUBMIT_SUCCESS_RESPONSE,
{"comment_content": "Bad comment", "comment_author": "<PASSWORD>ra-test-<PASSWORD>"},
)
def test_submit_ham(self):
"""
The request issued by submit_ham() is correct.
"""
self._mock_request(
"submit_ham",
akismet.Akismet.SUBMIT_HAM_URL,
akismet.Akismet.SUBMIT_SUCCESS_RESPONSE,
{
"comment_content": "Good comment",
"comment_author": "Legitimate commenter",
},
)
def test_full_kwargs(self):
"""
All optional Akismet arguments are correctly passed through.
"""
modified_timestamp = datetime.datetime.now()
posted_timestamp = modified_timestamp - datetime.timedelta(seconds=30)
full_kwargs = {
"referrer": "http://www.example.com/",
"permalink": "http://www.example.com/#comment123",
"comment_type": "comment",
"comment_author": "Legitimate Author",
"comment_author_email": "<EMAIL>",
"comment_author_url": "http://www.example.com/",
"comment_content": "This is a fine comment.",
"comment_date_gmt": posted_timestamp.isoformat(),
"comment_post_modified_gmt": modified_timestamp.isoformat(),
"blog_lang": "en_us",
"blog_charset": "utf-8",
"user_role": "administrator",
"recheck_reason": "edit",
}
self._mock_request(
"comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", full_kwargs
)
def test_unknown_kwargs(self):
"""
Unknown Akismet arguments are correctly rejected.
"""
bad_kwargs = {"bad_arg": "bad_val"}
with self.assertRaises(akismet.UnknownArgumentError):
self._mock_request(
"comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", bad_kwargs
)
| import datetime
import os
import sys
import unittest
from unittest import mock
import akismet
class AkismetTests(unittest.TestCase):
api_key = os.getenv("TEST_AKISMET_API_KEY")
blog_url = os.getenv("TEST_AKISMET_BLOG_URL")
api_key_env_var = "PYTHON_AKISMET_API_KEY"
blog_url_env_var = "PYTHON_AKISMET_BLOG_URL"
def setUp(self):
self.api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
class AkismetConfigurationTests(AkismetTests):
"""
Tests configuration of the Akismet class.
"""
def test_config_from_args(self):
"""
Configuring via explicit arguments succeeds.
"""
api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
self.assertEqual(self.api_key, api.api_key)
self.assertEqual(self.blog_url, api.blog_url)
def test_bad_config_args(self):
"""
Configuring with bad arguments fails.
"""
with self.assertRaises(akismet.APIKeyError):
akismet.Akismet(key="invalid", blog_url="http://invalid")
def test_config_from_env(self):
"""
Configuring via environment variables succeeds.
"""
try:
os.environ[self.api_key_env_var] = self.api_key
os.environ[self.blog_url_env_var] = self.blog_url
api = akismet.Akismet(key=None, blog_url=None)
self.assertEqual(self.api_key, api.api_key)
self.assertEqual(self.blog_url, api.blog_url)
api = akismet.Akismet()
self.assertEqual(self.api_key, api.api_key)
self.assertEqual(self.blog_url, api.blog_url)
finally:
os.environ[self.api_key_env_var] = ""
os.environ[self.blog_url_env_var] = ""
def test_bad_config_env(self):
"""
Configuring with bad environment variables fails.
"""
try:
os.environ[self.api_key_env_var] = "invalid"
os.environ[self.blog_url_env_var] = "http://invalid"
with self.assertRaises(akismet.APIKeyError):
akismet.Akismet()
finally:
os.environ[self.api_key_env_var] = ""
os.environ[self.blog_url_env_var] = ""
def test_bad_url(self):
"""
Configuring with a bad URL fails.
"""
bad_urls = (
"example.com",
"ftp://example.com",
"www.example.com",
"http//example.com",
"https//example.com",
)
for url in bad_urls:
with self.assertRaises(akismet.ConfigurationError):
akismet.Akismet(key=self.api_key, blog_url=url)
def test_missing_config(self):
"""
Instantiating without any configuration fails.
"""
with self.assertRaises(akismet.ConfigurationError):
akismet.Akismet(key=None, blog_url=None)
with self.assertRaises(akismet.ConfigurationError):
akismet.Akismet()
def test_user_agent(self):
"""
The Akismet class creates the correct user-agent string.
"""
api = akismet.Akismet(key=self.api_key, blog_url=self.blog_url)
expected_agent = "Python/{} | akismet.py/{}".format(
"{}.{}".format(*sys.version_info[:2]), akismet.__version__
)
self.assertEqual(expected_agent, api.user_agent_header["User-Agent"])
class AkismetAPITests(AkismetTests):
"""
Tests implementation of the Akismet API.
"""
base_kwargs = {
"user_ip": "127.0.0.1",
"user_agent": "Mozilla",
# Always send this when testing; Akismet recognizes it as a
# test query and does not train/learn from it.
"is_test": 1,
}
def test_verify_key_valid(self):
"""
The verify_key operation succeeds with a valid key and URL.
"""
self.assertTrue(akismet.Akismet.verify_key(self.api_key, self.blog_url))
def test_verify_key_invalid(self):
"""
The verify_key operation fails with an invalid key and URL.
"""
self.assertFalse(akismet.Akismet.verify_key("invalid", "http://invalid"))
def test_comment_check_spam(self):
"""
The comment_check method correctly identifies spam.
"""
check_kwargs = {
# Akismet guarantees this will be classified spam.
"comment_author": "<PASSWORD>",
**self.base_kwargs,
}
self.assertTrue(self.api.comment_check(**check_kwargs))
def test_comment_check_not_spam(self):
"""
The comment_check method correctly identifies non-spam.
"""
check_kwargs = {
# Akismet guarantees this will not be classified spam.
"user_role": "administrator",
**self.base_kwargs,
}
self.assertFalse(self.api.comment_check(**check_kwargs))
def test_submit_spam(self):
"""
The submit_spam method succeeds.
"""
spam_kwargs = {
"comment_type": "comment",
"comment_author": "<PASSWORD>",
"comment_content": "<PASSWORD>",
**self.base_kwargs,
}
self.assertTrue(self.api.submit_spam(**spam_kwargs))
def test_submit_ham(self):
"""
The submit_ham method succeeds.
"""
ham_kwargs = {
"comment_type": "comment",
"comment_author": "Legitimate Author",
"comment_content": "This is a legitimate comment.",
"user_role": "administrator",
**self.base_kwargs,
}
self.assertTrue(self.api.submit_ham(**ham_kwargs))
def test_unexpected_verify_key_response(self):
"""
Unexpected verify_key API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
akismet.Akismet.verify_key(self.api_key, self.blog_url)
def test_unexpected_comment_check_response(self):
"""
Unexpected comment_check API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
check_kwargs = {"comment_author": "<PASSWORD>", **self.base_kwargs}
self.api.comment_check(**check_kwargs)
def test_unexpected_submit_spam_response(self):
"""
Unexpected submit_spam API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
spam_kwargs = {
"comment_type": "comment",
"comment_author": "<PASSWORD>",
"comment_content": "viagra-test-123",
**self.base_kwargs,
}
self.api.submit_spam(**spam_kwargs)
def test_unexpected_submit_ham_response(self):
"""
Unexpected submit_ham API responses are correctly handled.
"""
post_mock = mock.MagicMock()
with mock.patch("requests.post", post_mock):
with self.assertRaises(akismet.ProtocolError):
ham_kwargs = {
"comment_type": "comment",
"comment_author": "Legitimate Author",
"comment_content": "This is a legitimate comment.",
"user_role": "administrator",
**self.base_kwargs,
}
self.api.submit_ham(**ham_kwargs)
class AkismetRequestTests(AkismetTests):
"""
Tests the requests constructed by the Akismet class.
"""
def _get_mock(self, text):
"""
Create a mock for requests.post() returning expected text.
"""
post_mock = mock.MagicMock()
post_mock.return_value.text = text
return post_mock
def _mock_request(self, method, endpoint, text, method_kwargs):
"""
Issue a mocked request and verify requests.post() was called
with the correct arguments.
"""
method_kwargs.update(user_ip="127.0.0.1", user_agent="Mozilla", is_test=1)
expected_kwargs = {"blog": self.blog_url, **method_kwargs}
post_mock = self._get_mock(text)
with mock.patch("requests.post", post_mock):
getattr(self.api, method)(**method_kwargs)
post_mock.assert_called_with(
endpoint.format(self.api_key),
data=expected_kwargs,
headers=akismet.Akismet.user_agent_header,
)
def test_verify_key(self):
"""
The request issued by verify_key() is correct.
"""
post_mock = self._get_mock("valid")
with mock.patch("requests.post", post_mock):
akismet.Akismet.verify_key(self.api_key, self.blog_url)
post_mock.assert_called_with(
akismet.Akismet.VERIFY_KEY_URL,
data={"key": self.api_key, "blog": self.blog_url},
headers=akismet.Akismet.user_agent_header,
)
def test_comment_check(self):
"""
The request issued by comment_check() is correct.
"""
self._mock_request(
"comment_check",
akismet.Akismet.COMMENT_CHECK_URL,
"true",
{"comment_author": "<PASSWORD>"},
)
def test_submit_spam(self):
"""
The request issued by submit_spam() is correct.
"""
self._mock_request(
"submit_spam",
akismet.Akismet.SUBMIT_SPAM_URL,
akismet.Akismet.SUBMIT_SUCCESS_RESPONSE,
{"comment_content": "Bad comment", "comment_author": "<PASSWORD>ra-test-<PASSWORD>"},
)
def test_submit_ham(self):
"""
The request issued by submit_ham() is correct.
"""
self._mock_request(
"submit_ham",
akismet.Akismet.SUBMIT_HAM_URL,
akismet.Akismet.SUBMIT_SUCCESS_RESPONSE,
{
"comment_content": "Good comment",
"comment_author": "Legitimate commenter",
},
)
def test_full_kwargs(self):
"""
All optional Akismet arguments are correctly passed through.
"""
modified_timestamp = datetime.datetime.now()
posted_timestamp = modified_timestamp - datetime.timedelta(seconds=30)
full_kwargs = {
"referrer": "http://www.example.com/",
"permalink": "http://www.example.com/#comment123",
"comment_type": "comment",
"comment_author": "Legitimate Author",
"comment_author_email": "<EMAIL>",
"comment_author_url": "http://www.example.com/",
"comment_content": "This is a fine comment.",
"comment_date_gmt": posted_timestamp.isoformat(),
"comment_post_modified_gmt": modified_timestamp.isoformat(),
"blog_lang": "en_us",
"blog_charset": "utf-8",
"user_role": "administrator",
"recheck_reason": "edit",
}
self._mock_request(
"comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", full_kwargs
)
def test_unknown_kwargs(self):
"""
Unknown Akismet arguments are correctly rejected.
"""
bad_kwargs = {"bad_arg": "bad_val"}
with self.assertRaises(akismet.UnknownArgumentError):
self._mock_request(
"comment_check", akismet.Akismet.COMMENT_CHECK_URL, "false", bad_kwargs
) | en | 0.822253 | Tests configuration of the Akismet class. Configuring via explicit arguments succeeds. Configuring with bad arguments fails. Configuring via environment variables succeeds. Configuring with bad environment variables fails. Configuring with a bad URL fails. Instantiating without any configuration fails. The Akismet class creates the correct user-agent string. Tests implementation of the Akismet API. # Always send this when testing; Akismet recognizes it as a # test query and does not train/learn from it. The verify_key operation succeeds with a valid key and URL. The verify_key operation fails with an invalid key and URL. The comment_check method correctly identifies spam. # Akismet guarantees this will be classified spam. The comment_check method correctly identifies non-spam. # Akismet guarantees this will not be classified spam. The submit_spam method succeeds. The submit_ham method succeeds. Unexpected verify_key API responses are correctly handled. Unexpected comment_check API responses are correctly handled. Unexpected submit_spam API responses are correctly handled. Unexpected submit_ham API responses are correctly handled. Tests the requests constructed by the Akismet class. Create a mock for requests.post() returning expected text. Issue a mocked request and verify requests.post() was called with the correct arguments. The request issued by verify_key() is correct. The request issued by comment_check() is correct. The request issued by submit_spam() is correct. The request issued by submit_ham() is correct. All optional Akismet arguments are correctly passed through. #comment123", Unknown Akismet arguments are correctly rejected. | 3.066258 | 3 |
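A minimal sketch of the API exercised by the tests above. The key and blog URL are placeholders (not real credentials), and is_test=1 is sent so Akismet treats the call as a test and does not learn from it.

import akismet

API_KEY = 'your-akismet-api-key'      # placeholder, not a real key
BLOG_URL = 'http://www.example.com/'  # placeholder blog URL

if akismet.Akismet.verify_key(API_KEY, BLOG_URL):
    api = akismet.Akismet(key=API_KEY, blog_url=BLOG_URL)
    is_spam = api.comment_check(
        user_ip='127.0.0.1',
        user_agent='Mozilla',
        comment_author='viagra-test-123',  # Akismet's documented always-spam test author
        is_test=1,
    )
    print('spam' if is_spam else 'ham')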
experimenting/dataset/datamodule.py | gaurvigoyal/lifting_events_to_3d_hpe | 19 | 9309 | <filename>experimenting/dataset/datamodule.py
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from .core import BaseCore
from .factory import BaseDataFactory
class DataModule(pl.LightningDataModule):
def __init__(
self,
dataset_factory: BaseDataFactory,
core: BaseCore,
aug_train_config,
aug_test_config,
batch_size: int,
num_workers: int,
train_val_split: float = 0.8,
):
super().__init__()
self.core = core
self.batch_size = batch_size
self.num_workers = num_workers
self.dataset_factory = dataset_factory
self.aug_train_config = aug_train_config
self.aug_test_config = aug_test_config
self.train_val_split = train_val_split
def prepare_data(self, *args, **kwargs):
pass
def setup(self, stage=None):
self.dataset_factory.set_dataset_core(self.core)
(
self.train_indexes,
self.val_indexes,
self.test_indexes,
) = self.dataset_factory.get_train_test_split(self.train_val_split)
self.train_dataset = self.dataset_factory.get_dataset(
self.train_indexes, self.aug_train_config
)
self.val_dataset = self.dataset_factory.get_dataset(
self.val_indexes, self.aug_test_config
)
self.test_dataset = self.dataset_factory.get_dataset(
self.test_indexes, self.aug_test_config
)
def train_dataloader(self):
return get_dataloader(self.train_dataset, self.batch_size, self.num_workers)
def val_dataloader(self):
return get_dataloader(
self.val_dataset,
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def test_dataloader(self):
return get_dataloader(
self.test_dataset,
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def test_frames_only_dataloader(self):
return get_dataloader(
self.dataset_factory.get_frame_only_dataset(
self.test_indexes, self.aug_test_config
),
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def get_dataloader(
dataset: Dataset, batch_size: int, num_workers: int = 12, shuffle=True
) -> DataLoader:
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
)
return loader
| <filename>experimenting/dataset/datamodule.py
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
from .core import BaseCore
from .factory import BaseDataFactory
class DataModule(pl.LightningDataModule):
def __init__(
self,
dataset_factory: BaseDataFactory,
core: BaseCore,
aug_train_config,
aug_test_config,
batch_size: int,
num_workers: int,
train_val_split: float = 0.8,
):
super().__init__()
self.core = core
self.batch_size = batch_size
self.num_workers = num_workers
self.dataset_factory = dataset_factory
self.aug_train_config = aug_train_config
self.aug_test_config = aug_test_config
self.train_val_split = train_val_split
def prepare_data(self, *args, **kwargs):
pass
def setup(self, stage=None):
self.dataset_factory.set_dataset_core(self.core)
(
self.train_indexes,
self.val_indexes,
self.test_indexes,
) = self.dataset_factory.get_train_test_split(self.train_val_split)
self.train_dataset = self.dataset_factory.get_dataset(
self.train_indexes, self.aug_train_config
)
self.val_dataset = self.dataset_factory.get_dataset(
self.val_indexes, self.aug_test_config
)
self.test_dataset = self.dataset_factory.get_dataset(
self.test_indexes, self.aug_test_config
)
def train_dataloader(self):
return get_dataloader(self.train_dataset, self.batch_size, self.num_workers)
def val_dataloader(self):
return get_dataloader(
self.val_dataset,
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def test_dataloader(self):
return get_dataloader(
self.test_dataset,
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def test_frames_only_dataloader(self):
return get_dataloader(
self.dataset_factory.get_frame_only_dataset(
self.test_indexes, self.aug_test_config
),
self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
def get_dataloader(
dataset: Dataset, batch_size: int, num_workers: int = 12, shuffle=True
) -> DataLoader:
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
)
return loader
| none | 1 | 2.390724 | 2 |
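get_dataloader above has no project-specific dependencies, so its call pattern can be shown with a toy TensorDataset; the import path follows the file location shown for this record, and num_workers=0 keeps the sketch runnable without multiprocessing.

import torch
from torch.utils.data import TensorDataset

from experimenting.dataset.datamodule import get_dataloader

xs = torch.randn(100, 3, 32, 32)          # 100 random "images"
ys = torch.randint(0, 10, (100,))         # integer labels
toy = TensorDataset(xs, ys)

loader = get_dataloader(toy, batch_size=16, num_workers=0, shuffle=True)
batch_x, batch_y = next(iter(loader))
print(batch_x.shape, batch_y.shape)       # torch.Size([16, 3, 32, 32]) torch.Size([16])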
|
sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py | giulianoiorio/PeTar | 0 | 9310 | <filename>sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#Load file
dt=pd.read_csv("sevn_output/output_0.csv")
#Give a look to the columns
print(dt.columns)
#Consider only the final states
dt=dt.drop_duplicates(["ID","name"], keep='last')
#Load evolved file
dte=pd.read_csv("sevn_output/evolved_0.dat",sep='\s+')
#Give a look to the columns
print(dte.columns)
dte=dte.rename(columns={'#ID': 'ID','Mass_0':"Mzams_0", 'Mass_1':"Mzams_1"})
#After change
print(dte.columns)
#Join the two dataset
dt = dt.merge(dte, on=["ID","name"], how="inner", suffixes=("","_ini") )
# - on: column(s, can be a list of columns) to match during the merge of the two tables. The colum(s) has(have) to be present in both the tables
# - how: type of join to use, see documentation here and the next slide
# - suffixes: columns with the same name in the two tables (not used in on) will be renamed adding these suffixes.
#Give a look to the columns
print(dt.columns)
#Create filter indexes
idx0 = (dt.RemnantType_0==6)
idx1 = (dt.RemnantType_1==6)
idxb0 = idx0 & dt.Semimajor.notnull()
idxb1 = idx1 & dt.Semimajor.notnull()
idxm0 = idxb0 & (dt.GWtime + dt.BWorldtime <= 14000)
idxm1 = idxb1 & (dt.GWtime + dt.BWorldtime <= 14000)
#Filter and join masses
AllBH = pd.concat([dt[idx0].Mass_0,dt[idx1].Mass_1])
BoundBH = pd.concat([dt[idxb0].Mass_0,dt[idxb1].Mass_1])
MergingBH = pd.concat([dt[idxm0].Mass_0,dt[idxm1].Mass_1])
#Filter and join initial masses
AllBHzams = pd.concat([dt[idx0].Mzams_0,dt[idx1].Mzams_1])
BoundBHzams = pd.concat([dt[idxb0].Mzams_0,dt[idxb1].Mzams_1])
MergingBHzams = pd.concat([dt[idxm0].Mzams_0,dt[idxm1].Mzams_1])
#Filter and join initial semimajor axis
AllBHa = pd.concat([dt[idx0].a,dt[idx1].a])
BoundBHa = pd.concat([dt[idxb0].a,dt[idxb1].a])
MergingBHa = pd.concat([dt[idxm0].a,dt[idxm1].a])
#Plot
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.scatter(AllBHzams,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHzams,BoundBH,zorder=2,edgecolor="k",s=30, label="Bound")
plt.scatter(MergingBHzams,MergingBH,zorder=3,edgecolor="k",s=30, label="Merging")
plt.plot(np.linspace(0,140),np.linspace(0,140),ls="dashed",c="gray")
plt.xscale("log")
plt.yscale("log")
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.xlabel("$M\mathrm{zams}$ [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.legend(fontsize=16)
plt.subplot(1,2,2)
plt.scatter(AllBHa,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHa,BoundBH,zorder=2,edgecolor="k",s=30,label="Bound")
plt.scatter(MergingBHa,MergingBH,zorder=3,edgecolor="k",s=30,label="Merging")
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Semimajor initial [R$_\odot$]",fontsize=18)
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.tight_layout()
plt.savefig("analysis3.png")
plt.show()
| <filename>sevn-interface/SEVN/resources/SEVN_walkthrough/running_folder/analysis_3_pandas.py
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#Load file
dt=pd.read_csv("sevn_output/output_0.csv")
#Give a look to the columns
print(dt.columns)
#Consider only the final states
dt=dt.drop_duplicates(["ID","name"], keep='last')
#Load evolved file
dte=pd.read_csv("sevn_output/evolved_0.dat",sep='\s+')
#Give a look to the columns
print(dte.columns)
dte=dte.rename(columns={'#ID': 'ID','Mass_0':"Mzams_0", 'Mass_1':"Mzams_1"})
#After change
print(dte.columns)
#Join the two dataset
dt = dt.merge(dte, on=["ID","name"], how="inner", suffixes=("","_ini") )
# - on: column(s, can be a list of columns) to match during the merge of the two tables. The colum(s) has(have) to be present in both the tables
# - how: type of join to use, see documentation here and the next slide
# - suffixes: columns with the same name in the two tables (not used in on) will be renamed adding these suffixes.
#Give a look to the columns
print(dt.columns)
#Create filter indexes
idx0 = (dt.RemnantType_0==6)
idx1 = (dt.RemnantType_1==6)
idxb0 = idx0 & dt.Semimajor.notnull()
idxb1 = idx1 & dt.Semimajor.notnull()
idxm0 = idxb0 & (dt.GWtime + dt.BWorldtime <= 14000)
idxm1 = idxb1 & (dt.GWtime + dt.BWorldtime <= 14000)
#Filter and join masses
AllBH = pd.concat([dt[idx0].Mass_0,dt[idx1].Mass_1])
BoundBH = pd.concat([dt[idxb0].Mass_0,dt[idxb1].Mass_1])
MergingBH = pd.concat([dt[idxm0].Mass_0,dt[idxm1].Mass_1])
#Filter and join initial masses
AllBHzams = pd.concat([dt[idx0].Mzams_0,dt[idx1].Mzams_1])
BoundBHzams = pd.concat([dt[idxb0].Mzams_0,dt[idxb1].Mzams_1])
MergingBHzams = pd.concat([dt[idxm0].Mzams_0,dt[idxm1].Mzams_1])
#Filter and join initial semimajor axis
AllBHa = pd.concat([dt[idx0].a,dt[idx1].a])
BoundBHa = pd.concat([dt[idxb0].a,dt[idxb1].a])
MergingBHa = pd.concat([dt[idxm0].a,dt[idxm1].a])
#Plot
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.scatter(AllBHzams,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHzams,BoundBH,zorder=2,edgecolor="k",s=30, label="Bound")
plt.scatter(MergingBHzams,MergingBH,zorder=3,edgecolor="k",s=30, label="Merging")
plt.plot(np.linspace(0,140),np.linspace(0,140),ls="dashed",c="gray")
plt.xscale("log")
plt.yscale("log")
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.xlabel("$M\mathrm{zams}$ [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.legend(fontsize=16)
plt.subplot(1,2,2)
plt.scatter(AllBHa,AllBH,zorder=1,edgecolor="k",s=30,label="All")
plt.scatter(BoundBHa,BoundBH,zorder=2,edgecolor="k",s=30,label="Bound")
plt.scatter(MergingBHa,MergingBH,zorder=3,edgecolor="k",s=30,label="Merging")
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Semimajor initial [R$_\odot$]",fontsize=18)
plt.ylabel("BH mass [M$_\odot$]",fontsize=18)
plt.gca().tick_params(axis='both', which='major', labelsize=18)
plt.tight_layout()
plt.savefig("analysis3.png")
plt.show()
| en | 0.812494 | #Load file #Give a look to the columns #Consider only the final states #Load evolved file #Give a look to the columns #After change #Join the two dataset # - on: column(s, can be a list of columns) to match during the merge of the two tables. The colum(s) has(have) to be present in both the tables # - how: type of join to use, see documentation here and the next slide # - suffixes: columns with the same name in the two tables (not used in on) will be renamed adding these suffixes. #Give a look to the columns #Create filter indexes #Filter and join masses #Filter and join initial masses #Filter and join initial semimajor axis #Plot | 3.03845 | 3 |
apps/tg_bot/apps.py | VladimirLazor/Lohika | 0 | 9311 | from django.apps import AppConfig
class TgBotConfig(AppConfig):
name = 'apps.tg_bot'
| from django.apps import AppConfig
class TgBotConfig(AppConfig):
name = 'apps.tg_bot'
| none | 1 | 1.066477 | 1 |
|
office365/sharepoint/portal/group_site_manager.py | rikeshtailor/Office365-REST-Python-Client | 0 | 9312 | <reponame>rikeshtailor/Office365-REST-Python-Client
from office365.runtime.client_object import ClientObject
from office365.runtime.client_result import ClientResult
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.portal.group_creation_params import GroupCreationInformation
from office365.sharepoint.portal.group_site_info import GroupSiteInfo
class GroupSiteManager(ClientObject):
def __init__(self, context):
super(GroupSiteManager, self).__init__(context, ResourcePath("GroupSiteManager"), None)
def create_group_ex(self, display_name, alias, is_public, optional_params=None):
"""
Create a modern site
:param str display_name:
:param str alias:
:param bool is_public:
:param office365.sharepoint.portal.group_creation_params.GroupCreationParams or None optional_params:
"""
payload = GroupCreationInformation(display_name, alias, is_public, optional_params)
result = ClientResult(self.context, GroupSiteInfo())
qry = ServiceOperationQuery(self, "CreateGroupEx", None, payload, None, result)
self.context.add_query(qry)
return result
def delete(self, site_url):
"""
Deletes a SharePoint Team site
:type site_url: str
"""
payload = {
"siteUrl": site_url
}
qry = ServiceOperationQuery(self, "Delete", None, payload)
self.context.add_query(qry)
return self
def get_status(self, group_id):
"""Get the status of a SharePoint site
:type group_id: str
"""
result = ClientResult(self.context, GroupSiteInfo())
qry = ServiceOperationQuery(self, "GetSiteStatus", None, {'groupId': group_id}, None, result)
self.context.add_query(qry)
def _construct_status_request(request):
request.method = HttpMethod.Get
request.url += "?groupId='{0}'".format(group_id)
self.context.before_execute(_construct_status_request)
return result
| from office365.runtime.client_object import ClientObject
from office365.runtime.client_result import ClientResult
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.portal.group_creation_params import GroupCreationInformation
from office365.sharepoint.portal.group_site_info import GroupSiteInfo
class GroupSiteManager(ClientObject):
def __init__(self, context):
super(GroupSiteManager, self).__init__(context, ResourcePath("GroupSiteManager"), None)
def create_group_ex(self, display_name, alias, is_public, optional_params=None):
"""
Create a modern site
:param str display_name:
:param str alias:
:param bool is_public:
:param office365.sharepoint.portal.group_creation_params.GroupCreationParams or None optional_params:
"""
payload = GroupCreationInformation(display_name, alias, is_public, optional_params)
result = ClientResult(self.context, GroupSiteInfo())
qry = ServiceOperationQuery(self, "CreateGroupEx", None, payload, None, result)
self.context.add_query(qry)
return result
def delete(self, site_url):
"""
Deletes a SharePoint Team site
:type site_url: str
"""
payload = {
"siteUrl": site_url
}
qry = ServiceOperationQuery(self, "Delete", None, payload)
self.context.add_query(qry)
return self
def get_status(self, group_id):
"""Get the status of a SharePoint site
:type group_id: str
"""
result = ClientResult(self.context, GroupSiteInfo())
qry = ServiceOperationQuery(self, "GetSiteStatus", None, {'groupId': group_id}, None, result)
self.context.add_query(qry)
def _construct_status_request(request):
request.method = HttpMethod.Get
request.url += "?groupId='{0}'".format(group_id)
self.context.before_execute(_construct_status_request)
return result | en | 0.425274 | Create a modern site :param str display_name: :param str alias: :param bool is_public: :param office365.sharepoint.portal.group_creation_params.GroupCreationParams or None optional_params: Deletes a SharePoint Team site :type site_url: str Get the status of a SharePoint site :type group_id: str | 2.081246 | 2 |
tests/errors/e_tuple_args_T692.py | smok-serwis/cython | 2 | 9313 | <reponame>smok-serwis/cython
# ticket: 692
# mode: error
def func((a, b)):
return a + b
_ERRORS = u"""
4:9: Missing argument name
5:11: undeclared name not builtin: a
5:15: undeclared name not builtin: b
"""
| # ticket: 692
# mode: error
def func((a, b)):
return a + b
_ERRORS = u"""
4:9: Missing argument name
5:11: undeclared name not builtin: a
5:15: undeclared name not builtin: b
""" | en | 0.656657 | # ticket: 692 # mode: error 4:9: Missing argument name 5:11: undeclared name not builtin: a 5:15: undeclared name not builtin: b | 2.429293 | 2 |
ble.py | Ladvien/esp32_upython_env | 0 | 9314 | import bluetooth
import time
bt = bluetooth.BLE() # singleton
bt.active(True) # activate BT stack
UART_UUID = bluetooth.UUID('6E400001-B5A3-F393-E0A9-E50E24DCCA9E')
UART_TX = (bluetooth.UUID('6E400003-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_READ | bluetooth.FLAG_NOTIFY,)
UART_RX = (bluetooth.UUID('6E400002-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_WRITE,)
UART_SERVICE = (UART_UUID, (UART_TX, UART_RX,),)
SERVICES = (UART_SERVICE,)
( (tx, rx,), ) = bt.gatts_register_services(SERVICES)
bt.gap_advertise(100) | import bluetooth
import time
bt = bluetooth.BLE() # singleton
bt.active(True) # activate BT stack
UART_UUID = bluetooth.UUID('6E400001-B5A3-F393-E0A9-E50E24DCCA9E')
UART_TX = (bluetooth.UUID('6E400003-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_READ | bluetooth.FLAG_NOTIFY,)
UART_RX = (bluetooth.UUID('6E400002-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_WRITE,)
UART_SERVICE = (UART_UUID, (UART_TX, UART_RX,),)
SERVICES = (UART_SERVICE,)
( (tx, rx,), ) = bt.gatts_register_services(SERVICES)
bt.gap_advertise(100) | en | 0.362788 | # singleton # activate BT stack | 2.309556 | 2 |
examples/custom-generator/customer.py | luxbe/sledo | 4 | 9315 | <filename>examples/custom-generator/customer.py
from random import randint
from sledo.generate.field_generators.base import FieldGenerator
values = ("Austria",
"Belgium",
"Bulgaria",
"Croatia",
"Cyprus",
"Czech Republic",
"Denmark",
"Estonia",
"Finland",
"France",
"Germany",
"Greece",
"Hungary",
"Ireland",
"Italy",
"Latvia",
"Lithuania",
"Luxembourg",
"Malta",
"Netherlands",
"Poland",
"Portugal",
"Romania",
"Slovakia",
"Slovenia",
"Spain",
"Sweden",
"United States",
"Japan",
"United Kingdom",
"Bangladesh",
"Argentina",
"China")
count = len(values) - 1
class CustomerAddressGenerator(FieldGenerator):
def generate(self, **_):
return values[randint(0, count)]
| <filename>examples/custom-generator/customer.py
from random import randint
from sledo.generate.field_generators.base import FieldGenerator
values = ("Austria",
"Belgium",
"Bulgaria",
"Croatia",
"Cyprus",
"Czech Republic",
"Denmark",
"Estonia",
"Finland",
"France",
"Germany",
"Greece",
"Hungary",
"Ireland",
"Italy",
"Latvia",
"Lithuania",
"Luxembourg",
"Malta",
"Netherlands",
"Poland",
"Portugal",
"Romania",
"Slovakia",
"Slovenia",
"Spain",
"Sweden",
"United States",
"Japan",
"United Kingdom",
"Bangladesh",
"Argentina",
"China")
count = len(values) - 1
class CustomerAddressGenerator(FieldGenerator):
def generate(self, **_):
return values[randint(0, count)]
| none | 1 | 3.165473 | 3 |
|
status-uncertain/baseline_model.py | crawftv/CRAwTO | 1 | 9316 | <reponame>crawftv/CRAwTO
#!/usr/bin/env python3
from sklearn.metrics import r2_score
import numpy as np
class BaselineModel(object):
def get_params(self):
return None
def predict(self, X):
return np.ones_like(X.index.values) * self._y_pred
def score(self, X, y):
y_true = y
y_pred = np.ones_like(y_true) * self._y_pred
return r2_score(y_true, y_pred)
class BaselineClassificationPrediction(BaselineModel):
def fit(
self, X, y,
):
self.y_pred = y.mode()
return self
def predict(
self, X,
):
return self.y_pred
class BaselineRegressionPrediction(BaselineModel):
def fit(self, X, y):
self._y_pred = y.median()
return self
| #!/usr/bin/env python3
from sklearn.metrics import r2_score
import numpy as np
class BaselineModel(object):
def get_params(self):
return None
def predict(self, X):
return np.ones_like(X.index.values) * self._y_pred
def score(self, X, y):
y_true = y
y_pred = np.ones_like(y_true) * self._y_pred
return r2_score(y_true, y_pred)
class BaselineClassificationPrediction(BaselineModel):
def fit(
self, X, y,
):
self.y_pred = y.mode()
return self
def predict(
self, X,
):
return self.y_pred
class BaselineRegressionPrediction(BaselineModel):
def fit(self, X, y):
self._y_pred = y.median()
return self | fr | 0.221828 | #!/usr/bin/env python3 | 2.856769 | 3 |
aligner/grow_diag_final.py | ecalder6/MT-HW2 | 0 | 9317 | import optparse
import sys
def make_set(data, s, e_vocab, f_vocab, aligned, reverse):
for pair in data.split():
cur = pair.split('-')
if reverse:
e_vocab.add(int(cur[1]))
f_vocab.add(int(cur[0]))
aligned.add(int(cur[0]))
s.add((int(cur[1]), int(cur[0])))
else:
e_vocab.add(int(cur[0]))
f_vocab.add(int(cur[1]))
aligned.add(int(cur[0]))
s.add((int(cur[0]), int(cur[1])))
def grow_diag_final_and(e2f_data, f2e_data):
directions = [(-1,0),(0,-1),(1,0),(0,1),(-1,-1),(-1,1),(1,-1),(1,1)]
for (i, (e2f, f2e)) in enumerate(zip(open(e2f_data), open(f2e_data))):
e2f_set, f2e_set, e_vocab, f_vocab, e_aligned, f_aligned = set(), set(), set(), set(), set(), set()
make_set(e2f, e2f_set, e_vocab, f_vocab, e_aligned, False)
make_set(f2e, f2e_set, e_vocab, f_vocab, f_aligned, True)
alignment = e2f_set & f2e_set
union_alignment = e2f_set | f2e_set
grow_diag(e_vocab, f_vocab, e_aligned, f_aligned, alignment, union_alignment, directions)
final(e_vocab, f_vocab, e_aligned, f_aligned, alignment, union_alignment, True)
for e, f in alignment:
sys.stdout.write("%i-%i " % (e,f))
sys.stdout.write("\n")
def grow_diag(e_vocab, f_vocab, e_alignment, f_alignment, alignment, union_alignment, directions):
prev_len = 0
while prev_len != len(alignment):
prev_len = len(alignment)
for e in e_vocab:
for f in f_vocab:
if (e, f) in alignment:
for d in directions:
en, fn = e + d[0], f + d[1]
if (en not in e_alignment or fn not in f_alignment) and (en, fn) in union_alignment:
alignment.add((en, fn))
e_alignment.add(en)
f_alignment.add(fn)
def final(e_vocab, f_vocab, e_alignment, f_alignment, alignment, union_alignment, final_and):
for e in e_vocab:
for f in f_vocab:
c = False
if final_and:
c = e not in e_alignment and f not in f_alignment
else:
c = e not in e_alignment or f not in f_alignment
if c and (e, f) in union_alignment:
alignment.add((e, f))
e_alignment.add(e)
f_alignment.add(f)
def main():
optparser = optparse.OptionParser()
optparser.add_option("-d", "--data", dest="train", default="data/alignment", help="Data filename prefix (default=data)")
optparser.add_option("-e", "--e2f", dest="e2f", default="ef", help="Suffix of English to French filename (default=ef)")
optparser.add_option("-f", "--f2e", dest="f2e", default="fe", help="Suffix of French to English filename (default=fe)")
optparser.add_option("-a", "--final_and", dest="final_and", action="store_true", help="Whether to use Final-And version of the algorithm")
(opts, args) = optparser.parse_args()
e2f_data = "%s.%s" % (opts.train, opts.e2f)
f2e_data = "%s.%s" % (opts.train, opts.f2e)
grow_diag_final_and(e2f_data, f2e_data)
if __name__ == "__main__":
main()
| import optparse
import sys
def make_set(data, s, e_vocab, f_vocab, aligned, reverse):
for pair in data.split():
cur = pair.split('-')
if reverse:
e_vocab.add(int(cur[1]))
f_vocab.add(int(cur[0]))
aligned.add(int(cur[0]))
s.add((int(cur[1]), int(cur[0])))
else:
e_vocab.add(int(cur[0]))
f_vocab.add(int(cur[1]))
aligned.add(int(cur[0]))
s.add((int(cur[0]), int(cur[1])))
def grow_diag_final_and(e2f_data, f2e_data):
directions = [(-1,0),(0,-1),(1,0),(0,1),(-1,-1),(-1,1),(1,-1),(1,1)]
for (i, (e2f, f2e)) in enumerate(zip(open(e2f_data), open(f2e_data))):
e2f_set, f2e_set, e_vocab, f_vocab, e_aligned, f_aligned = set(), set(), set(), set(), set(), set()
make_set(e2f, e2f_set, e_vocab, f_vocab, e_aligned, False)
make_set(f2e, f2e_set, e_vocab, f_vocab, f_aligned, True)
alignment = e2f_set & f2e_set
union_alignment = e2f_set | f2e_set
grow_diag(e_vocab, f_vocab, e_aligned, f_aligned, alignment, union_alignment, directions)
final(e_vocab, f_vocab, e_aligned, f_aligned, alignment, union_alignment, True)
for e, f in alignment:
sys.stdout.write("%i-%i " % (e,f))
sys.stdout.write("\n")
def grow_diag(e_vocab, f_vocab, e_alignment, f_alignment, alignment, union_alignment, directions):
prev_len = 0
while prev_len != len(alignment):
prev_len = len(alignment)
for e in e_vocab:
for f in f_vocab:
if (e, f) in alignment:
for d in directions:
en, fn = e + d[0], f + d[1]
if (en not in e_alignment or fn not in f_alignment) and (en, fn) in union_alignment:
alignment.add((en, fn))
e_alignment.add(en)
f_alignment.add(fn)
def final(e_vocab, f_vocab, e_alignment, f_alignment, alignment, union_alignment, final_and):
for e in e_vocab:
for f in f_vocab:
c = False
if final_and:
c = e not in e_alignment and f not in f_alignment
else:
c = e not in e_alignment or f not in f_alignment
if c and (e, f) in union_alignment:
alignment.add((e, f))
e_alignment.add(e)
f_alignment.add(f)
def main():
optparser = optparse.OptionParser()
optparser.add_option("-d", "--data", dest="train", default="data/alignment", help="Data filename prefix (default=data)")
optparser.add_option("-e", "--e2f", dest="e2f", default="ef", help="Suffix of English to French filename (default=ef)")
optparser.add_option("-f", "--f2e", dest="f2e", default="fe", help="Suffix of French to English filename (default=fe)")
optparser.add_option("-a", "--final_and", dest="final_and", action="store_true", help="Whether to use Final-And version of the algorithm")
(opts, args) = optparser.parse_args()
e2f_data = "%s.%s" % (opts.train, opts.e2f)
f2e_data = "%s.%s" % (opts.train, opts.f2e)
grow_diag_final_and(e2f_data, f2e_data)
if __name__ == "__main__":
main()
| none | 1 | 2.826631 | 3 |
|
tests/test_tbears_db.py | Transcranial-Solutions/t-bears | 35 | 9318 | # -*- coding: utf-8 -*-
# Copyright 2017-2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
from tbears.block_manager.tbears_db import TbearsDB
DIRECTORY_PATH = os.path.abspath((os.path.dirname(__file__)))
DB_PATH = os.path.join(DIRECTORY_PATH, './.tbears_db')
class TestTBearsDB(unittest.TestCase):
def setUp(self):
self.TBEARS_DB = TbearsDB(TbearsDB.make_db(DB_PATH))
self.test_key = b'test_key'
self.test_value = b'test_value'
def tearDown(self):
self.TBEARS_DB.close()
shutil.rmtree(DB_PATH)
def test_put_and_get(self):
# Put and get
self.TBEARS_DB.put(self.test_key, self.test_value)
ret = self.TBEARS_DB.get(self.test_key)
self.assertEqual(ret, self.test_value)
# overwrite
overwrite_value = b'test_value_overwrite'
self.TBEARS_DB.put(self.test_key, overwrite_value)
ret = self.TBEARS_DB.get(self.test_key)
self.assertEqual(ret, overwrite_value)
# get invalid key
ret = self.TBEARS_DB.get(b'invalid_key')
self.assertIsNone(ret)
# put invalid type
self.assertRaises(TypeError, self.TBEARS_DB.put, 'test_key', self.test_value)
self.assertRaises(TypeError, self.TBEARS_DB.put, self.test_key, 123)
def test_delete(self):
self.TBEARS_DB.put(self.test_key, self.test_value)
ret = self.TBEARS_DB.get(self.test_key)
self.assertEqual(ret, self.test_value)
self.TBEARS_DB.delete(self.test_key)
ret = self.TBEARS_DB.get(self.test_key)
self.assertIsNone(ret)
def test_iterator(self):
self.TBEARS_DB.put(b'key1', b'value1')
self.TBEARS_DB.put(b'key2', b'value2')
self.TBEARS_DB.put(b'key3', b'value3')
self.TBEARS_DB.put(b'key4', b'value4')
i = 1
for _, actual_value in self.TBEARS_DB.iterator():
expected_value = ('value' + str(i)).encode()
self.assertEqual(expected_value, actual_value)
i += 1
| # -*- coding: utf-8 -*-
# Copyright 2017-2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import unittest
from tbears.block_manager.tbears_db import TbearsDB
DIRECTORY_PATH = os.path.abspath((os.path.dirname(__file__)))
DB_PATH = os.path.join(DIRECTORY_PATH, './.tbears_db')
class TestTBearsDB(unittest.TestCase):
def setUp(self):
self.TBEARS_DB = TbearsDB(TbearsDB.make_db(DB_PATH))
self.test_key = b'test_key'
self.test_value = b'test_value'
def tearDown(self):
self.TBEARS_DB.close()
shutil.rmtree(DB_PATH)
def test_put_and_get(self):
# Put and get
self.TBEARS_DB.put(self.test_key, self.test_value)
ret = self.TBEARS_DB.get(self.test_key)
self.assertEqual(ret, self.test_value)
# overwrite
overwrite_value = b'test_value_overwrite'
self.TBEARS_DB.put(self.test_key, overwrite_value)
ret = self.TBEARS_DB.get(self.test_key)
self.assertEqual(ret, overwrite_value)
# get invalid key
ret = self.TBEARS_DB.get(b'invalid_key')
self.assertIsNone(ret)
# put invalid type
self.assertRaises(TypeError, self.TBEARS_DB.put, 'test_key', self.test_value)
self.assertRaises(TypeError, self.TBEARS_DB.put, self.test_key, 123)
def test_delete(self):
self.TBEARS_DB.put(self.test_key, self.test_value)
ret = self.TBEARS_DB.get(self.test_key)
self.assertEqual(ret, self.test_value)
self.TBEARS_DB.delete(self.test_key)
ret = self.TBEARS_DB.get(self.test_key)
self.assertIsNone(ret)
def test_iterator(self):
self.TBEARS_DB.put(b'key1', b'value1')
self.TBEARS_DB.put(b'key2', b'value2')
self.TBEARS_DB.put(b'key3', b'value3')
self.TBEARS_DB.put(b'key4', b'value4')
i = 1
for _, actual_value in self.TBEARS_DB.iterator():
expected_value = ('value' + str(i)).encode()
self.assertEqual(expected_value, actual_value)
i += 1
| en | 0.826489 | # -*- coding: utf-8 -*- # Copyright 2017-2018 ICON Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Put and get # overwrite # get invalid key # put invalid type | 2.150286 | 2 |
src/exabgp/bgp/message/update/attribute/bgpls/link/mplsmask.py | pierky/exabgp | 1,560 | 9319 | # encoding: utf-8
"""
mplsmask.py
Created by <NAME> on 2016-12-01.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LinkState
from exabgp.bgp.message.update.attribute.bgpls.linkstate import FlagLS
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |L|R| Reserved |
# +-+-+-+-+-+-+-+-+
# https://tools.ietf.org/html/rfc7752#section-3.3.2.2 MPLS Protocol Mask
#
# +------------+------------------------------------------+-----------+
# | Bit | Description | Reference |
# +------------+------------------------------------------+-----------+
# | 'L' | Label Distribution Protocol (LDP) | [RFC5036] |
# | 'R' | Extension to RSVP for LSP Tunnels | [RFC3209] |
# | | (RSVP-TE) | |
# | 'Reserved' | Reserved for future use | |
# +------------+------------------------------------------+-----------+
# RFC 7752 3.3.2.2. MPLS Protocol Mask TLV
@LinkState.register()
class MplsMask(FlagLS):
REPR = 'MPLS Protocol mask'
JSON = 'mpls-mask'
TLV = 1094
FLAGS = ['LDP', 'RSVP-TE', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV']
LEN = 1
| # encoding: utf-8
"""
mplsmask.py
Created by <NAME> on 2016-12-01.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.attribute.bgpls.linkstate import LinkState
from exabgp.bgp.message.update.attribute.bgpls.linkstate import FlagLS
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |L|R| Reserved |
# +-+-+-+-+-+-+-+-+
# https://tools.ietf.org/html/rfc7752#section-3.3.2.2 MPLS Protocol Mask
#
# +------------+------------------------------------------+-----------+
# | Bit | Description | Reference |
# +------------+------------------------------------------+-----------+
# | 'L' | Label Distribution Protocol (LDP) | [RFC5036] |
# | 'R' | Extension to RSVP for LSP Tunnels | [RFC3209] |
# | | (RSVP-TE) | |
# | 'Reserved' | Reserved for future use | |
# +------------+------------------------------------------+-----------+
# RFC 7752 3.3.2.2. MPLS Protocol Mask TLV
@LinkState.register()
class MplsMask(FlagLS):
REPR = 'MPLS Protocol mask'
JSON = 'mpls-mask'
TLV = 1094
FLAGS = ['LDP', 'RSVP-TE', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV', 'RSV']
LEN = 1
| en | 0.284276 | # encoding: utf-8 mplsmask.py Created by <NAME> on 2016-12-01. Copyright (c) 2014-2017 Exa Networks. All rights reserved. # 0 1 2 3 # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Type | Length | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # |L|R| Reserved | # +-+-+-+-+-+-+-+-+ # https://tools.ietf.org/html/rfc7752#section-3.3.2.2 MPLS Protocol Mask # # +------------+------------------------------------------+-----------+ # | Bit | Description | Reference | # +------------+------------------------------------------+-----------+ # | 'L' | Label Distribution Protocol (LDP) | [RFC5036] | # | 'R' | Extension to RSVP for LSP Tunnels | [RFC3209] | # | | (RSVP-TE) | | # | 'Reserved' | Reserved for future use | | # +------------+------------------------------------------+-----------+ # RFC 7752 3.3.2.2. MPLS Protocol Mask TLV | 1.255285 | 1 |
tests/test_cecum.py | hsorby/scaffoldmaker | 0 | 9320 | import unittest
from opencmiss.utils.zinc.finiteelement import evaluateFieldNodesetRange
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.context import Context
from opencmiss.zinc.element import Element
from opencmiss.zinc.field import Field
from opencmiss.zinc.result import RESULT_OK
from scaffoldmaker.meshtypes.meshtype_3d_cecum1 import MeshType_3d_cecum1
from scaffoldmaker.utils.zinc_utils import createFaceMeshGroupExteriorOnFace
from testutils import assertAlmostEqualList
class CecumScaffoldTestCase(unittest.TestCase):
def test_cecum1(self):
"""
Test creation of cecum scaffold.
"""
parameterSetNames = MeshType_3d_cecum1.getParameterSetNames()
self.assertEqual(parameterSetNames, ["Default", "Pig 1"])
options = MeshType_3d_cecum1.getDefaultOptions("Pig 1")
self.assertEqual(30, len(options))
self.assertEqual(5, options.get("Number of segments"))
self.assertEqual(2, options.get("Number of elements around tenia coli"))
self.assertEqual(8, options.get("Number of elements along segment"))
self.assertEqual(1, options.get("Number of elements through wall"))
self.assertEqual(35.0, options.get("Start inner radius"))
self.assertEqual(3.0, options.get("Start inner radius derivative"))
self.assertEqual(38.0, options.get("End inner radius"))
self.assertEqual(3.0, options.get("End inner radius derivative"))
self.assertEqual(0.5, options.get("Corner inner radius factor"))
self.assertEqual(0.25, options.get("Haustrum inner radius factor"))
self.assertEqual(4.0, options.get("Segment length mid derivative factor"))
self.assertEqual(3, options.get("Number of tenia coli"))
self.assertEqual(5.0, options.get("Start tenia coli width"))
self.assertEqual(0.0, options.get("End tenia coli width derivative"))
self.assertEqual(2.0, options.get("Wall thickness"))
ostiumOptions = options['Ileocecal junction']
ostiumSettings = ostiumOptions.getScaffoldSettings()
self.assertEqual(1, ostiumSettings.get("Number of vessels"))
self.assertEqual(8, ostiumSettings.get("Number of elements around ostium"))
self.assertEqual(1, ostiumSettings.get("Number of elements through wall"))
self.assertEqual(20.0, ostiumSettings.get("Ostium diameter"))
self.assertEqual(10.0, ostiumSettings.get("Vessel inner diameter"))
self.assertEqual(60, options.get("Ileocecal junction angular position degrees"))
self.assertEqual(0.5, options.get("Ileocecal junction position along factor"))
context = Context("Test")
region = context.getDefaultRegion()
self.assertTrue(region.isValid())
annotationGroups = MeshType_3d_cecum1.generateBaseMesh(region, options)
self.assertEqual(2, len(annotationGroups))
fieldmodule = region.getFieldmodule()
self.assertEqual(RESULT_OK, fieldmodule.defineAllFaces())
mesh3d = fieldmodule.findMeshByDimension(3)
self.assertEqual(1492, mesh3d.getSize())
mesh2d = fieldmodule.findMeshByDimension(2)
self.assertEqual(5617, mesh2d.getSize())
mesh1d = fieldmodule.findMeshByDimension(1)
self.assertEqual(6767, mesh1d.getSize())
nodes = fieldmodule.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
self.assertEqual(2642, nodes.getSize())
datapoints = fieldmodule.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_DATAPOINTS)
self.assertEqual(0, datapoints.getSize())
coordinates = fieldmodule.findFieldByName("coordinates").castFiniteElement()
self.assertTrue(coordinates.isValid())
minimums, maximums = evaluateFieldNodesetRange(coordinates, nodes)
assertAlmostEqualList(self, minimums, [-49.01658984455258, -46.89686037622053, -2.343256155753525], 1.0E-6)
assertAlmostEqualList(self, maximums, [42.18085849205387, 54.89264119402881, 180.0], 1.0E-6)
with ChangeManager(fieldmodule):
one = fieldmodule.createFieldConstant(1.0)
faceMeshGroup = createFaceMeshGroupExteriorOnFace(fieldmodule, Element.FACE_TYPE_XI3_1)
surfaceAreaField = fieldmodule.createFieldMeshIntegral(one, coordinates, faceMeshGroup)
surfaceAreaField.setNumbersOfPoints(4)
volumeField = fieldmodule.createFieldMeshIntegral(one, coordinates, mesh3d)
volumeField.setNumbersOfPoints(3)
fieldcache = fieldmodule.createFieldcache()
result, surfaceArea = surfaceAreaField.evaluateReal(fieldcache, 1)
self.assertEqual(result, RESULT_OK)
self.assertAlmostEqual(surfaceArea, 65960.20655074248, delta=1.0E-6)
result, volume = volumeField.evaluateReal(fieldcache, 1)
self.assertEqual(result, RESULT_OK)
self.assertAlmostEqual(volume, 127905.28250502056, delta=1.0E-6)
if __name__ == "__main__":
unittest.main()
| import unittest
from opencmiss.utils.zinc.finiteelement import evaluateFieldNodesetRange
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.context import Context
from opencmiss.zinc.element import Element
from opencmiss.zinc.field import Field
from opencmiss.zinc.result import RESULT_OK
from scaffoldmaker.meshtypes.meshtype_3d_cecum1 import MeshType_3d_cecum1
from scaffoldmaker.utils.zinc_utils import createFaceMeshGroupExteriorOnFace
from testutils import assertAlmostEqualList
class CecumScaffoldTestCase(unittest.TestCase):
def test_cecum1(self):
"""
Test creation of cecum scaffold.
"""
parameterSetNames = MeshType_3d_cecum1.getParameterSetNames()
self.assertEqual(parameterSetNames, ["Default", "Pig 1"])
options = MeshType_3d_cecum1.getDefaultOptions("Pig 1")
self.assertEqual(30, len(options))
self.assertEqual(5, options.get("Number of segments"))
self.assertEqual(2, options.get("Number of elements around tenia coli"))
self.assertEqual(8, options.get("Number of elements along segment"))
self.assertEqual(1, options.get("Number of elements through wall"))
self.assertEqual(35.0, options.get("Start inner radius"))
self.assertEqual(3.0, options.get("Start inner radius derivative"))
self.assertEqual(38.0, options.get("End inner radius"))
self.assertEqual(3.0, options.get("End inner radius derivative"))
self.assertEqual(0.5, options.get("Corner inner radius factor"))
self.assertEqual(0.25, options.get("Haustrum inner radius factor"))
self.assertEqual(4.0, options.get("Segment length mid derivative factor"))
self.assertEqual(3, options.get("Number of tenia coli"))
self.assertEqual(5.0, options.get("Start tenia coli width"))
self.assertEqual(0.0, options.get("End tenia coli width derivative"))
self.assertEqual(2.0, options.get("Wall thickness"))
ostiumOptions = options['Ileocecal junction']
ostiumSettings = ostiumOptions.getScaffoldSettings()
self.assertEqual(1, ostiumSettings.get("Number of vessels"))
self.assertEqual(8, ostiumSettings.get("Number of elements around ostium"))
self.assertEqual(1, ostiumSettings.get("Number of elements through wall"))
self.assertEqual(20.0, ostiumSettings.get("Ostium diameter"))
self.assertEqual(10.0, ostiumSettings.get("Vessel inner diameter"))
self.assertEqual(60, options.get("Ileocecal junction angular position degrees"))
self.assertEqual(0.5, options.get("Ileocecal junction position along factor"))
context = Context("Test")
region = context.getDefaultRegion()
self.assertTrue(region.isValid())
annotationGroups = MeshType_3d_cecum1.generateBaseMesh(region, options)
self.assertEqual(2, len(annotationGroups))
fieldmodule = region.getFieldmodule()
self.assertEqual(RESULT_OK, fieldmodule.defineAllFaces())
mesh3d = fieldmodule.findMeshByDimension(3)
self.assertEqual(1492, mesh3d.getSize())
mesh2d = fieldmodule.findMeshByDimension(2)
self.assertEqual(5617, mesh2d.getSize())
mesh1d = fieldmodule.findMeshByDimension(1)
self.assertEqual(6767, mesh1d.getSize())
nodes = fieldmodule.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
self.assertEqual(2642, nodes.getSize())
datapoints = fieldmodule.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_DATAPOINTS)
self.assertEqual(0, datapoints.getSize())
coordinates = fieldmodule.findFieldByName("coordinates").castFiniteElement()
self.assertTrue(coordinates.isValid())
minimums, maximums = evaluateFieldNodesetRange(coordinates, nodes)
assertAlmostEqualList(self, minimums, [-49.01658984455258, -46.89686037622053, -2.343256155753525], 1.0E-6)
assertAlmostEqualList(self, maximums, [42.18085849205387, 54.89264119402881, 180.0], 1.0E-6)
with ChangeManager(fieldmodule):
one = fieldmodule.createFieldConstant(1.0)
faceMeshGroup = createFaceMeshGroupExteriorOnFace(fieldmodule, Element.FACE_TYPE_XI3_1)
surfaceAreaField = fieldmodule.createFieldMeshIntegral(one, coordinates, faceMeshGroup)
surfaceAreaField.setNumbersOfPoints(4)
volumeField = fieldmodule.createFieldMeshIntegral(one, coordinates, mesh3d)
volumeField.setNumbersOfPoints(3)
fieldcache = fieldmodule.createFieldcache()
result, surfaceArea = surfaceAreaField.evaluateReal(fieldcache, 1)
self.assertEqual(result, RESULT_OK)
self.assertAlmostEqual(surfaceArea, 65960.20655074248, delta=1.0E-6)
result, volume = volumeField.evaluateReal(fieldcache, 1)
self.assertEqual(result, RESULT_OK)
self.assertAlmostEqual(volume, 127905.28250502056, delta=1.0E-6)
if __name__ == "__main__":
unittest.main()
| en | 0.789202 | Test creation of cecum scaffold. | 2.120829 | 2 |
samples/destroy_vm.py | jm66/pyvmomi-community-samples | 4 | 9321 | #!/usr/bin/env python
# Copyright 2015 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
from pyVim import connect
from pyVmomi import vim
from tools import cli
from tools import tasks
def setup_args():
"""Adds additional ARGS to allow the vm name or uuid to
be set.
"""
parser = cli.build_arg_parser()
# using j here because -u is used for user
parser.add_argument('-j', '--uuid',
help='BIOS UUID of the VirtualMachine you want '
'to destroy.')
parser.add_argument('-n', '--name',
help='DNS Name of the VirtualMachine you want to '
'destroy.')
parser.add_argument('-i', '--ip',
help='IP Address of the VirtualMachine you want to '
'destroy')
parser.add_argument('-v', '--vm',
help='VM name of the VirtualMachine you want '
'to destroy.')
my_args = parser.parse_args()
return cli.prompt_for_password(my_args)
def get_obj(content, vimtype, name):
"""Create contrainer view and search for object in it"""
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
container.Destroy()
return obj
ARGS = setup_args()
SI = None
try:
SI = connect.SmartConnectNoSSL(host=ARGS.host,
user=ARGS.user,
pwd=<PASSWORD>,
port=ARGS.port)
atexit.register(connect.Disconnect, SI)
except (IOError, vim.fault.InvalidLogin):
pass
if not SI:
raise SystemExit("Unable to connect to host with supplied credentials.")
VM = None
if ARGS.vm:
VM = get_obj(SI.content, [vim.VirtualMachine], ARGS.vm)
elif ARGS.uuid:
VM = SI.content.searchIndex.FindByUuid(None, ARGS.uuid,
True,
False)
elif ARGS.name:
VM = SI.content.searchIndex.FindByDnsName(None, ARGS.name,
True)
elif ARGS.ip:
VM = SI.content.searchIndex.FindByIp(None, ARGS.ip, True)
if VM is None:
raise SystemExit(
"Unable to locate VirtualMachine. Arguments given: "
"vm - {0} , uuid - {1} , name - {2} , ip - {3}"
.format(ARGS.vm, ARGS.uuid, ARGS.name, ARGS.ip)
)
print("Found: {0}".format(VM.name))
print("The current powerState is: {0}".format(VM.runtime.powerState))
if format(VM.runtime.powerState) == "poweredOn":
print("Attempting to power off {0}".format(VM.name))
TASK = VM.PowerOffVM_Task()
tasks.wait_for_tasks(SI, [TASK])
print("{0}".format(TASK.info.state))
print("Destroying VM from vSphere.")
TASK = VM.Destroy_Task()
tasks.wait_for_tasks(SI, [TASK])
print("Done.")
| #!/usr/bin/env python
# Copyright 2015 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
from pyVim import connect
from pyVmomi import vim
from tools import cli
from tools import tasks
def setup_args():
"""Adds additional ARGS to allow the vm name or uuid to
be set.
"""
parser = cli.build_arg_parser()
# using j here because -u is used for user
parser.add_argument('-j', '--uuid',
help='BIOS UUID of the VirtualMachine you want '
'to destroy.')
parser.add_argument('-n', '--name',
help='DNS Name of the VirtualMachine you want to '
'destroy.')
parser.add_argument('-i', '--ip',
help='IP Address of the VirtualMachine you want to '
'destroy')
parser.add_argument('-v', '--vm',
help='VM name of the VirtualMachine you want '
'to destroy.')
my_args = parser.parse_args()
return cli.prompt_for_password(my_args)
def get_obj(content, vimtype, name):
"""Create contrainer view and search for object in it"""
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if name:
if c.name == name:
obj = c
break
else:
obj = c
break
container.Destroy()
return obj
ARGS = setup_args()
SI = None
try:
SI = connect.SmartConnectNoSSL(host=ARGS.host,
user=ARGS.user,
pwd=<PASSWORD>,
port=ARGS.port)
atexit.register(connect.Disconnect, SI)
except (IOError, vim.fault.InvalidLogin):
pass
if not SI:
raise SystemExit("Unable to connect to host with supplied credentials.")
VM = None
if ARGS.vm:
VM = get_obj(SI.content, [vim.VirtualMachine], ARGS.vm)
elif ARGS.uuid:
VM = SI.content.searchIndex.FindByUuid(None, ARGS.uuid,
True,
False)
elif ARGS.name:
VM = SI.content.searchIndex.FindByDnsName(None, ARGS.name,
True)
elif ARGS.ip:
VM = SI.content.searchIndex.FindByIp(None, ARGS.ip, True)
if VM is None:
raise SystemExit(
"Unable to locate VirtualMachine. Arguments given: "
"vm - {0} , uuid - {1} , name - {2} , ip - {3}"
.format(ARGS.vm, ARGS.uuid, ARGS.name, ARGS.ip)
)
print("Found: {0}".format(VM.name))
print("The current powerState is: {0}".format(VM.runtime.powerState))
if format(VM.runtime.powerState) == "poweredOn":
print("Attempting to power off {0}".format(VM.name))
TASK = VM.PowerOffVM_Task()
tasks.wait_for_tasks(SI, [TASK])
print("{0}".format(TASK.info.state))
print("Destroying VM from vSphere.")
TASK = VM.Destroy_Task()
tasks.wait_for_tasks(SI, [TASK])
print("Done.")
| en | 0.830446 | #!/usr/bin/env python # Copyright 2015 <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Adds additional ARGS to allow the vm name or uuid to be set. # using j here because -u is used for user Create contrainer view and search for object in it | 2.513161 | 3 |
helpers/Screen.py | 1000monkeys/MastermindRedux | 0 | 9322 | import sys
class Screen:
def __init__(self) -> None:
pass
def handle_events(self, events):
for event in events:
if event.type == self.pygame.QUIT:
sys.exit()
def draw(self, screen):
pass | import sys
class Screen:
def __init__(self) -> None:
pass
def handle_events(self, events):
for event in events:
if event.type == self.pygame.QUIT:
sys.exit()
def draw(self, screen):
pass | none | 1 | 2.622758 | 3 |
|
VirtualStage/BackgroundMatting/fixed_threshold.py | chris-han/ailab | 0 | 9323 | <filename>VirtualStage/BackgroundMatting/fixed_threshold.py
import os
def fixed_split(videos, thresholds, mask_suffix, overlap=0, background_path="/"):
# crop target background video frames
backgrounds = [os.path.join(background_path, f[:-4]) for f in os.listdir(background_path) if f.endswith(".mp4")]
print(f"Splitting {len(backgrounds)} target background videos vertically by a fixed threshold")
for i, background in enumerate(backgrounds):
if i >= (len(thresholds)) or not thresholds[i]:
continue
try:
os.makedirs(background + "_up")
os.makedirs(background + "_dw")
except FileExistsError:
continue
threshold = int(thresholds[i])
iup_region = f"iw:{threshold + overlap}:0:0"
idw_region = f"iw:ih-{threshold + overlap}:0:{threshold - overlap}"
cmd=(
f"ffmpeg -i \"{os.path.join(background, '%04d_img.png')}\" "
f'-filter:v "crop={iup_region}" '
f"\"{os.path.join(background+'_up', '%04d_img.png')}\""
" > split_background_logs.txt 2>&1"
)
code = os.system(
cmd
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -i \"{os.path.join(background, '%04d_img.png')}\" "
f'-filter:v "crop={idw_region}" '
f"\"{os.path.join(background+'_dw', '%04d_img.png')}\""
" > split_background_logs.txt 2>&1"
)
if code != 0:
exit(code)
print(f"Splitting {len(videos)} videos vertically by a fixed threshold")
for i, video in enumerate(videos):
if i >= (len(thresholds)) or not thresholds[i]:
continue
try:
os.makedirs(video + "_up")
os.makedirs(video + "_dw")
except FileExistsError:
continue
threshold = int(thresholds[i])
iup_region = f"iw:{threshold + overlap}:0:0"
idw_region = f"iw:ih-{threshold + overlap}:0:{threshold - overlap}"
# crop target background single image
cmd = (
f"ffmpeg -y -i \"{video+'.png'}\" "
f'-filter:v \"crop={iup_region}\" '
f"\"{video+'_up.png'}\""
" > split_logs.txt 2>&1"
)
code = os.system(
cmd
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -y -i \"{video+'.png'}\" "
f'-filter:v "crop={idw_region}" '
f"\"{video+'_dw.png'}\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
# crop color images
cmd=(
f"ffmpeg -i \"{os.path.join(video, '%04d_img.png')}\" "
f'-filter:v "crop={iup_region}" '
f"\"{os.path.join(video+'_up', '%04d_img.png')}\""
" > split_logs.txt 2>&1"
)
code = os.system(
cmd
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -i \"{os.path.join(video, '%04d_img.png')}\" "
f'-filter:v "crop={idw_region}" '
f"\"{os.path.join(video+'_dw', '%04d_img.png')}\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
# crop mask images
code = os.system(
f"ffmpeg -i \"{os.path.join(video, '%04d')}{mask_suffix}.png\" "
f'-filter:v "crop={iup_region}" '
f"\"{os.path.join(video+'_up', '%04d')}{mask_suffix}.png\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -i \"{os.path.join(video, '%04d')}{mask_suffix}.png\" "
f'-filter:v "crop={idw_region}" '
f"\"{os.path.join(video+'_dw', '%04d')}{mask_suffix}.png\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
print(f" Splitted {video} ({i+1}/{len(videos)})")
def fixed_merge(videos, factors, output_dir, suffix, outputs_list, overlap=0):
print(f"Reconstructing {len(videos)} output images")
for i, video in enumerate(videos):
if i < (len(factors)) and factors[i]:
# video split, merging
out_path = os.path.join(output_dir, os.path.basename(video)).replace(
"\\", "/"
)
try:
os.makedirs(out_path + suffix)
except FileExistsError:
continue
outpup = (out_path + "_up" + suffix).replace("\\", "/")
outpdw = (out_path + "_dw" + suffix).replace("\\", "/")
for o in outputs_list:
code = os.system(
f"ffmpeg -i \"{outpup}/%04d_{o}.png\" -i \"{outpdw}/%04d_{o}.png\" "
f'-filter_complex "[0:0]crop=iw:ih-{overlap}:0:0[v0];'
f"[1:0]crop=iw:ih-{overlap}:0:{overlap}[v1];"
f'[v0][v1]vstack" '
f"\"{out_path + suffix}/%04d_{o}.png\" -hide_banner"
" > merge_logs.txt"
)
if code != 0:
exit(code)
print(f" Merged {video} ({i+1}/{len(videos)})")
| <filename>VirtualStage/BackgroundMatting/fixed_threshold.py
import os
def fixed_split(videos, thresholds, mask_suffix, overlap=0, background_path="/"):
# crop target background video frames
backgrounds = [os.path.join(background_path, f[:-4]) for f in os.listdir(background_path) if f.endswith(".mp4")]
print(f"Splitting {len(backgrounds)} target background videos vertically by a fixed threshold")
for i, background in enumerate(backgrounds):
if i >= (len(thresholds)) or not thresholds[i]:
continue
try:
os.makedirs(background + "_up")
os.makedirs(background + "_dw")
except FileExistsError:
continue
threshold = int(thresholds[i])
iup_region = f"iw:{threshold + overlap}:0:0"
idw_region = f"iw:ih-{threshold + overlap}:0:{threshold - overlap}"
cmd=(
f"ffmpeg -i \"{os.path.join(background, '%04d_img.png')}\" "
f'-filter:v "crop={iup_region}" '
f"\"{os.path.join(background+'_up', '%04d_img.png')}\""
" > split_background_logs.txt 2>&1"
)
code = os.system(
cmd
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -i \"{os.path.join(background, '%04d_img.png')}\" "
f'-filter:v "crop={idw_region}" '
f"\"{os.path.join(background+'_dw', '%04d_img.png')}\""
" > split_background_logs.txt 2>&1"
)
if code != 0:
exit(code)
print(f"Splitting {len(videos)} videos vertically by a fixed threshold")
for i, video in enumerate(videos):
if i >= (len(thresholds)) or not thresholds[i]:
continue
try:
os.makedirs(video + "_up")
os.makedirs(video + "_dw")
except FileExistsError:
continue
threshold = int(thresholds[i])
iup_region = f"iw:{threshold + overlap}:0:0"
idw_region = f"iw:ih-{threshold + overlap}:0:{threshold - overlap}"
# crop target background single image
cmd = (
f"ffmpeg -y -i \"{video+'.png'}\" "
f'-filter:v \"crop={iup_region}\" '
f"\"{video+'_up.png'}\""
" > split_logs.txt 2>&1"
)
code = os.system(
cmd
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -y -i \"{video+'.png'}\" "
f'-filter:v "crop={idw_region}" '
f"\"{video+'_dw.png'}\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
# crop color images
cmd=(
f"ffmpeg -i \"{os.path.join(video, '%04d_img.png')}\" "
f'-filter:v "crop={iup_region}" '
f"\"{os.path.join(video+'_up', '%04d_img.png')}\""
" > split_logs.txt 2>&1"
)
code = os.system(
cmd
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -i \"{os.path.join(video, '%04d_img.png')}\" "
f'-filter:v "crop={idw_region}" '
f"\"{os.path.join(video+'_dw', '%04d_img.png')}\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
# crop mask images
code = os.system(
f"ffmpeg -i \"{os.path.join(video, '%04d')}{mask_suffix}.png\" "
f'-filter:v "crop={iup_region}" '
f"\"{os.path.join(video+'_up', '%04d')}{mask_suffix}.png\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
code = os.system(
f"ffmpeg -i \"{os.path.join(video, '%04d')}{mask_suffix}.png\" "
f'-filter:v "crop={idw_region}" '
f"\"{os.path.join(video+'_dw', '%04d')}{mask_suffix}.png\""
" > split_logs.txt 2>&1"
)
if code != 0:
exit(code)
print(f" Splitted {video} ({i+1}/{len(videos)})")
def fixed_merge(videos, factors, output_dir, suffix, outputs_list, overlap=0):
print(f"Reconstructing {len(videos)} output images")
for i, video in enumerate(videos):
if i < (len(factors)) and factors[i]:
# video split, merging
out_path = os.path.join(output_dir, os.path.basename(video)).replace(
"\\", "/"
)
try:
os.makedirs(out_path + suffix)
except FileExistsError:
continue
outpup = (out_path + "_up" + suffix).replace("\\", "/")
outpdw = (out_path + "_dw" + suffix).replace("\\", "/")
for o in outputs_list:
code = os.system(
f"ffmpeg -i \"{outpup}/%04d_{o}.png\" -i \"{outpdw}/%04d_{o}.png\" "
f'-filter_complex "[0:0]crop=iw:ih-{overlap}:0:0[v0];'
f"[1:0]crop=iw:ih-{overlap}:0:{overlap}[v1];"
f'[v0][v1]vstack" '
f"\"{out_path + suffix}/%04d_{o}.png\" -hide_banner"
" > merge_logs.txt"
)
if code != 0:
exit(code)
print(f" Merged {video} ({i+1}/{len(videos)})")
| en | 0.538083 | # crop target background video frames # crop target background single image # crop color images # crop mask images # video split, merging | 2.649339 | 3 |
hn2016_falwa/utilities.py | veredsil/hn2016_falwa | 0 | 9324 | import numpy as np
from math import pi,exp
def static_stability(height,area,theta,s_et=None,n_et=None):
"""
The function "static_stability" computes the vertical gradient (z-derivative)
of hemispheric-averaged potential temperature, i.e. d\tilde{theta}/dz in the def-
inition of QGPV in eq.(3) of Huang and Nakamura (2016), by central differencing.
At the boundary, the static stability is estimated by forward/backward differen-
cing involving two adjacent z-grid points:
i.e. stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0])
stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1])
Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues
Parameters
----------
height : sequence or array_like
Array of z-coordinate [in meters] with dimension = (kmax), equally spaced
area : ndarray
Two-dimension numpy array specifying differential areal element of each grid point;
dimension = (nlat, nlon).
theta : ndarray
Matrix of potential temperature [K] with dimension (kmax,nlat,nlon) or (kmax,nlat)
s_et : int, optional
Index of the latitude that defines the boundary of the Southern hemispheric domain;
initialized as nlat/2 if not input
n_et : int, optional
Index of the latitude that defines the boundary of the Southern hemispheric domain;
initialized as nlat/2 if not input
Returns
-------
t0_n : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Northern hemispheric domain with dimension = (kmax)
t0_s : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Southern hemispheric domain with dimension = (kmax)
stat_n : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric
domain with dimension = (kmax)
stat_s : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric
domain with dimension = (kmax)
"""
nlat = theta.shape[1]
if s_et==None:
s_et = nlat//2
if n_et==None:
n_et = nlat//2
stat_n = np.zeros(theta.shape[0])
stat_s = np.zeros(theta.shape[0])
if theta.ndim==3:
zonal_mean = np.mean(theta,axis=-1)
elif theta.ndim==2:
zonal_mean = theta
if area.ndim==2:
area_zonal_mean = np.mean(area,axis=-1)
elif area.ndim==1:
area_zonal_mean = area
csm_n_et = np.sum(area_zonal_mean[-n_et:])
csm_s_et = np.sum(area_zonal_mean[:s_et])
t0_n = np.sum(zonal_mean[:,-n_et:]*area_zonal_mean[np.newaxis,-n_et:],axis=-1)/csm_n_et
t0_s = np.sum(zonal_mean[:,:s_et]*area_zonal_mean[np.newaxis,:s_et],axis=-1)/csm_s_et
stat_n[1:-1] = (t0_n[2:]-t0_n[:-2])/(height[2:]-height[:-2])
stat_s[1:-1] = (t0_s[2:]-t0_s[:-2])/(height[2:]-height[:-2])
stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0])
stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1])
stat_s[0] = (t0_s[1]-t0_s[0])/(height[1]-height[0])
stat_s[-1] = (t0_s[-2]-t0_s[-1])/(height[-2]-height[-1])
return t0_n,t0_s,stat_n,stat_s
def compute_qgpv_givenvort(omega,nlat,nlon,kmax,unih,ylat,avort,potential_temp,
t0_cn,t0_cs,stat_cn,stat_cs,nlat_s=None,scale_height=7000.):
"""
The function "compute_qgpv_givenvort" computes the quasi-geostrophic potential
vorticity based on the absolute vorticity, potential temperature and static
stability given.
Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues
Parameters
----------
omega : float, optional
Rotation rate of the planet.
nlat : int
Latitudinal dimension of the latitude grid.
nlon : int
Longitudinal dimension of the longitude grid.
kmax : int
Vertical dimension of the height grid.
unih : sequence or array_like
Numpy array of height in [meters]; dimension = (kmax)
ylat : sequence or array_like
Numpy array of latitudes in [degrees]; dimension = (nlat)
avort : ndarray
Three-dimension numpy array of absolute vorticity (i.e. relative vorticity
+ 2*Omega*sin(lat)) in [1/s]; dimension = (kmax x nlat x nlon)
potential_temp : ndarray
Three-dimension numpy array of potential temperature in [K];
dimension = (kmax x nlat x nlon)
t0_cn : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Northern hemispheric domain with dimension = (kmax)
t0_cs : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Southern hemispheric domain with dimension = (kmax)
stat_cn : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric
domain with dimension = (kmax)
stat_cs : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric
domain with dimension = (kmax)
scale_height : float
Scale height of the atmosphere in [m] with default value 7000.
Returns
-------
QGPV : ndarray
Three-dimension numpy array of quasi-geostrophic potential vorticity;
dimension = (kmax x nlat x nlon)
dzdiv : ndarray
Three-dimension numpy array of the stretching term in QGPV;
dimension = (kmax x nlat x nlon)
"""
if nlat_s==None:
nlat_s=nlat//2
clat = np.cos(ylat*pi/180.)
clat = np.abs(clat) # Just to avoid the negative value at poles
# --- Next, calculate PV ---
av2 = np.empty_like(potential_temp) # dv/d(lon)
av3 = np.empty_like(potential_temp) # du/d(lat)
qgpv = np.empty_like(potential_temp) # av1+av2+av3+dzdiv
av1 = np.ones((kmax,nlat,nlon)) * 2*omega*np.sin(ylat[np.newaxis,:,np.newaxis]*pi/180.)
# Calculate the z-divergence term
zdiv = np.empty_like(potential_temp)
dzdiv = np.empty_like(potential_temp)
for kk in range(kmax): # This is more efficient
zdiv[kk,:nlat_s,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,:nlat_s,:]-t0_cs[kk])/stat_cs[kk]
zdiv[kk,-nlat_s:,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,-nlat_s:,:]-t0_cn[kk])/stat_cn[kk]
dzdiv[1:kmax-1,:,:] = np.exp(unih[1:kmax-1,np.newaxis,np.newaxis]/scale_height)* \
(zdiv[2:kmax,:,:]-zdiv[0:kmax-2,:,:]) \
/(unih[2:kmax,np.newaxis,np.newaxis]-unih[0:kmax-2,np.newaxis,np.newaxis])
dzdiv[0,:,:] = exp(unih[0]/scale_height)*(zdiv[1,:,:]-zdiv[0,:,:])/ \
(unih[1,np.newaxis,np.newaxis]-unih[0,np.newaxis,np.newaxis])
dzdiv[kmax-1,:,:] = exp(unih[kmax-1]/scale_height)*(zdiv[kmax-1,:,:]-zdiv[kmax-2,:,:])/ \
(unih[kmax-1,np.newaxis,np.newaxis]-unih[kmax-2,np.newaxis,np.newaxis])
qgpv = avort+dzdiv * av1
return qgpv, dzdiv
| import numpy as np
from math import pi,exp
def static_stability(height,area,theta,s_et=None,n_et=None):
"""
The function "static_stability" computes the vertical gradient (z-derivative)
of hemispheric-averaged potential temperature, i.e. d\tilde{theta}/dz in the def-
inition of QGPV in eq.(3) of Huang and Nakamura (2016), by central differencing.
At the boundary, the static stability is estimated by forward/backward differen-
cing involving two adjacent z-grid points:
i.e. stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0])
stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1])
Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues
Parameters
----------
height : sequence or array_like
Array of z-coordinate [in meters] with dimension = (kmax), equally spaced
area : ndarray
Two-dimension numpy array specifying differential areal element of each grid point;
dimension = (nlat, nlon).
theta : ndarray
Matrix of potential temperature [K] with dimension (kmax,nlat,nlon) or (kmax,nlat)
s_et : int, optional
Index of the latitude that defines the boundary of the Southern hemispheric domain;
initialized as nlat/2 if not input
n_et : int, optional
Index of the latitude that defines the boundary of the Southern hemispheric domain;
initialized as nlat/2 if not input
Returns
-------
t0_n : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Northern hemispheric domain with dimension = (kmax)
t0_s : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Southern hemispheric domain with dimension = (kmax)
stat_n : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric
domain with dimension = (kmax)
stat_s : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric
domain with dimension = (kmax)
"""
nlat = theta.shape[1]
if s_et==None:
s_et = nlat//2
if n_et==None:
n_et = nlat//2
stat_n = np.zeros(theta.shape[0])
stat_s = np.zeros(theta.shape[0])
if theta.ndim==3:
zonal_mean = np.mean(theta,axis=-1)
elif theta.ndim==2:
zonal_mean = theta
if area.ndim==2:
area_zonal_mean = np.mean(area,axis=-1)
elif area.ndim==1:
area_zonal_mean = area
csm_n_et = np.sum(area_zonal_mean[-n_et:])
csm_s_et = np.sum(area_zonal_mean[:s_et])
t0_n = np.sum(zonal_mean[:,-n_et:]*area_zonal_mean[np.newaxis,-n_et:],axis=-1)/csm_n_et
t0_s = np.sum(zonal_mean[:,:s_et]*area_zonal_mean[np.newaxis,:s_et],axis=-1)/csm_s_et
stat_n[1:-1] = (t0_n[2:]-t0_n[:-2])/(height[2:]-height[:-2])
stat_s[1:-1] = (t0_s[2:]-t0_s[:-2])/(height[2:]-height[:-2])
stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0])
stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1])
stat_s[0] = (t0_s[1]-t0_s[0])/(height[1]-height[0])
stat_s[-1] = (t0_s[-2]-t0_s[-1])/(height[-2]-height[-1])
return t0_n,t0_s,stat_n,stat_s
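# --- Hedged check (not part of the original module) ---
# The interior points of stat_n/stat_s above are centred differences
# (t0[k+1]-t0[k-1])/(height[k+1]-height[k-1]) with one-sided differences at the
# two boundaries; for an equally spaced height grid the interior values match
# numpy.gradient. Synthetic profile for illustration only.
import numpy as np

z_demo = np.linspace(0.0, 20000.0, 6)                # equally spaced pseudoheight [m]
theta_profile = 300.0 * np.exp(z_demo / 30000.0)     # synthetic mean profile
theta_demo = np.broadcast_to(theta_profile[:, None, None], (6, 4, 8)).copy()
area_demo = np.ones((4, 8))

_, _, stat_n_demo, _ = static_stability(z_demo, area_demo, theta_demo)
print(np.allclose(stat_n_demo[1:-1], np.gradient(theta_profile, z_demo)[1:-1]))  # True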
def compute_qgpv_givenvort(omega,nlat,nlon,kmax,unih,ylat,avort,potential_temp,
t0_cn,t0_cs,stat_cn,stat_cs,nlat_s=None,scale_height=7000.):
"""
The function "compute_qgpv_givenvort" computes the quasi-geostrophic potential
vorticity based on the absolute vorticity, potential temperature and static
stability given.
Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues
Parameters
----------
omega : float
Rotation rate of the planet.
nlat : int
Latitudinal dimension of the latitude grid.
nlon : int
Longitudinal dimension of the longitude grid.
kmax : int
Vertical dimension of the height grid.
unih : sequence or array_like
Numpy array of height in [meters]; dimension = (kmax)
ylat : sequence or array_like
Numpy array of latitudes in [degrees]; dimension = (nlat)
avort : ndarray
Three-dimension numpy array of absolute vorticity (i.e. relative vorticity
+ 2*Omega*sin(lat)) in [1/s]; dimension = (kmax x nlat x nlon)
potential_temp : ndarray
Three-dimension numpy array of potential temperature in [K];
dimension = (kmax x nlat x nlon)
t0_cn : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Northern hemispheric domain with dimension = (kmax)
t0_cs : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Southern hemispheric domain with dimension = (kmax)
stat_cn : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric
domain with dimension = (kmax)
stat_cs : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric
domain with dimension = (kmax)
scale_height : float
Scale height of the atmosphere in [m] with default value 7000.
Returns
-------
QGPV : ndarray
Three-dimension numpy array of quasi-geostrophic potential vorticity;
dimension = (kmax x nlat x nlon)
dzdiv : ndarray
Three-dimension numpy array of the stretching term in QGPV;
dimension = (kmax x nlat x nlon)
"""
if nlat_s==None:
nlat_s=nlat//2
clat = np.cos(ylat*pi/180.)
clat = np.abs(clat) # Just to avoid the negative value at poles
# --- Next, calculate PV ---
av2 = np.empty_like(potential_temp) # dv/d(lon)
av3 = np.empty_like(potential_temp) # du/d(lat)
qgpv = np.empty_like(potential_temp) # av1+av2+av3+dzdiv
av1 = np.ones((kmax,nlat,nlon)) * 2*omega*np.sin(ylat[np.newaxis,:,np.newaxis]*pi/180.)
# Calculate the z-divergence term
zdiv = np.empty_like(potential_temp)
dzdiv = np.empty_like(potential_temp)
for kk in range(kmax): # This is more efficient
zdiv[kk,:nlat_s,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,:nlat_s,:]-t0_cs[kk])/stat_cs[kk]
zdiv[kk,-nlat_s:,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,-nlat_s:,:]-t0_cn[kk])/stat_cn[kk]
dzdiv[1:kmax-1,:,:] = np.exp(unih[1:kmax-1,np.newaxis,np.newaxis]/scale_height)* \
(zdiv[2:kmax,:,:]-zdiv[0:kmax-2,:,:]) \
/(unih[2:kmax,np.newaxis,np.newaxis]-unih[0:kmax-2,np.newaxis,np.newaxis])
dzdiv[0,:,:] = exp(unih[0]/scale_height)*(zdiv[1,:,:]-zdiv[0,:,:])/ \
(unih[1,np.newaxis,np.newaxis]-unih[0,np.newaxis,np.newaxis])
dzdiv[kmax-1,:,:] = exp(unih[kmax-1]/scale_height)*(zdiv[kmax-1,:,:]-zdiv[kmax-2,:,:])/ \
(unih[kmax-1,np.newaxis,np.newaxis]-unih[kmax-2,np.newaxis,np.newaxis])
qgpv = avort+dzdiv * av1
return qgpv, dzdiv
| en | 0.647227 | The function "static_stability" computes the vertical gradient (z-derivative) of hemispheric-averaged potential temperature, i.e. d\tilde{theta}/dz in the def- inition of QGPV in eq.(3) of Huang and Nakamura (2016), by central differencing. At the boundary, the static stability is estimated by forward/backward differen- cing involving two adjacent z-grid points: i.e. stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0]) stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1]) Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues Parameters ---------- height : sequence or array_like Array of z-coordinate [in meters] with dimension = (kmax), equally spaced area : ndarray Two-dimension numpy array specifying differential areal element of each grid point; dimension = (nlat, nlon). theta : ndarray Matrix of potential temperature [K] with dimension (kmax,nlat,nlon) or (kmax,nlat) s_et : int, optional Index of the latitude that defines the boundary of the Southern hemispheric domain; initialized as nlat/2 if not input n_et : int, optional Index of the latitude that defines the boundary of the Southern hemispheric domain; initialized as nlat/2 if not input Returns ------- t0_n : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Northern hemispheric domain with dimension = (kmax) t0_s : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Southern hemispheric domain with dimension = (kmax) stat_n : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric domain with dimension = (kmax) stat_s : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric domain with dimension = (kmax) The function "compute_qgpv_givenvort" computes the quasi-geostrophic potential vorticity based on the absolute vorticity, potential temperature and static stability given. Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues Parameters ---------- omega : float, optional Rotation rate of the planet. nlat : int Latitudinal dimension of the latitude grid. nlon : int Longitudinal dimension of the longitude grid. kmax : int Vertical dimension of the height grid. unih : sequence or array_like Numpy array of height in [meters]; dimension = (kmax) ylat : sequence or array_like Numpy array of latitudes in [degrees]; dimension = (nlat) avort : ndarray Three-dimension numpy array of absolute vorticity (i.e. relative vorticity + 2*Omega*sin(lat)) in [1/s]; dimension = (kmax x nlat x nlon) potential_temp : ndarray Three-dimension numpy array of potential temperature in [K]; dimension = (kmax x nlat x nlon) t0_cn : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Northern hemispheric domain with dimension = (kmax) t0_cs : sequence or array_like Area-weighted average of potential temperature (\tilde{\theta} in HN16) in the Southern hemispheric domain with dimension = (kmax) stat_cn : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric domain with dimension = (kmax) stat_cs : sequence or array_like Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric domain with dimension = (kmax) scale_height : float Scale height of the atmosphere in [m] with default value 7000. 
Returns ------- QGPV : ndarray Three-dimension numpy array of quasi-geostrophic potential vorticity; dimension = (kmax x nlat x nlon) dzdiv : ndarray Three-dimension numpy array of the stretching term in QGPV; dimension = (kmax x nlat x nlon) # Just to avoid the negative value at poles # --- Next, calculate PV --- # dv/d(lon) # du/d(lat) # av1+av2+av3+dzdiv # Calculate the z-divergence term # This is more efficient | 3.073622 | 3 |
src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py | JennyLawrance/azure-cli | 0 | 9325 | <filename>src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import (get_location_type,
file_type,
get_resource_name_completion_list,
get_enum_type,
get_three_state_flag)
from azure.mgmt.iothub.models.iot_hub_client_enums import IotHubSku
from azure.mgmt.iothubprovisioningservices.models.iot_dps_client_enums import (IotDpsSku,
AllocationPolicy,
AccessRightsDescription)
from .custom import KeyType, SimpleAccessRights
from ._validators import validate_policy_permissions
from ._completers import get_device_id_completion_list
hub_name_type = CLIArgumentType(
completer=get_resource_name_completion_list('Microsoft.Devices/IotHubs'),
help='IoT Hub name.')
dps_name_type = CLIArgumentType(
options_list=['--dps-name'],
completer=get_resource_name_completion_list('Microsoft.Devices/ProvisioningServices'),
help='IoT Provisioning Service name')
def load_arguments(self, _): # pylint: disable=too-many-statements
# Arguments for IoT DPS
with self.argument_context('iot dps') as c:
c.argument('dps_name', dps_name_type, options_list=['--name', '-n'], id_part='name')
with self.argument_context('iot dps create') as c:
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Provisioning Service. Default is the location of target resource group.')
c.argument('sku', arg_type=get_enum_type(IotDpsSku),
help='Pricing tier for the IoT provisioning service.')
c.argument('unit', help='Units in your IoT Provisioning Service.', type=int)
for subgroup in ['access-policy', 'linked-hub', 'certificate']:
with self.argument_context('iot dps {}'.format(subgroup)) as c:
c.argument('dps_name', options_list=['--dps-name'], id_part=None)
with self.argument_context('iot dps access-policy') as c:
c.argument('access_policy_name', options_list=['--access-policy-name', '--name', '-n'],
help='A friendly name for DPS access policy.')
with self.argument_context('iot dps access-policy create') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps access-policy update') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps linked-hub') as c:
c.argument('linked_hub', options_list=['--linked-hub'], help='Host name of linked IoT Hub.')
with self.argument_context('iot dps linked-hub create') as c:
c.argument('connection_string', help='Connection string of the IoT hub.')
c.argument('location', get_location_type(self.cli_ctx),
help='Location of the IoT hub.')
c.argument('apply_allocation_policy',
help='A boolean indicating whether to apply allocation policy to the IoT hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps linked-hub update') as c:
c.argument('apply_allocation_policy',
help='A boolean indicating whether to apply allocation policy to the IoT hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps allocation-policy update') as c:
c.argument('allocation_policy', options_list=['--policy', '-p'], arg_type=get_enum_type(AllocationPolicy),
help='Allocation policy for the IoT provisioning service.')
with self.argument_context('iot dps certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--certificate-name', '--name', '-n'],
help='A friendly name for the certificate.')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
# Arguments for IoT Hub
with self.argument_context('iot') as c:
c.argument('device_id', options_list=['--device-id', '-d'], help='Device Id.',
completer=get_device_id_completion_list)
with self.argument_context('iot hub') as c:
c.argument('hub_name', hub_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
for subgroup in ['consumer-group', 'policy', 'job', 'certificate']:
with self.argument_context('iot hub {}'.format(subgroup)) as c:
c.argument('hub_name', options_list=['--hub-name'])
with self.argument_context('iot device') as c:
c.argument('hub_name', hub_name_type)
with self.argument_context('iot hub certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--name', '-n'], help='A friendly name for the certificate.')
with self.argument_context('iot hub consumer-group') as c:
c.argument('consumer_group_name', options_list=['--name', '-n'], id_part='child_name_2',
help='Event hub consumer group name.')
c.argument('event_hub_name', id_part='child_name_1', help='Event hub endpoint name.')
with self.argument_context('iot hub policy') as c:
c.argument('policy_name', options_list=['--name', '-n'], id_part='child_name_1',
help='Shared access policy name.')
permission_values = ', '.join([x.value for x in SimpleAccessRights])
c.argument('permissions', nargs='*', validator=validate_policy_permissions, type=str.lower,
help='Permissions of shared access policy. Use space-separated list for multiple permissions. '
'Possible values: {}'.format(permission_values))
with self.argument_context('iot hub job') as c:
c.argument('job_id', id_part='child_name_1', help='Job Id.')
with self.argument_context('iot hub create') as c:
c.argument('hub_name', completer=None)
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Hub. Default is the location of target resource group.')
c.argument('sku', arg_type=get_enum_type(IotHubSku),
help='Pricing tier for Azure IoT Hub. Default value is F1, which is free. '
'Note that only one free IoT hub instance is allowed in each '
'subscription. Exception will be thrown if free instances exceed one.')
c.argument('unit', help='Units in your IoT Hub.', type=int)
c.argument('partition_count', help='The number of partitions for device-to-cloud messages.', type=int)
with self.argument_context('iot hub show-connection-string') as c:
c.argument('policy_name', help='Shared access policy to use.')
c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.')
with self.argument_context('iot device create') as c:
c.argument('device_id', completer=None)
with self.argument_context('iot device create', arg_group='X.509 Certificate') as c:
c.argument('x509', action='store_true', help='Use X.509 certificate for device authentication.')
c.argument('primary_thumbprint', help='Primary X.509 certificate thumbprint to authenticate device.')
c.argument('secondary_thumbprint', help='Secondary X.509 certificate thumbprint to authenticate device.')
c.argument('valid_days', type=int, help='Number of days the generated self-signed X.509 certificate should be '
'valid for. Default validity is 365 days.')
c.argument('output_dir', help='Output directory for generated self-signed X.509 certificate. '
'Default is current working directory.')
with self.argument_context('iot device list') as c:
c.argument('top', help='Maximum number of device identities to return.', type=int)
with self.argument_context('iot device delete') as c:
c.argument('etag', help='ETag of the target device. It is used for the purpose of optimistic '
'concurrency. Delete operation will be performed only if the specified '
'ETag matches the value maintained by the server, indicating that the '
'device identity has not been modified since it was retrieved. Default '
'value is set to wildcard character (*) to force an unconditional '
'delete.')
with self.argument_context('iot device show-connection-string') as c:
c.argument('top', type=int, help='Maximum number of connection strings to return.')
c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.')
with self.argument_context('iot device message') as c:
c.argument('lock_token', help='Message lock token.')
with self.argument_context('iot device message send', arg_group='Messaging') as c:
c.argument('data', help='Device-to-cloud message body.')
c.argument('message_id', help='Device-to-cloud message Id.')
c.argument('correlation_id', help='Device-to-cloud message correlation Id.')
c.argument('user_id', help='Device-to-cloud message user Id.')
with self.argument_context('iot device message receive') as c:
c.argument('lock_timeout', type=int,
help='In case a message returned to this call, this specifies the amount of '
'time in seconds, the message will be invisible to other receive calls.')
with self.argument_context('iot device export') as c:
c.argument('blob_container_uri',
help='Blob Shared Access Signature URI with write access to a blob container.'
'This is used to output the status of the job and the results.')
c.argument('include_keys', action='store_true',
help='If set, keys are exported normally. Otherwise, keys are set to null in '
'export output.')
with self.argument_context('iot device import') as c:
c.argument('input_blob_container_uri',
help='Blob Shared Access Signature URI with read access to a blob container.'
'This blob contains the operations to be performed on the identity '
'registry ')
c.argument('output_blob_container_uri',
help='Blob Shared Access Signature URI with write access to a blob container.'
'This is used to output the status of the job and the results.')
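# --- Hedged illustration (not part of the original module) ---
# Every block in load_arguments() follows the same pattern: open an
# argument_context for a command or command group and declare each parameter
# with c.argument(). The command name 'iot hub example-op' and the
# 'widget_count' parameter below are hypothetical and exist only to show the
# shape of such a registration; they are not real CLI options.
def _load_example_arguments(self):
    with self.argument_context('iot hub example-op') as c:
        c.argument('hub_name', hub_name_type, options_list=['--name', '-n'])
        c.argument('widget_count', type=int,
                   help='Hypothetical integer parameter for illustration only.')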
| <filename>src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/_params.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from argcomplete.completers import FilesCompleter
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import (get_location_type,
file_type,
get_resource_name_completion_list,
get_enum_type,
get_three_state_flag)
from azure.mgmt.iothub.models.iot_hub_client_enums import IotHubSku
from azure.mgmt.iothubprovisioningservices.models.iot_dps_client_enums import (IotDpsSku,
AllocationPolicy,
AccessRightsDescription)
from .custom import KeyType, SimpleAccessRights
from ._validators import validate_policy_permissions
from ._completers import get_device_id_completion_list
hub_name_type = CLIArgumentType(
completer=get_resource_name_completion_list('Microsoft.Devices/IotHubs'),
help='IoT Hub name.')
dps_name_type = CLIArgumentType(
options_list=['--dps-name'],
completer=get_resource_name_completion_list('Microsoft.Devices/ProvisioningServices'),
help='IoT Provisioning Service name')
def load_arguments(self, _): # pylint: disable=too-many-statements
# Arguments for IoT DPS
with self.argument_context('iot dps') as c:
c.argument('dps_name', dps_name_type, options_list=['--name', '-n'], id_part='name')
with self.argument_context('iot dps create') as c:
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Provisioning Service. Default is the location of target resource group.')
c.argument('sku', arg_type=get_enum_type(IotDpsSku),
help='Pricing tier for the IoT provisioning service.')
c.argument('unit', help='Units in your IoT Provisioning Service.', type=int)
for subgroup in ['access-policy', 'linked-hub', 'certificate']:
with self.argument_context('iot dps {}'.format(subgroup)) as c:
c.argument('dps_name', options_list=['--dps-name'], id_part=None)
with self.argument_context('iot dps access-policy') as c:
c.argument('access_policy_name', options_list=['--access-policy-name', '--name', '-n'],
help='A friendly name for DPS access policy.')
with self.argument_context('iot dps access-policy create') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps access-policy update') as c:
c.argument('rights', options_list=['--rights', '-r'], nargs='+',
arg_type=get_enum_type(AccessRightsDescription),
help='Access rights for the IoT provisioning service. Use space-separated list for multiple rights.')
c.argument('primary_key', help='Primary SAS key value.')
c.argument('secondary_key', help='Secondary SAS key value.')
with self.argument_context('iot dps linked-hub') as c:
c.argument('linked_hub', options_list=['--linked-hub'], help='Host name of linked IoT Hub.')
with self.argument_context('iot dps linked-hub create') as c:
c.argument('connection_string', help='Connection string of the IoT hub.')
c.argument('location', get_location_type(self.cli_ctx),
help='Location of the IoT hub.')
c.argument('apply_allocation_policy',
help='A boolean indicating whether to apply allocation policy to the IoT hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps linked-hub update') as c:
c.argument('apply_allocation_policy',
help='A boolean indicating whether to apply allocation policy to the IoT hub.',
arg_type=get_three_state_flag())
c.argument('allocation_weight', help='Allocation weight of the IoT hub.')
with self.argument_context('iot dps allocation-policy update') as c:
c.argument('allocation_policy', options_list=['--policy', '-p'], arg_type=get_enum_type(AllocationPolicy),
help='Allocation policy for the IoT provisioning service.')
with self.argument_context('iot dps certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--certificate-name', '--name', '-n'],
help='A friendly name for the certificate.')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
# Arguments for IoT Hub
with self.argument_context('iot') as c:
c.argument('device_id', options_list=['--device-id', '-d'], help='Device Id.',
completer=get_device_id_completion_list)
with self.argument_context('iot hub') as c:
c.argument('hub_name', hub_name_type, options_list=['--name', '-n'], id_part='name')
c.argument('etag', options_list=['--etag', '-e'], help='Entity Tag (etag) of the object.')
for subgroup in ['consumer-group', 'policy', 'job', 'certificate']:
with self.argument_context('iot hub {}'.format(subgroup)) as c:
c.argument('hub_name', options_list=['--hub-name'])
with self.argument_context('iot device') as c:
c.argument('hub_name', hub_name_type)
with self.argument_context('iot hub certificate') as c:
c.argument('certificate_path', options_list=['--path', '-p'], type=file_type,
completer=FilesCompleter([".cer", ".pem"]), help='The path to the file containing the certificate.')
c.argument('certificate_name', options_list=['--name', '-n'], help='A friendly name for the certificate.')
with self.argument_context('iot hub consumer-group') as c:
c.argument('consumer_group_name', options_list=['--name', '-n'], id_part='child_name_2',
help='Event hub consumer group name.')
c.argument('event_hub_name', id_part='child_name_1', help='Event hub endpoint name.')
with self.argument_context('iot hub policy') as c:
c.argument('policy_name', options_list=['--name', '-n'], id_part='child_name_1',
help='Shared access policy name.')
permission_values = ', '.join([x.value for x in SimpleAccessRights])
c.argument('permissions', nargs='*', validator=validate_policy_permissions, type=str.lower,
help='Permissions of shared access policy. Use space-separated list for multiple permissions. '
'Possible values: {}'.format(permission_values))
with self.argument_context('iot hub job') as c:
c.argument('job_id', id_part='child_name_1', help='Job Id.')
with self.argument_context('iot hub create') as c:
c.argument('hub_name', completer=None)
c.argument('location', get_location_type(self.cli_ctx),
help='Location of your IoT Hub. Default is the location of target resource group.')
c.argument('sku', arg_type=get_enum_type(IotHubSku),
help='Pricing tier for Azure IoT Hub. Default value is F1, which is free. '
'Note that only one free IoT hub instance is allowed in each '
'subscription. Exception will be thrown if free instances exceed one.')
c.argument('unit', help='Units in your IoT Hub.', type=int)
c.argument('partition_count', help='The number of partitions for device-to-cloud messages.', type=int)
with self.argument_context('iot hub show-connection-string') as c:
c.argument('policy_name', help='Shared access policy to use.')
c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.')
with self.argument_context('iot device create') as c:
c.argument('device_id', completer=None)
with self.argument_context('iot device create', arg_group='X.509 Certificate') as c:
c.argument('x509', action='store_true', help='Use X.509 certificate for device authentication.')
c.argument('primary_thumbprint', help='Primary X.509 certificate thumbprint to authenticate device.')
c.argument('secondary_thumbprint', help='Secondary X.509 certificate thumbprint to authenticate device.')
c.argument('valid_days', type=int, help='Number of days the generated self-signed X.509 certificate should be '
'valid for. Default validity is 365 days.')
c.argument('output_dir', help='Output directory for generated self-signed X.509 certificate. '
'Default is current working directory.')
with self.argument_context('iot device list') as c:
c.argument('top', help='Maximum number of device identities to return.', type=int)
with self.argument_context('iot device delete') as c:
c.argument('etag', help='ETag of the target device. It is used for the purpose of optimistic '
'concurrency. Delete operation will be performed only if the specified '
'ETag matches the value maintained by the server, indicating that the '
'device identity has not been modified since it was retrieved. Default '
'value is set to wildcard character (*) to force an unconditional '
'delete.')
with self.argument_context('iot device show-connection-string') as c:
c.argument('top', type=int, help='Maximum number of connection strings to return.')
c.argument('key_type', arg_type=get_enum_type(KeyType), options_list=['--key'], help='The key to use.')
with self.argument_context('iot device message') as c:
c.argument('lock_token', help='Message lock token.')
with self.argument_context('iot device message send', arg_group='Messaging') as c:
c.argument('data', help='Device-to-cloud message body.')
c.argument('message_id', help='Device-to-cloud message Id.')
c.argument('correlation_id', help='Device-to-cloud message correlation Id.')
c.argument('user_id', help='Device-to-cloud message user Id.')
with self.argument_context('iot device message receive') as c:
c.argument('lock_timeout', type=int,
help='In case a message returned to this call, this specifies the amount of '
'time in seconds, the message will be invisible to other receive calls.')
with self.argument_context('iot device export') as c:
c.argument('blob_container_uri',
help='Blob Shared Access Signature URI with write access to a blob container.'
'This is used to output the status of the job and the results.')
c.argument('include_keys', action='store_true',
help='If set, keys are exported normally. Otherwise, keys are set to null in '
'export output.')
with self.argument_context('iot device import') as c:
c.argument('input_blob_container_uri',
help='Blob Shared Access Signature URI with read access to a blob container.'
'This blob contains the operations to be performed on the identity '
'registry ')
c.argument('output_blob_container_uri',
help='Blob Shared Access Signature URI with write access to a blob container.'
'This is used to output the status of the job and the results.')
| en | 0.451115 | # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint: disable=too-many-statements # Arguments for IoT DPS # Arguments for IoT Hub | 1.745245 | 2 |
metrics-calculator/tests/integration/test_s3.py | nhsconnect/prm-practice-migration-dashboard | 0 | 9326 | <reponame>nhsconnect/prm-practice-migration-dashboard<filename>metrics-calculator/tests/integration/test_s3.py<gh_stars>0
import boto3
import gzip
from moto import mock_s3
import pytest
import os
from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist
from tests.builders.file import build_gzip_csv
@pytest.fixture(scope='function')
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
os.environ['AWS_SECURITY_TOKEN'] = 'testing'
os.environ['AWS_SESSION_TOKEN'] = 'testing'
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
@pytest.fixture(scope='function')
def s3(aws_credentials):
with mock_s3():
yield boto3.resource('s3', region_name='us-east-1')
@mock_s3
def test_read_object_s3_returns_object_content(s3):
bucket = s3.create_bucket(Bucket="test_bucket")
s3_object = bucket.Object("test_object.csv.gz")
gzipped_content = build_gzip_csv(
header=["id", "message", "comment"],
rows=[["123", "A message", "A comment"], [
"321", "Another message", "Another comment"]],
)
s3_object.put(
Body=gzipped_content
)
expected = "id,message,comment\n123,A message,A comment\n321,Another message,Another comment"
csv_stream = read_object_s3(s3, "s3://test_bucket/test_object.csv.gz")
with gzip.open(csv_stream, mode="rt") as f:
actual = f.read()
assert actual == expected
@mock_s3
def test_write_object_s3_writes_object_content(s3):
s3.create_bucket(Bucket="test_bucket")
json_string = b'{"fruit": "mango"}'
write_object_s3(s3, "s3://test_bucket/test_object.json", json_string)
s3_object_response = s3.Object("test_bucket", "test_object.json").get()
assert s3_object_response["Body"].read() == json_string
@mock_s3
def test_write_object_s3_writes_object_content_with_metadata(s3):
s3.create_bucket(Bucket="test_bucket")
json_string = b'{"fruit": "mango"}'
metadata = {
"start_date": "start-date",
"end_date": "end-date"
}
write_object_s3(s3, "s3://test_bucket/test_object.json", json_string, metadata)
s3_object_response = s3.Object("test_bucket", "test_object.json").get()
assert s3_object_response["Metadata"] == metadata
@mock_s3
def test_objects_exist_returns_true_when_all_objects_exist(s3):
s3.create_bucket(Bucket="test_bucket")
object_one = "object-one"
object_two = "object-two"
write_object_s3(s3, f"s3://test_bucket/{object_one}", 'object-one-content')
write_object_s3(s3, f"s3://test_bucket/{object_two}", 'object-two-content')
result = objects_exist(s3, "test_bucket", [object_one, object_two])
assert result
@mock_s3
def test_objects_exist_returns_false_when_only_one_object_exists(s3):
s3.create_bucket(Bucket="test_bucket")
object_one = "object-one"
object_two = "object-two"
write_object_s3(s3, f"s3://test_bucket/{object_one}", 'object-one-content')
result = objects_exist(s3, "test_bucket", [object_one, object_two])
assert not result
@mock_s3
def test_objects_exist_returns_false_when_no_objects_exist(s3):
s3.create_bucket(Bucket="test_bucket")
object_one = "object-one"
object_two = "object-two"
result = objects_exist(s3, "test_bucket", [object_one, object_two])
assert not result
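# --- Hedged sketch (not part of the test module) ---
# chalicelib.s3 itself is not shown here; the helpers below are only a guess at
# minimal implementations consistent with the tests above, using the same boto3
# resource API that the fixtures provide. The real module may well differ.
from urllib.parse import urlparse
from botocore.exceptions import ClientError


def _write_object_s3_sketch(s3, object_uri, body, metadata=None):
    parsed = urlparse(object_uri)                      # s3://bucket/key
    obj = s3.Object(parsed.netloc, parsed.path.lstrip("/"))
    obj.put(Body=body, Metadata=metadata or {})


def _objects_exist_sketch(s3, bucket_name, keys):
    for key in keys:
        try:
            s3.Object(bucket_name, key).load()         # HEAD request; raises if missing
        except ClientError:
            return False
    return True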
| import boto3
import gzip
from moto import mock_s3
import pytest
import os
from chalicelib.s3 import read_object_s3, write_object_s3, objects_exist
from tests.builders.file import build_gzip_csv
@pytest.fixture(scope='function')
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
os.environ['AWS_SECURITY_TOKEN'] = 'testing'
os.environ['AWS_SESSION_TOKEN'] = 'testing'
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
@pytest.fixture(scope='function')
def s3(aws_credentials):
with mock_s3():
yield boto3.resource('s3', region_name='us-east-1')
@mock_s3
def test_read_object_s3_returns_object_content(s3):
bucket = s3.create_bucket(Bucket="test_bucket")
s3_object = bucket.Object("test_object.csv.gz")
gzipped_content = build_gzip_csv(
header=["id", "message", "comment"],
rows=[["123", "A message", "A comment"], [
"321", "Another message", "Another comment"]],
)
s3_object.put(
Body=gzipped_content
)
expected = "id,message,comment\n123,A message,A comment\n321,Another message,Another comment"
csv_stream = read_object_s3(s3, "s3://test_bucket/test_object.csv.gz")
with gzip.open(csv_stream, mode="rt") as f:
actual = f.read()
assert actual == expected
@mock_s3
def test_write_object_s3_writes_object_content(s3):
s3.create_bucket(Bucket="test_bucket")
json_string = b'{"fruit": "mango"}'
write_object_s3(s3, "s3://test_bucket/test_object.json", json_string)
s3_object_response = s3.Object("test_bucket", "test_object.json").get()
assert s3_object_response["Body"].read() == json_string
@mock_s3
def test_write_object_s3_writes_object_content_with_metadata(s3):
s3.create_bucket(Bucket="test_bucket")
json_string = b'{"fruit": "mango"}'
metadata = {
"start_date": "start-date",
"end_date": "end-date"
}
write_object_s3(s3, "s3://test_bucket/test_object.json", json_string, metadata)
s3_object_response = s3.Object("test_bucket", "test_object.json").get()
assert s3_object_response["Metadata"] == metadata
@mock_s3
def test_objects_exist_returns_true_when_all_objects_exist(s3):
s3.create_bucket(Bucket="test_bucket")
object_one = "object-one"
object_two = "object-two"
write_object_s3(s3, f"s3://test_bucket/{object_one}", 'object-one-content')
write_object_s3(s3, f"s3://test_bucket/{object_two}", 'object-two-content')
result = objects_exist(s3, "test_bucket", [object_one, object_two])
assert result
@mock_s3
def test_objects_exist_returns_false_when_only_one_object_exists(s3):
s3.create_bucket(Bucket="test_bucket")
object_one = "object-one"
object_two = "object-two"
write_object_s3(s3, f"s3://test_bucket/{object_one}", 'object-one-content')
result = objects_exist(s3, "test_bucket", [object_one, object_two])
assert not result
@mock_s3
def test_objects_exist_returns_false_when_no_objects_exist(s3):
s3.create_bucket(Bucket="test_bucket")
object_one = "object-one"
object_two = "object-two"
result = objects_exist(s3, "test_bucket", [object_one, object_two])
assert not result | en | 0.693671 | Mocked AWS Credentials for moto. | 2.12308 | 2 |
image_analogy/losses/patch_matcher.py | kaldap/image-analogies | 3722 | 9327 | import numpy as np
import scipy.interpolate
import scipy.ndimage
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
def _calc_patch_grid_dims(shape, patch_size, patch_stride):
x_w, x_h, x_c = shape
num_rows = 1 + (x_h - patch_size) // patch_stride
num_cols = 1 + (x_w - patch_size) // patch_stride
return num_rows, num_cols
def make_patch_grid(x, patch_size, patch_stride=1):
'''x shape: (num_channels, rows, cols)'''
x = x.transpose(2, 1, 0)
patches = extract_patches_2d(x, (patch_size, patch_size))
x_w, x_h, x_c = x.shape
num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
patches = patches.transpose((0, 1, 4, 2, 3))
#patches = np.rollaxis(patches, -1, 2)
return patches
def combine_patches_grid(in_patches, out_shape):
'''Reconstruct an image from these `patches`
input shape: (rows, cols, channels, patch_row, patch_col)
'''
num_rows, num_cols = in_patches.shape[:2]
num_channels = in_patches.shape[-3]
patch_size = in_patches.shape[-1]
num_patches = num_rows * num_cols
in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size)) # (patches, channels, pr, pc)
in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
recon = reconstruct_from_patches_2d(in_patches, out_shape)
return recon.transpose(2, 1, 0).astype(np.float32)
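# --- Hedged usage sketch (not part of the original module) ---
# Round trip make_patch_grid -> combine_patches_grid on a random array in the
# (num_channels, rows, cols) layout documented above. Because every patch comes
# from the same array, averaging the overlaps reconstructs it up to float
# rounding; out_shape follows the shape[::-1] convention used in __main__ below.
import numpy as np

x_demo = np.random.rand(3, 16, 24).astype(np.float32)     # (channels, rows, cols)
grid_demo = make_patch_grid(x_demo, patch_size=3)
print(grid_demo.shape)                                     # (14, 22, 3, 3, 3)
recon_demo = combine_patches_grid(grid_demo, x_demo.shape[::-1])
print(recon_demo.shape, np.allclose(recon_demo, x_demo, atol=1e-5))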
class PatchMatcher(object):
'''A matcher of image patches inspired by the PatchMatch algorithm.
image shape: (width, height, channels)
'''
def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
num_propagation_steps=5, num_random_steps=5, random_max_radius=1.0, random_scale=0.5):
self.input_shape = input_shape
self.patch_size = patch_size
self.patch_stride = patch_stride
self.jump_size = jump_size
self.num_propagation_steps = num_propagation_steps
self.num_random_steps = num_random_steps
self.random_max_radius = random_max_radius
self.random_scale = random_scale
self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
self.target_patches = make_patch_grid(target_img, patch_size)
self.target_patches_normed = self.normalize_patches(self.target_patches)
self.coords = np.random.uniform(0.0, 1.0, # TODO: switch to pixels
(2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
self.similarity = np.zeros((self.num_input_rows, self.num_input_cols), dtype=np.float32)
self.min_propagration_row = 1.0 / self.num_input_rows
self.min_propagration_col = 1.0 / self.num_input_cols
self.delta_row = np.array([[[self.min_propagration_row]], [[0.0]]])
self.delta_col = np.array([[[0.0]], [[self.min_propagration_col]]])
def update(self, input_img, reverse_propagation=False):
input_patches = self.get_patches_for(input_img)
self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)
def update_with_patches(self, input_patches, reverse_propagation=False):
self._propagate(input_patches, reverse_propagation=reverse_propagation)
self._random_update(input_patches)
def get_patches_for(self, img):
return make_patch_grid(img, self.patch_size)
def normalize_patches(self, patches):
norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
return patches / norm
def _propagate(self, input_patches, reverse_propagation=False):
if reverse_propagation:
roll_direction = 1
else:
roll_direction = -1
sign = float(roll_direction)
for step_i in range(self.num_propagation_steps):
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
coords_row, similarity_row = self.eval_state(new_coords, input_patches)
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
coords_col, similarity_col = self.eval_state(new_coords, input_patches)
self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)
def _random_update(self, input_patches):
for alpha in range(1, self.num_random_steps + 1): # NOTE this should actually stop when the move is < 1
new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
self.coords, self.similarity = self.eval_state(new_coords, input_patches)
def eval_state(self, new_coords, input_patches):
new_similarity = self.patch_similarity(input_patches, new_coords)
delta_similarity = new_similarity - self.similarity
coords = np.where(delta_similarity > 0, new_coords, self.coords)
best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
return coords, best_similarity
def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
delta_similarity = similarity_a - similarity_b
best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
return best_coords, best_similarity
def patch_similarity(self, source, coords):
'''Check the similarity of the patches specified in coords.'''
target_vals = self.lookup_coords(self.target_patches_normed, coords)
err = source * target_vals
return np.sum(err, axis=(2, 3, 4))
def clip_coords(self, coords):
# TODO: should this all be in pixel space?
coords = np.clip(coords, 0.0, 1.0)
return coords
def lookup_coords(self, x, coords):
x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
return x[i_coords[0], i_coords[1]]
def get_reconstruction(self, patches=None, combined=None):
if combined is not None:
patches = make_patch_grid(combined, self.patch_size)
if patches is None:
patches = self.target_patches
patches = self.lookup_coords(patches, self.coords)
recon = combine_patches_grid(patches, self.input_shape)
return recon
def scale(self, new_shape, new_target_img):
'''Create a new matcher of the given shape and replace its
state with a scaled up version of the current matcher's state.
'''
new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
patch_stride=self.patch_stride, jump_size=self.jump_size,
num_propagation_steps=self.num_propagation_steps,
num_random_steps=self.num_random_steps,
random_max_radius=self.random_max_radius,
random_scale=self.random_scale)
new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
return new_matcher
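# --- Hedged illustration (not part of the original module) ---
# normalize_patches() divides every patch by its L2 norm, so patch_similarity()
# is a normalized cross-correlation (cosine similarity) between input and target
# patches, bounded above by 1. Small synthetic check of the normalization:
import numpy as np

feat_target_demo = np.random.rand(8, 6, 7)                 # (channels, rows, cols)
matcher_demo = PatchMatcher((7, 6, 8), feat_target_demo, patch_size=1)
norms_demo = np.sqrt((matcher_demo.target_patches_normed ** 2).sum(axis=(2, 3, 4)))
print(np.allclose(norms_demo, 1.0))                        # every target patch has unit norm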
def congrid(a, newdims, method='linear', centre=False, minusone=False):
'''Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL''s congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
method:
neighbour - closest value from original data
nearest and linear - uses n x 1-D interpolations using
scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)
spline - uses ndimage.map_coordinates
centre:
True - interpolation points are at the centres of the bins
False - points are at the front edge of the bin
minusone:
For example- inarray.shape = (i,j) & new dimensions = (x,y)
False - inarray is resampled by factors of (i/x) * (j/y)
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
This prevents extrapolation one element beyond bounds of input array.
'''
if a.dtype not in [np.float64, np.float32]:
    a = a.astype(float)
m1 = int(minusone)
ofs = int(centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print("[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions.")
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
base = np.indices(newdims.astype(int))[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
newa = a[tuple(cd)]
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
olddims = [np.arange(i, dtype=float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
trorder = [ndims - 1] + list(range( ndims - 1 ))
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
newcoords_dims = list(range(newcoords.ndim))
#make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print("Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported.")
return None
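# --- Hedged usage sketch (not part of the original module) ---
# Resample a small 2-D array onto a finer grid with congrid. minusone=True keeps
# every lookup coordinate inside the bounds of the source array, which is the
# safer choice for the 'neighbour' and 'linear' methods shown here.
import numpy as np

small_demo = np.arange(24, dtype=np.float64).reshape(4, 6)
near_demo = congrid(small_demo, (8, 12), method='neighbour', minusone=True)
lin_demo = congrid(small_demo, (8, 12), method='linear', minusone=True)
print(near_demo.shape, lin_demo.shape)       # (8, 12) (8, 12)
print(lin_demo[0, 0], lin_demo[-1, -1])      # corner values preserved: 0.0 23.0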
if __name__ == '__main__':
import sys
import time
from scipy.misc import imsave
from image_analogy.img_utils import load_image, preprocess_image, deprocess_image
content_image_path, style_image_path, output_prefix = sys.argv[1:]
jump_size = 1.0
num_steps = 7
patch_size = 1
patch_stride = 1
feat_chans = 512
feat_style_shape = (feat_chans, 12, 18)
feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
feat_in_shape = (feat_chans, 17, 10)
feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size)
feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
for i in range(num_steps):
matcher.update_with_patches(feat_in_normed)
r = matcher.get_reconstruction()
content_img_img = load_image(content_image_path)
content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0))
style_img = load_image(style_image_path)
style_n_channels, style_n_rows, style_n_cols = content_img_img.shape[::-1]
style_img = preprocess_image(
load_image(style_image_path), style_n_cols, style_n_rows)[0]#.transpose((2,1,0))
pg = make_patch_grid(content_img, patch_size)
result = combine_patches_grid(pg, content_img.shape[::-1])
outimg = deprocess_image(result, contrast_percent=0)
imsave(output_prefix + '_bestre.png', outimg)
# # #
matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size)
for i in range(num_steps):
start = time.time()
matcher.update(content_img, reverse_propagation=bool(i % 2))
print(matcher.similarity.min(), matcher.similarity.max(), matcher.similarity.mean())
end = time.time()
#print end-start
start = time.time()
result = matcher.get_reconstruction(patches=matcher.target_patches)
print(result.shape)
end = time.time()
print(end-start)
outimg = deprocess_image(result, contrast_percent=0)
# # imsave takes (rows, cols, channels)
imsave(output_prefix + '_best.png', outimg)
| import numpy as np
import scipy.interpolate
import scipy.ndimage
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
def _calc_patch_grid_dims(shape, patch_size, patch_stride):
x_w, x_h, x_c = shape
num_rows = 1 + (x_h - patch_size) // patch_stride
num_cols = 1 + (x_w - patch_size) // patch_stride
return num_rows, num_cols
def make_patch_grid(x, patch_size, patch_stride=1):
'''x shape: (num_channels, rows, cols)'''
x = x.transpose(2, 1, 0)
patches = extract_patches_2d(x, (patch_size, patch_size))
x_w, x_h, x_c = x.shape
num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
patches = patches.transpose((0, 1, 4, 2, 3))
#patches = np.rollaxis(patches, -1, 2)
return patches
def combine_patches_grid(in_patches, out_shape):
'''Reconstruct an image from these `patches`
input shape: (rows, cols, channels, patch_row, patch_col)
'''
num_rows, num_cols = in_patches.shape[:2]
num_channels = in_patches.shape[-3]
patch_size = in_patches.shape[-1]
num_patches = num_rows * num_cols
in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size)) # (patches, channels, pr, pc)
in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
recon = reconstruct_from_patches_2d(in_patches, out_shape)
return recon.transpose(2, 1, 0).astype(np.float32)
class PatchMatcher(object):
'''A matcher of image patches inspired by the PatchMatch algorithm.
image shape: (width, height, channels)
'''
def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
num_propagation_steps=5, num_random_steps=5, random_max_radius=1.0, random_scale=0.5):
self.input_shape = input_shape
self.patch_size = patch_size
self.patch_stride = patch_stride
self.jump_size = jump_size
self.num_propagation_steps = num_propagation_steps
self.num_random_steps = num_random_steps
self.random_max_radius = random_max_radius
self.random_scale = random_scale
self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
self.target_patches = make_patch_grid(target_img, patch_size)
self.target_patches_normed = self.normalize_patches(self.target_patches)
self.coords = np.random.uniform(0.0, 1.0, # TODO: switch to pixels
(2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
self.similarity = np.zeros((self.num_input_rows, self.num_input_cols), dtype=np.float32)
self.min_propagration_row = 1.0 / self.num_input_rows
self.min_propagration_col = 1.0 / self.num_input_cols
self.delta_row = np.array([[[self.min_propagration_row]], [[0.0]]])
self.delta_col = np.array([[[0.0]], [[self.min_propagration_col]]])
def update(self, input_img, reverse_propagation=False):
input_patches = self.get_patches_for(input_img)
self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)
def update_with_patches(self, input_patches, reverse_propagation=False):
self._propagate(input_patches, reverse_propagation=reverse_propagation)
self._random_update(input_patches)
def get_patches_for(self, img):
return make_patch_grid(img, self.patch_size)
def normalize_patches(self, patches):
norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
return patches / norm
def _propagate(self, input_patches, reverse_propagation=False):
if reverse_propagation:
roll_direction = 1
else:
roll_direction = -1
sign = float(roll_direction)
for step_i in range(self.num_propagation_steps):
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
coords_row, similarity_row = self.eval_state(new_coords, input_patches)
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
coords_col, similarity_col = self.eval_state(new_coords, input_patches)
self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)
def _random_update(self, input_patches):
for alpha in range(1, self.num_random_steps + 1): # NOTE this should actually stop when the move is < 1
new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
self.coords, self.similarity = self.eval_state(new_coords, input_patches)
def eval_state(self, new_coords, input_patches):
new_similarity = self.patch_similarity(input_patches, new_coords)
delta_similarity = new_similarity - self.similarity
coords = np.where(delta_similarity > 0, new_coords, self.coords)
best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
return coords, best_similarity
def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
delta_similarity = similarity_a - similarity_b
best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
return best_coords, best_similarity
def patch_similarity(self, source, coords):
'''Check the similarity of the patches specified in coords.'''
target_vals = self.lookup_coords(self.target_patches_normed, coords)
err = source * target_vals
return np.sum(err, axis=(2, 3, 4))
def clip_coords(self, coords):
# TODO: should this all be in pixel space?
coords = np.clip(coords, 0.0, 1.0)
return coords
def lookup_coords(self, x, coords):
x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
return x[i_coords[0], i_coords[1]]
def get_reconstruction(self, patches=None, combined=None):
if combined is not None:
patches = make_patch_grid(combined, self.patch_size)
if patches is None:
patches = self.target_patches
patches = self.lookup_coords(patches, self.coords)
recon = combine_patches_grid(patches, self.input_shape)
return recon
def scale(self, new_shape, new_target_img):
'''Create a new matcher of the given shape and replace its
state with a scaled up version of the current matcher's state.
'''
new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
patch_stride=self.patch_stride, jump_size=self.jump_size,
num_propagation_steps=self.num_propagation_steps,
num_random_steps=self.num_random_steps,
random_max_radius=self.random_max_radius,
random_scale=self.random_scale)
new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
return new_matcher
def congrid(a, newdims, method='linear', centre=False, minusone=False):
'''Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
as IDL''s congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
method:
neighbour - closest value from original data
nearest and linear - uses n x 1-D interpolations using
scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)
spline - uses ndimage.map_coordinates
centre:
True - interpolation points are at the centres of the bins
False - points are at the front edge of the bin
minusone:
For example- inarray.shape = (i,j) & new dimensions = (x,y)
False - inarray is resampled by factors of (i/x) * (j/y)
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
This prevents extrapolation one element beyond bounds of input array.
'''
if a.dtype not in [np.float64, np.float32]:
    a = a.astype(float)
m1 = int(minusone)
ofs = int(centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print("[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions.")
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
base = np.indices(newdims.astype(int))[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
newa = a[tuple(cd)]
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
olddims = [np.arange(i, dtype=float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
trorder = [ndims - 1] + list(range( ndims - 1 ))
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
        newcoords_dims = list(range(newcoords.ndim))  # np.rank was removed; use ndim
        # make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print("Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported.")
return None
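# A minimal, hedged usage sketch for congrid (assumes numpy is imported as np, as above;
# shapes and values are illustrative only):
#
#   img = np.arange(12, dtype=float).reshape(3, 4)
#   up = congrid(img, (6, 8), method='linear', minusone=True)    # -> shape (6, 8)
#   down = congrid(img, (2, 2), method='neighbour')              # -> shape (2, 2)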
if __name__ == '__main__':
import sys
import time
from scipy.misc import imsave
from image_analogy.img_utils import load_image, preprocess_image, deprocess_image
content_image_path, style_image_path, output_prefix = sys.argv[1:]
jump_size = 1.0
num_steps = 7
patch_size = 1
patch_stride = 1
feat_chans = 512
feat_style_shape = (feat_chans, 12, 18)
feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
feat_in_shape = (feat_chans, 17, 10)
feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size)
feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
for i in range(num_steps):
matcher.update_with_patches(feat_in_normed)
r = matcher.get_reconstruction()
content_img_img = load_image(content_image_path)
content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0))
style_img = load_image(style_image_path)
style_n_channels, style_n_rows, style_n_cols = content_img_img.shape[::-1]
style_img = preprocess_image(
load_image(style_image_path), style_n_cols, style_n_rows)[0]#.transpose((2,1,0))
pg = make_patch_grid(content_img, patch_size)
result = combine_patches_grid(pg, content_img.shape[::-1])
outimg = deprocess_image(result, contrast_percent=0)
imsave(output_prefix + '_bestre.png', outimg)
# # #
matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size)
for i in range(num_steps):
start = time.time()
matcher.update(content_img, reverse_propagation=bool(i % 2))
print(matcher.similarity.min(), matcher.similarity.max(), matcher.similarity.mean())
end = time.time()
#print end-start
start = time.time()
result = matcher.get_reconstruction(patches=matcher.target_patches)
print(result.shape)
end = time.time()
print(end-start)
outimg = deprocess_image(result, contrast_percent=0)
# # imsave takes (rows, cols, channels)
imsave(output_prefix + '_best.png', outimg)
| en | 0.801333 | x shape: (num_channels, rows, cols) #patches = np.rollaxis(patches, -1, 2) Reconstruct an image from these `patches` input shape: (rows, cols, channels, patch_row, patch_col) # (patches, channels, pr, pc) # (patches, p, p, channels) A matcher of image patches inspired by the PatchMatch algorithm. image shape: (width, height, channels) # TODO: switch to pixels # * [[[self.num_input_rows]],[[self.num_input_cols]]] # NOTE this should actually stop when the move is < 1 Check the similarity of the patches specified in coords. # TODO: should this all be in pixel space? Create a new matcher of the given shape and replace its state with a scaled up version of the current matcher's state. Arbitrary resampling of source array to new dimension sizes. Currently only supports maintaining the same number of dimensions. To use 1-D arrays, first promote them to shape (x,1). Uses the same parameters and creates the same co-ordinate lookup points as IDL''s congrid routine, which apparently originally came from a VAX/VMS routine of the same name. method: neighbour - closest value from original data nearest and linear - uses n x 1-D interpolations using scipy.interpolate.interp1d (see Numerical Recipes for validity of use of n 1-D interpolations) spline - uses ndimage.map_coordinates centre: True - interpolation points are at the centres of the bins False - points are at the front edge of the bin minusone: For example- inarray.shape = (i,j) & new dimensions = (x,y) False - inarray is resampled by factors of (i/x) * (j/y) True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1) This prevents extrapolation one element beyond bounds of input array. # calculate new dims # specify old dims # first interpolation - for ndims = any # need one more transpose to return to original dimensions #make first index last # makes a view that affects newcoords #.transpose((2,1,0)) #.transpose((2,1,0)) # # # #print end-start # # imsave takes (rows, cols, channels) | 2.881291 | 3 |
muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py | desafinadude/muni-portal-backend | 1 | 9328 | <filename>muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py<gh_stars>1-10
# Generated by Django 2.2.10 on 2021-02-24 09:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20210224_0936'),
]
operations = [
migrations.RemoveField(
model_name='servicerequest',
name='mobile_reference',
),
]
| <filename>muni_portal/core/migrations/0030_remove_servicerequest_mobile_reference.py<gh_stars>1-10
# Generated by Django 2.2.10 on 2021-02-24 09:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0029_auto_20210224_0936'),
]
operations = [
migrations.RemoveField(
model_name='servicerequest',
name='mobile_reference',
),
]
| en | 0.800865 | # Generated by Django 2.2.10 on 2021-02-24 09:42 | 1.30581 | 1 |
rllib/agents/ppo/tests/test_appo.py | noahshpak/ray | 2 | 9329 | <filename>rllib/agents/ppo/tests/test_appo.py
import unittest
import ray
import ray.rllib.agents.ppo as ppo
from ray.rllib.utils.test_utils import check_compute_single_action, \
framework_iterator
class TestAPPO(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_appo_compilation(self):
"""Test whether an APPOTrainer can be built with both frameworks."""
config = ppo.appo.DEFAULT_CONFIG.copy()
config["num_workers"] = 1
num_iterations = 2
for _ in framework_iterator(config, frameworks=("torch", "tf")):
_config = config.copy()
trainer = ppo.APPOTrainer(config=_config, env="CartPole-v0")
for i in range(num_iterations):
print(trainer.train())
check_compute_single_action(trainer)
_config = config.copy()
_config["vtrace"] = True
trainer = ppo.APPOTrainer(config=_config, env="CartPole-v0")
for i in range(num_iterations):
print(trainer.train())
check_compute_single_action(trainer)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| <filename>rllib/agents/ppo/tests/test_appo.py
import unittest
import ray
import ray.rllib.agents.ppo as ppo
from ray.rllib.utils.test_utils import check_compute_single_action, \
framework_iterator
class TestAPPO(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init()
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_appo_compilation(self):
"""Test whether an APPOTrainer can be built with both frameworks."""
config = ppo.appo.DEFAULT_CONFIG.copy()
config["num_workers"] = 1
num_iterations = 2
for _ in framework_iterator(config, frameworks=("torch", "tf")):
_config = config.copy()
trainer = ppo.APPOTrainer(config=_config, env="CartPole-v0")
for i in range(num_iterations):
print(trainer.train())
check_compute_single_action(trainer)
_config = config.copy()
_config["vtrace"] = True
trainer = ppo.APPOTrainer(config=_config, env="CartPole-v0")
for i in range(num_iterations):
print(trainer.train())
check_compute_single_action(trainer)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| en | 0.847905 | Test whether an APPOTrainer can be built with both frameworks. | 2.212034 | 2 |
ezeeai/utils/hooks.py | jmarine/ezeeai | 19 | 9330 | <reponame>jmarine/ezeeai<filename>ezeeai/utils/hooks.py<gh_stars>10-100
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.basic_session_run_hooks import NeverTriggerTimer, SecondOrStepTimer
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.tf_export import tf_export
import smtplib
from email.mime.text import MIMEText
@tf_export("train.EmailAtStepHook")
class EmailAtStepHook(session_run_hook.SessionRunHook):
def __init__(self, user_info, server_info, every_n_iter=None, every_n_secs=None,
at_end=False):
only_log_at_end = (
at_end and (every_n_iter is None) and (every_n_secs is None))
if (not only_log_at_end and
(every_n_iter is None) == (every_n_secs is None)):
raise ValueError(
"either at_end and/or exactly one of every_n_iter and every_n_secs "
"must be provided.")
if every_n_iter is not None and every_n_iter <= 0:
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
self._timer = (
NeverTriggerTimer() if only_log_at_end else
SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter))
self._log_at_end = at_end
self._user_info = user_info
self._server_info = server_info
self._timer.reset()
self._iter_count = 0
def begin(self):
pass
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
def after_run(self, run_context, run_values):
_ = run_context
if self._should_trigger:
self._send_email()
self._iter_count += 1
def end(self, session):
if self._log_at_end:
self._send_email()
def _send_email(self):
smtpserver = 'smtp.gmail.com:587'
        header = 'From: %s\n' % self._server_info['email_address']
        header += 'To: %s\n' % self._user_info['email_address']
        header += 'Subject: %s\n\n' % "Training finished"
        message = header + "Training finished"
server = smtplib.SMTP(smtpserver)
server.starttls()
server.login(self._server_info['login'], self._server_info['password'])
problems = server.sendmail(self._server_info['email_address'], self._user_info['email_address'], message)
server.quit()
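# A hedged usage sketch (addresses and the estimator below are illustrative, not part of
# this module). Because EmailAtStepHook is a SessionRunHook, it can be passed to an
# Estimator's train() call:
#
#   user_info = {'email_address': 'user@example.com'}
#   server_info = {'email_address': 'bot@example.com', 'login': 'bot@example.com',
#                  'password': '...'}
#   hook = EmailAtStepHook(user_info, server_info, every_n_iter=1000, at_end=True)
#   estimator.train(input_fn=train_input_fn, hooks=[hook])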
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.basic_session_run_hooks import NeverTriggerTimer, SecondOrStepTimer
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.tf_export import tf_export
import smtplib
from email.mime.text import MIMEText
@tf_export("train.EmailAtStepHook")
class EmailAtStepHook(session_run_hook.SessionRunHook):
def __init__(self, user_info, server_info, every_n_iter=None, every_n_secs=None,
at_end=False):
only_log_at_end = (
at_end and (every_n_iter is None) and (every_n_secs is None))
if (not only_log_at_end and
(every_n_iter is None) == (every_n_secs is None)):
raise ValueError(
"either at_end and/or exactly one of every_n_iter and every_n_secs "
"must be provided.")
if every_n_iter is not None and every_n_iter <= 0:
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
self._timer = (
NeverTriggerTimer() if only_log_at_end else
SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter))
self._log_at_end = at_end
self._user_info = user_info
self._server_info = server_info
self._timer.reset()
self._iter_count = 0
def begin(self):
pass
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
def after_run(self, run_context, run_values):
_ = run_context
if self._should_trigger:
self._send_email()
self._iter_count += 1
def end(self, session):
if self._log_at_end:
self._send_email()
def _send_email(self):
smtpserver = 'smtp.gmail.com:587'
        header = 'From: %s\n' % self._server_info['email_address']
        header += 'To: %s\n' % self._user_info['email_address']
        header += 'Subject: %s\n\n' % "Training finished"
        message = header + "Training finished"
server = smtplib.SMTP(smtpserver)
server.starttls()
server.login(self._server_info['login'], self._server_info['password'])
problems = server.sendmail(self._server_info['email_address'], self._user_info['email_address'], message)
server.quit() | en | 0.227922 | # pylint: disable=unused-argument | 2.268078 | 2 |
tests/factory_fixtures/dummy_resource.py | whiletrace/dwellinglybackend | 15 | 9331 | <filename>tests/factory_fixtures/dummy_resource.py
from flask import request
from flask_restful import Resource
from utils.gatekeeper import allowed_params
class DummyResource(Resource):
dummy_params = set()
@allowed_params(dummy_params)
def put(self):
return request.json
| <filename>tests/factory_fixtures/dummy_resource.py
from flask import request
from flask_restful import Resource
from utils.gatekeeper import allowed_params
class DummyResource(Resource):
dummy_params = set()
@allowed_params(dummy_params)
def put(self):
return request.json
| none | 1 | 1.969477 | 2 |
|
quizzes/00.organize.me/hackerrank/sorted_set/server2.py | JiniousChoi/encyclopedia-in-code | 2 | 9332 | #!/usr/bin/env python3
import socket, threading
from queue import Queue
import sys, struct
# NOTE: Use this path to create the UDS Server socket
SERVER_SOCKET_PATH = "./socket";
class Result:
def __init__(self):
self._evt = threading.Event()
self._result = None
def set_result(self, value):
self._result = value
self._evt.set()
def result(self):
self._evt.wait()
return self._result
class ActorExit(Exception):
pass
class Actor(object):
def __init__(self):
self._mailbox = Queue()
def send(self, msg):
self._mailbox.put(msg)
def recv(self):
msg = self._mailbox.get()
if msg is ActorExit:
raise ActorExit()
return msg
def close(self):
self.send(ActorExit)
def start(self):
self._terminated = threading.Event()
t = threading.Thread(target=self._bootstrap)
t.daemon = True
t.start()
def _bootstrap(self):
try:
self.run()
except ActorExit:
pass
finally:
self._terminated.set()
def join(self):
self._terminated.wait()
def run(self):
while True:
msg = self.recv()
class Worker(Actor):
def __init__(self):
super().__init__()
self.db = {}
def submit(self, values):
r = Result()
self.send((values, r))
return r
def run(self):
while True:
values, r = self.recv()
r.set_result(self.execute(values))
def execute(self, values):
cmd, *opts = values
print('[*]', cmd, opts)
if cmd == 1: #add
s, k, v = opts
self.db.setdefault(s, {})
self.db[s][k] = v
return [0]
elif cmd == 2: #remove
s, k = opts
if s in self.db and k in self.db[s]:
self.db[s].pop(k)
return [0]
elif cmd == 3: #get size
s = opts[0]
size = len(self.db[s]) if s in self.db else 0
return [1, size]
elif cmd == 4: #get value
s, k = opts
if s in self.db and k in self.db[s]:
score = self.db[s][k]
else:
score = 0
return [1, score]
elif cmd == 5: #range
*sets, _, lower, upper = opts
res = []
for s in sets:
if s not in self.db:
continue
for k,v in self.db[s].items():
if lower <= v <= upper:
res.append((k,v))
res.sort()
return [len(res)*2] + [e for kv in res for e in kv]
elif cmd == 6: #disconnect
return None
else:
raise Exception("Not supported CMD(%s)" % (cmd))
FMT = "!L"
def read_number_from_socket(connection):
return struct.unpack(FMT, connection.recv(4))[0]
def write_number_to_socket(connection, number):
connection.send(struct.pack(FMT, number))
def process_client_connection(connection, worker):
while True:
value_num = read_number_from_socket(connection)
values = []
for _ in range(value_num):
values.append(read_number_from_socket(connection))
res = worker.submit(values)
if res.result() == None:
break
for num in res.result():
write_number_to_socket(connection, num)
connection.close()
def main():
worker = Worker()
worker.start()
s = socket.socket(socket.AF_UNIX)
s.bind(SERVER_SOCKET_PATH)
s.listen(1)
while True:
cl, addr = s.accept()
t = threading.Thread(target = process_client_connection, args=(cl, worker))
t.start()
#worker.close()
s.close()
if __name__ == '__main__':
main()
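# A hedged client-side sketch of the wire protocol implemented above (not part of the
# original script). Each request is a count followed by that many unsigned 32-bit
# big-endian integers; each reply starts with a count of the values that follow:
#
#   import socket, struct
#   sock = socket.socket(socket.AF_UNIX)
#   sock.connect(SERVER_SOCKET_PATH)
#   for number in [4, 1, 7, 3, 9]:                 # cmd=1 (add): set 7, key 3, value 9
#       sock.send(struct.pack("!L", number))
#   reply = struct.unpack("!L", sock.recv(4))[0]   # 0 -> no further values for an "add"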
| #!/usr/bin/env python3
import socket, threading
from queue import Queue
import sys, struct
# NOTE: Use this path to create the UDS Server socket
SERVER_SOCKET_PATH = "./socket";
class Result:
def __init__(self):
self._evt = threading.Event()
self._result = None
def set_result(self, value):
self._result = value
self._evt.set()
def result(self):
self._evt.wait()
return self._result
class ActorExit(Exception):
pass
class Actor(object):
def __init__(self):
self._mailbox = Queue()
def send(self, msg):
self._mailbox.put(msg)
def recv(self):
msg = self._mailbox.get()
if msg is ActorExit:
raise ActorExit()
return msg
def close(self):
self.send(ActorExit)
def start(self):
self._terminated = threading.Event()
t = threading.Thread(target=self._bootstrap)
t.daemon = True
t.start()
def _bootstrap(self):
try:
self.run()
except ActorExit:
pass
finally:
self._terminated.set()
def join(self):
self._terminated.wait()
def run(self):
while True:
msg = self.recv()
class Worker(Actor):
def __init__(self):
super().__init__()
self.db = {}
def submit(self, values):
r = Result()
self.send((values, r))
return r
def run(self):
while True:
values, r = self.recv()
r.set_result(self.execute(values))
def execute(self, values):
cmd, *opts = values
print('[*]', cmd, opts)
if cmd == 1: #add
s, k, v = opts
self.db.setdefault(s, {})
self.db[s][k] = v
return [0]
elif cmd == 2: #remove
s, k = opts
if s in self.db and k in self.db[s]:
self.db[s].pop(k)
return [0]
elif cmd == 3: #get size
s = opts[0]
size = len(self.db[s]) if s in self.db else 0
return [1, size]
elif cmd == 4: #get value
s, k = opts
if s in self.db and k in self.db[s]:
score = self.db[s][k]
else:
score = 0
return [1, score]
elif cmd == 5: #range
*sets, _, lower, upper = opts
res = []
for s in sets:
if s not in self.db:
continue
for k,v in self.db[s].items():
if lower <= v <= upper:
res.append((k,v))
res.sort()
return [len(res)*2] + [e for kv in res for e in kv]
elif cmd == 6: #disconnect
return None
else:
raise Exception("Not supported CMD(%s)" % (cmd))
FMT = "!L"
def read_number_from_socket(connection):
return struct.unpack(FMT, connection.recv(4))[0]
def write_number_to_socket(connection, number):
connection.send(struct.pack(FMT, number))
def process_client_connection(connection, worker):
while True:
value_num = read_number_from_socket(connection)
values = []
for _ in range(value_num):
values.append(read_number_from_socket(connection))
res = worker.submit(values)
if res.result() == None:
break
for num in res.result():
write_number_to_socket(connection, num)
connection.close()
def main():
worker = Worker()
worker.start()
s = socket.socket(socket.AF_UNIX)
s.bind(SERVER_SOCKET_PATH)
s.listen(1)
while True:
cl, addr = s.accept()
t = threading.Thread(target = process_client_connection, args=(cl, worker))
t.start()
#worker.close()
s.close()
if __name__ == '__main__':
main()
| en | 0.26388 | #!/usr/bin/env python3 # NOTE: Use this path to create the UDS Server socket #add #remove #get size #get value #range #disconnect #worker.close() | 2.775195 | 3 |
vnpy/gateway/rohon/__init__.py | funrunskypalace/vnpy | 323 | 9333 | from .rohon_gateway import RohonGateway
| from .rohon_gateway import RohonGateway
| none | 1 | 1.070107 | 1 |
|
dnd/mobile/urls.py | dndtools2/dndtools2 | 0 | 9334 | <filename>dnd/mobile/urls.py<gh_stars>0
from django.conf.urls import patterns, url, include
from .views import force_desktop_version, return_to_mobile_version
app_name = 'mobile'
urlpatterns = [
# force desktop
url(r'^force-desktop-version/$', force_desktop_version, name='force_desktop_version'),
# return to mobile version
url(r'^return-to-mobile-version/$', return_to_mobile_version, name='return_to_mobile_version'),
# index
url(r'^', include('dnd.mobile.index.urls')),
# character classes
url(r'^classes/', include('dnd.mobile.character_classes.urls')),
# feats
url(r'^feats/', include('dnd.mobile.feats.urls')),
# items
url(r'^items/', include('dnd.mobile.items.urls')),
# languages
url(r'^languages/', include('dnd.mobile.languages.urls')),
# monsters
url(r'^monsters/', include('dnd.mobile.monsters.urls')),
# races
url(r'^races/', include('dnd.mobile.races.urls')),
# rulebooks
url(r'^rulebooks/', include('dnd.mobile.rulebooks.urls')),
# rules
url(r'^rules/', include('dnd.mobile.rules.urls')),
# skills
url(r'^skills/', include('dnd.mobile.skills.urls')),
# spells
url(r'^spells/', include('dnd.mobile.spells.urls')),
# deities
url(r'^deities/', include('dnd.mobile.deities.urls')),
]
| <filename>dnd/mobile/urls.py<gh_stars>0
from django.conf.urls import patterns, url, include
from .views import force_desktop_version, return_to_mobile_version
app_name = 'mobile'
urlpatterns = [
# force desktop
url(r'^force-desktop-version/$', force_desktop_version, name='force_desktop_version'),
# return to mobile version
url(r'^return-to-mobile-version/$', return_to_mobile_version, name='return_to_mobile_version'),
# index
url(r'^', include('dnd.mobile.index.urls')),
# character classes
url(r'^classes/', include('dnd.mobile.character_classes.urls')),
# feats
url(r'^feats/', include('dnd.mobile.feats.urls')),
# items
url(r'^items/', include('dnd.mobile.items.urls')),
# languages
url(r'^languages/', include('dnd.mobile.languages.urls')),
# monsters
url(r'^monsters/', include('dnd.mobile.monsters.urls')),
# races
url(r'^races/', include('dnd.mobile.races.urls')),
# rulebooks
url(r'^rulebooks/', include('dnd.mobile.rulebooks.urls')),
# rules
url(r'^rules/', include('dnd.mobile.rules.urls')),
# skills
url(r'^skills/', include('dnd.mobile.skills.urls')),
# spells
url(r'^spells/', include('dnd.mobile.spells.urls')),
# deities
url(r'^deities/', include('dnd.mobile.deities.urls')),
]
| en | 0.713173 | # force desktop # return to mobile version # index # character classes # feats # items # languages # monsters # races # rulebooks # rules # skills # spells # deities | 1.751771 | 2 |
ros_aruco.py | esteng/guiding-multi-step | 69 | 9335 | """
Calibrate with the ROS package aruco_detect
"""
import rospy
import roslib
from geometry_msgs.msg import Transform
class ROSArUcoCalibrate:
def __init__(self, aruco_tag_len=0.0795):
print("Please roslaunch roslaunch aruco_detect aruco_detect.launch before you run!")
self.aruco_tf_topic = "/fiducial_transforms"
self._aruco_tf_info_sub = rospy.Subscriber(self.aruco_tf_topic, Transform, self._tfCb)
self.aruco_tf = None
def _tfCb(self, tf_msg):
if tf_msg is None:
rospy.logwarn("_tfCb: tf_msg is None!")
self.aruco_tf = tf_msg
def get_tf(self):
aruco_tf = self.aruco_tf
return aruco_tf
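# A hedged usage sketch (assumes a running ROS master with the aruco_detect node
# publishing; the node name is illustrative):
#
#   rospy.init_node('aruco_calibration', anonymous=True)
#   calib = ROSArUcoCalibrate(aruco_tag_len=0.0795)
#   rospy.sleep(1.0)           # give the subscriber time to receive a message
#   tf_msg = calib.get_tf()    # latest transform message, or None if nothing arrived yet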
| """
Calibrate with the ROS package aruco_detect
"""
import rospy
import roslib
from geometry_msgs.msg import Transform
class ROSArUcoCalibrate:
def __init__(self, aruco_tag_len=0.0795):
print("Please roslaunch roslaunch aruco_detect aruco_detect.launch before you run!")
self.aruco_tf_topic = "/fiducial_transforms"
self._aruco_tf_info_sub = rospy.Subscriber(self.aruco_tf_topic, Transform, self._tfCb)
self.aruco_tf = None
def _tfCb(self, tf_msg):
if tf_msg is None:
rospy.logwarn("_tfCb: tf_msg is None!")
self.aruco_tf = tf_msg
def get_tf(self):
aruco_tf = self.aruco_tf
return aruco_tf
| en | 0.609418 | Calibrate with the ROS package aruco_detect | 2.532505 | 3 |
utils/utils.py | mmalandra-kb4/service-metrics-gatherer | 0 | 9336 | <gh_stars>0
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
import re
import os
import json
from urllib.parse import urlparse
import datetime
logger = logging.getLogger("metricsGatherer.utils")
def remove_credentials_from_url(url):
parsed_url = urlparse(url)
new_netloc = re.sub("^.+?:.+?@", "", parsed_url.netloc)
return url.replace(parsed_url.netloc, new_netloc)
def get_credentials_from_url(url):
parsed_url = urlparse(url)
new_netloc = re.search("^(.+?):(.+?)@", parsed_url.netloc)
try:
username = new_netloc.group(1).strip()
password = new_netloc.group(2).strip()
return username, password
except: # noqa
return "", ""
def read_json_file(folder, filename, to_json=False):
"""Read fixture from file"""
with open(os.path.join(folder, filename), "r") as file:
return file.read() if not to_json else json.loads(file.read())
def is_the_time_for_task_starting(allowed_start_time, allowed_end_time):
start = datetime.time(int(allowed_start_time.split(":")[0]), int(allowed_start_time.split(":")[1]))
end = datetime.time(int(allowed_end_time.split(":")[0]), int(allowed_end_time.split(":")[1]))
now_time = datetime.datetime.now().time()
if start > end:
return (now_time >= start and now_time <= datetime.time(23, 59)) or\
(now_time >= datetime.time(0, 0) and now_time <= end)
return now_time >= start and now_time <= end
def take_the_date_to_check():
now_time = datetime.datetime.now().time()
if (now_time >= datetime.time(12, 0) and now_time <= datetime.time(23, 59)):
return datetime.datetime.now()
return datetime.datetime.now() - datetime.timedelta(days=1)
def build_url(main_url, url_params):
"""Build url by concating url and url_params"""
return main_url + "/" + "/".join(url_params)
def unite_project_name(project_id, prefix):
return prefix + project_id
def parse_conditions(conditions):
parsed_conditions = []
for condition in conditions.split("|"):
if not condition.strip():
continue
chosen_operator = ""
for operator in [">=", "<=", "==", "=", "<", ">"]:
if operator in condition:
chosen_operator = operator
break
condition_changed = condition.replace(chosen_operator, " ").split()
if len(condition_changed) == 2:
metric_score = None
try:
metric_score = int(condition_changed[1].strip())
except: # noqa
try:
metric_score = float(condition_changed[1].strip())
except: # noqa
pass
if metric_score is not None:
parsed_conditions.append(
(condition_changed[0].strip(), chosen_operator, metric_score))
return parsed_conditions
def compare_metrics(cur_metric, metric_threshold, operator):
if operator == ">=":
return cur_metric >= metric_threshold
if operator == ">":
return cur_metric > metric_threshold
if operator == "<=":
return cur_metric <= metric_threshold
if operator == "<":
return cur_metric < metric_threshold
if operator in ["==", "="]:
return cur_metric == metric_threshold
return False
def convert_metrics_to_string(cur_metrics):
return ";".join(["%s:%s" % (metric[0], metric[1]) for metric in cur_metrics])
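# A hedged example of the condition-string format handled above (names and thresholds
# are illustrative):
#
#   >>> parse_conditions("coverage>=0.8|errors<5")
#   [('coverage', '>=', 0.8), ('errors', '<', 5)]
#   >>> compare_metrics(0.9, 0.8, '>=')
#   True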
| """
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
import re
import os
import json
from urllib.parse import urlparse
import datetime
logger = logging.getLogger("metricsGatherer.utils")
def remove_credentials_from_url(url):
parsed_url = urlparse(url)
new_netloc = re.sub("^.+?:.+?@", "", parsed_url.netloc)
return url.replace(parsed_url.netloc, new_netloc)
def get_credentials_from_url(url):
parsed_url = urlparse(url)
new_netloc = re.search("^(.+?):(.+?)@", parsed_url.netloc)
try:
username = new_netloc.group(1).strip()
password = new_netloc.group(2).strip()
return username, password
except: # noqa
return "", ""
def read_json_file(folder, filename, to_json=False):
"""Read fixture from file"""
with open(os.path.join(folder, filename), "r") as file:
return file.read() if not to_json else json.loads(file.read())
def is_the_time_for_task_starting(allowed_start_time, allowed_end_time):
start = datetime.time(int(allowed_start_time.split(":")[0]), int(allowed_start_time.split(":")[1]))
end = datetime.time(int(allowed_end_time.split(":")[0]), int(allowed_end_time.split(":")[1]))
now_time = datetime.datetime.now().time()
if start > end:
return (now_time >= start and now_time <= datetime.time(23, 59)) or\
(now_time >= datetime.time(0, 0) and now_time <= end)
return now_time >= start and now_time <= end
def take_the_date_to_check():
now_time = datetime.datetime.now().time()
if (now_time >= datetime.time(12, 0) and now_time <= datetime.time(23, 59)):
return datetime.datetime.now()
return datetime.datetime.now() - datetime.timedelta(days=1)
def build_url(main_url, url_params):
"""Build url by concating url and url_params"""
return main_url + "/" + "/".join(url_params)
def unite_project_name(project_id, prefix):
return prefix + project_id
def parse_conditions(conditions):
parsed_conditions = []
for condition in conditions.split("|"):
if not condition.strip():
continue
chosen_operator = ""
for operator in [">=", "<=", "==", "=", "<", ">"]:
if operator in condition:
chosen_operator = operator
break
condition_changed = condition.replace(chosen_operator, " ").split()
if len(condition_changed) == 2:
metric_score = None
try:
metric_score = int(condition_changed[1].strip())
except: # noqa
try:
metric_score = float(condition_changed[1].strip())
except: # noqa
pass
if metric_score is not None:
parsed_conditions.append(
(condition_changed[0].strip(), chosen_operator, metric_score))
return parsed_conditions
def compare_metrics(cur_metric, metric_threshold, operator):
if operator == ">=":
return cur_metric >= metric_threshold
if operator == ">":
return cur_metric > metric_threshold
if operator == "<=":
return cur_metric <= metric_threshold
if operator == "<":
return cur_metric < metric_threshold
if operator in ["==", "="]:
return cur_metric == metric_threshold
return False
def convert_metrics_to_string(cur_metrics):
return ";".join(["%s:%s" % (metric[0], metric[1]) for metric in cur_metrics]) | en | 0.853167 | * Copyright 2019 EPAM Systems * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. # noqa Read fixture from file Build url by concating url and url_params # noqa # noqa | 2.387819 | 2 |
OSAnalysisHelper.py | nassermarafi/SRCSWArchetypes | 7 | 9337 | from __future__ import absolute_import
__author__ = 'marafi'
def SolutionAlgorithim(OData, Dt, Tol, Steps):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SolutionAlgorithimV2(OData, Dt, Tol, Steps):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = 6))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Bisection... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch RegulaFalsi... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SolutionAlgorithimKrylovOnly(OData, Dt, Tol, Steps, MaxDim = 6):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %e and Tol: %e ... "'%(Dt,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 1000, 2))
# OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = MaxDim))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %e ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SenSolutionAlgorithim(OData, Dt, Steps, Tol = 1e-12, KrylovMaxDim = 12, MinDt = 1e-12, NoOfIterations=3000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set conv_tol %e'%Tol))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set max_iter %d;'%NoOfIterations))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 3000, 0))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('test EnergyIncr $conv_tol $max_iter;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('algorithm Newton;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('integrator Newmark 0.5 0.25;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('analysis Transient;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set dt %e;'%Dt))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set min_dt %e;'%MinDt))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set n_steps %d;'%Steps))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set cur_step 1;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set div 10.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set tol 1.0e-12;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('set eigenvalue [eigen 9];'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('modalDamping 0.02;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('while {$cur_step < $n_steps} {'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, NoOfIterations, 0))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr $conv_tol $max_iter;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' algorithm Newton;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr $dt];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "> analysis failed to converge at step $cur_step";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "> trying KrylovNewton";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' algorithm KrylovNewton -maxDim %d;'%KrylovMaxDim))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set t 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr round($dt/$div/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_dt_temp 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' while {$t < $dt} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$dt_temp < $min_dt} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "<< model did not converge (reason: time step less than $min_dt)";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "<< exiting safely";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' wipe;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' exit;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$dt_temp < [expr $dt/pow($div, 2)]} {'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol*10, NoOfIterations, 0))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr [expr $conv_tol*10.0] $max_iter;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt_temp];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok == 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set t [expr round(($t + $dt_temp)/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t [expr round(($mini_t + $dt_temp)/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$mini_t >= $mini_dt_temp} {set dt_temp [expr round($dt_temp*$div/$tol)*$tol]};'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' } else {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_dt_temp [expr round($dt_temp/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr round($dt_temp/$div/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$cur_step % 1 == 0} {'))
    OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "Running Time History Step: $cur_step out of %d (Sen Algo.)";'%Steps))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' incr cur_step;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('};'))
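# A hedged usage sketch (OData is assumed to be the OpenSeesAPI script/database object
# used throughout this module; the numbers are illustrative). Each helper only appends
# TCL that runs when the generated script's while loop sees $ok != 0, so calls can be
# stacked as successive fallbacks:
#
#   SolutionAlgorithim(OData, Dt=0.005, Tol=1.0e-6, Steps=10)
#   SolutionAlgorithimKrylovOnly(OData, Dt=1.0e-4, Tol=1.0e-8, Steps=100, MaxDim=12)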
def PushOverSolutionAlgorithim(OData, StepSize, Tol, ControlNode):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "'))
# OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
# OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
#
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "'))
# OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
# OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimDispIncr(OData, StepSize, Tol, ControlNode):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantAlgorithm(OData, StepSize, Tol, ControlNode, Iter=1000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantAlgorithmDispIncr(OData, StepSize, Tol, ControlNode, NoOfIterations=1000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,NoOfIterations,2))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantTol(OData, Tol, Iter=1000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) | from __future__ import absolute_import
__author__ = 'marafi'
def SolutionAlgorithim(OData, Dt, Tol, Steps):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SolutionAlgorithimV2(OData, Dt, Tol, Steps):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = 6))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Bisection... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch RegulaFalsi... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SolutionAlgorithimKrylovOnly(OData, Dt, Tol, Steps, MaxDim = 6):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %e and Tol: %e ... "'%(Dt,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 1000, 2))
# OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = MaxDim))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %e ]'%(Steps,Dt)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def SenSolutionAlgorithim(OData, Dt, Steps, Tol = 1e-12, KrylovMaxDim = 12, MinDt = 1e-12, NoOfIterations=3000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set conv_tol %e'%Tol))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set max_iter %d;'%NoOfIterations))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 3000, 0))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('test EnergyIncr $conv_tol $max_iter;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('algorithm Newton;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('integrator Newmark 0.5 0.25;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('analysis Transient;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set dt %e;'%Dt))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set min_dt %e;'%MinDt))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set n_steps %d;'%Steps))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set cur_step 1;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set div 10.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set tol 1.0e-12;'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('set eigenvalue [eigen 9];'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('modalDamping 0.02;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('while {$cur_step < $n_steps} {'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, NoOfIterations, 0))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr $conv_tol $max_iter;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' algorithm Newton;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr $dt];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "> analysis failed to converge at step $cur_step";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "> trying KrylovNewton";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' algorithm KrylovNewton -maxDim %d;'%KrylovMaxDim))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set t 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr round($dt/$div/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_dt_temp 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' while {$t < $dt} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$dt_temp < $min_dt} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "<< model did not converge (reason: time step less than $min_dt)";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "<< exiting safely";'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' wipe;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' exit;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$dt_temp < [expr $dt/pow($div, 2)]} {'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol*10, NoOfIterations, 0))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr [expr $conv_tol*10.0] $max_iter;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt_temp];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok == 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set t [expr round(($t + $dt_temp)/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t [expr round(($mini_t + $dt_temp)/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$mini_t >= $mini_dt_temp} {set dt_temp [expr round($dt_temp*$div/$tol)*$tol]};'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' } else {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t 0.0;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_dt_temp [expr round($dt_temp/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr round($dt_temp/$div/$tol)*$tol];'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$cur_step % 1 == 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "Running Tim History Step: $cur_step out of %d (Sen Algo.)";'%Steps))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' incr cur_step;'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('};'))
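# Note on SenSolutionAlgorithim above: unlike the snippet-style helpers, it emits a complete
# TCL while-loop over `n_steps`. A failed Newton step is retried with KrylovNewton; if that
# also fails, the increment is sub-stepped: dt_temp is divided by `div` after each failure,
# grown back by `div` once the accumulated sub-step time catches up with the failed increment,
# and the run is wiped and exited cleanly if dt_temp ever drops below `min_dt`.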
def PushOverSolutionAlgorithim(OData, StepSize, Tol, ControlNode):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "'))
# OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
# OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
#
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "'))
# OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
# OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
# OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
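# The PushOver* helpers below follow the same pattern as the block above: they are meant to
# be inserted inside a displacement-controlled while-loop that defines `ok`, and they differ
# mainly in the convergence test used (EnergyIncr here vs NormDispIncr in the DispIncr
# variants) and in whether the step size, tolerance and algorithm stay fixed between retries.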
def PushOverSolutionAlgorithimDispIncr(OData, StepSize, Tol, ControlNode):
#Insert within the While loop, make sure parameter "ok" is defined
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantAlgorithm(OData, StepSize, Tol, ControlNode, Iter=1000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantAlgorithmDispIncr(OData, StepSize, Tol, ControlNode, NoOfIterations=1000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol)))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,NoOfIterations,2))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
def PushOverSolutionAlgorithimConstantTol(OData, Tol, Iter=1000):
import OpenSeesAPI
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton())
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "'))
OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0))
OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]'))
OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) | en | 0.247052 | #Insert within the While loop, make sure parameter "ok" is defined #Insert within the While loop, make sure parameter "ok" is defined #Insert within the While loop, make sure parameter "ok" is defined # OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('test EnergyIncr $conv_tol $max_iter;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('algorithm Newton;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('integrator Newmark 0.5 0.25;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('analysis Transient;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('set eigenvalue [eigen 9];')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('modalDamping 0.02;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr $conv_tol $max_iter;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr [expr $conv_tol*10.0] $max_iter;')) #Insert within the While loop, make sure parameter "ok" is defined # OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "')) # OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0)) # OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True)) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) # # OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "')) # OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) # OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8)) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) #Insert within the While loop, make sure parameter "ok" is defined | 2.365582 | 2 |
unityparser/commands.py | socialpoint-labs/unity-yaml-parser | 76 | 9338 |
import re
from argparse import ArgumentParser
from multiprocessing import Pool, Manager, Process
from pathlib import Path
from .utils import UnityDocument
YAML_HEADER = '%YAML'
class UnityProjectTester:
"""
Class to run tests on a given Unity project folder
"""
AVAILABLE_COMMANDS = ('test_no_yaml_is_modified',)
def __init__(self):
self.options = None
def run(self):
top_parser = ArgumentParser()
subparser = top_parser.add_subparsers()
subparser.required = True
for cmd in UnityProjectTester.AVAILABLE_COMMANDS:
fn = getattr(self, cmd)
parser = subparser.add_parser(cmd, help=fn.__doc__)
parser.set_defaults(func=fn)
top_parser.add_argument('project_path', help='Path to the Unity project folder')
top_parser.add_argument('--exclude',
help='Exclude regexp when searching project files. Can be specified multiple times.',
default=None,
action='append')
top_parser.add_argument('--keep-changes',
help='If a file changes after serialization, do not revert the changes.',
default=False,
action='store_true')
top_parser.add_argument('--dry-run',
                                help='Don\'t modify.',
default=False,
action='store_true')
try:
self.options = top_parser.parse_args()
except TypeError:
top_parser.print_help()
return 2
# run given function
self.options.func()
def test_no_yaml_is_modified(self):
"""
Recurse the whole project folder looking for '.asset' files, load and save them all, and check that
there are no modifications
"""
if self.options.dry_run:
print("Dry-run mode enabled: YAMLs won't be dumped.")
if self.options.keep_changes:
print("Keep changes mode will not have any effect during dry run.")
elif self.options.keep_changes:
print("Keep changes mode enabled: Changes to files will be kept.")
project_path = Path(self.options.project_path)
asset_file_paths = [p for p in project_path.rglob('*.asset')]
print("Found {} '.asset' files".format(len(asset_file_paths)))
def is_path_included(path):
# compare regexp against absolute path
return not any(rexp.search(str(path.resolve())) for rexp in rexps)
if self.options.exclude is not None:
rexps = [re.compile(rexp) for rexp in self.options.exclude]
valid_file_paths = [p for p in filter(is_path_included, asset_file_paths)]
print("Excluded {} '.asset' files".format(len(asset_file_paths) - len(valid_file_paths)))
else:
valid_file_paths = asset_file_paths
file_results = []
with Manager() as manager:
print_queue = manager.Queue()
diff_list = manager.list()
queue_process = Process(target=UnityProjectTester.read_output, args=(print_queue,))
queue_process.start()
with Pool() as pool:
for f in valid_file_paths:
async_res = pool.apply_async(UnityProjectTester.open_and_save,
(f, print_queue, diff_list, self.options.keep_changes,
self.options.dry_run))
file_results.append((f, async_res))
pool.close()
pool.join()
# signal end of queue with None token
print_queue.put(None)
queue_process.join()
error_results = list(filter(lambda r: not r[1].successful(), file_results))
if len(error_results):
# raise the first exception
file_path, result = error_results[0]
print("Python process evaluating file {} failed with the following exception:".format(
file_path.resolve()), flush=True)
result.get()
if len(diff_list):
print("{} files are different now:".format(len(diff_list)))
print('\n'.join([str(f.resolve()) for f in diff_list]))
@staticmethod
def read_output(print_queue):
msg = print_queue.get()
while msg is not None:
print(msg, flush=True)
msg = print_queue.get()
@staticmethod
def open_and_save(asset_file_path, print_queue, diff_list, keep_changes=False, dry_run=False):
# check YAML version header, save original content
with open(str(asset_file_path), 'rb') as fp:
header = fp.read(len(YAML_HEADER))
try:
is_yaml_file = header.decode('utf-8') == YAML_HEADER
except UnicodeDecodeError:
is_yaml_file = False
finally:
if not is_yaml_file:
print_queue.put("Ignoring non-yaml file {}".format(asset_file_path))
return
else:
fp.seek(0)
print_queue.put("Processing {}".format(asset_file_path))
a_file_content = fp.read()
doc = UnityDocument.load_yaml(str(asset_file_path))
if dry_run:
return
try:
doc.dump_yaml()
with open(str(asset_file_path), 'rb') as fp:
b_file_content = fp.read()
# compare
if a_file_content != b_file_content:
diff_list.append(asset_file_path)
if not keep_changes:
with open(str(asset_file_path), 'wb') as fp:
fp.write(a_file_content)
except Exception:
with open(str(asset_file_path), 'wb') as fp:
fp.write(a_file_content)
raise
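# Illustrative CLI invocation (assumed, not part of the original file; the exact entry point
# may differ in the packaged project):
#
#   python commands.py test_no_yaml_is_modified /path/to/UnityProject \
#       --exclude '.*/Library/.*' --dry-run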
if __name__ == '__main__':
# None is considered successful
code = UnityProjectTester().run() or 0
exit(code)
| import re
from argparse import ArgumentParser
from multiprocessing import Pool, Manager, Process
from pathlib import Path
from .utils import UnityDocument
YAML_HEADER = '%YAML'
class UnityProjectTester:
"""
Class to run tests on a given Unity project folder
"""
AVAILABLE_COMMANDS = ('test_no_yaml_is_modified',)
def __init__(self):
self.options = None
def run(self):
top_parser = ArgumentParser()
subparser = top_parser.add_subparsers()
subparser.required = True
for cmd in UnityProjectTester.AVAILABLE_COMMANDS:
fn = getattr(self, cmd)
parser = subparser.add_parser(cmd, help=fn.__doc__)
parser.set_defaults(func=fn)
top_parser.add_argument('project_path', help='Path to the Unity project folder')
top_parser.add_argument('--exclude',
help='Exclude regexp when searching project files. Can be specified multiple times.',
default=None,
action='append')
top_parser.add_argument('--keep-changes',
help='If a file changes after serialization, do not revert the changes.',
default=False,
action='store_true')
top_parser.add_argument('--dry-run',
                                help='Don\'t modify.',
default=False,
action='store_true')
try:
self.options = top_parser.parse_args()
except TypeError:
top_parser.print_help()
return 2
# run given function
self.options.func()
def test_no_yaml_is_modified(self):
"""
Recurse the whole project folder looking for '.asset' files, load and save them all, and check that
there are no modifications
"""
if self.options.dry_run:
print("Dry-run mode enabled: YAMLs won't be dumped.")
if self.options.keep_changes:
print("Keep changes mode will not have any effect during dry run.")
elif self.options.keep_changes:
print("Keep changes mode enabled: Changes to files will be kept.")
project_path = Path(self.options.project_path)
asset_file_paths = [p for p in project_path.rglob('*.asset')]
print("Found {} '.asset' files".format(len(asset_file_paths)))
def is_path_included(path):
# compare regexp against absolute path
return not any(rexp.search(str(path.resolve())) for rexp in rexps)
if self.options.exclude is not None:
rexps = [re.compile(rexp) for rexp in self.options.exclude]
valid_file_paths = [p for p in filter(is_path_included, asset_file_paths)]
print("Excluded {} '.asset' files".format(len(asset_file_paths) - len(valid_file_paths)))
else:
valid_file_paths = asset_file_paths
file_results = []
with Manager() as manager:
print_queue = manager.Queue()
diff_list = manager.list()
queue_process = Process(target=UnityProjectTester.read_output, args=(print_queue,))
queue_process.start()
with Pool() as pool:
for f in valid_file_paths:
async_res = pool.apply_async(UnityProjectTester.open_and_save,
(f, print_queue, diff_list, self.options.keep_changes,
self.options.dry_run))
file_results.append((f, async_res))
pool.close()
pool.join()
# signal end of queue with None token
print_queue.put(None)
queue_process.join()
error_results = list(filter(lambda r: not r[1].successful(), file_results))
if len(error_results):
# raise the first exception
file_path, result = error_results[0]
print("Python process evaluating file {} failed with the following exception:".format(
file_path.resolve()), flush=True)
result.get()
if len(diff_list):
print("{} files are different now:".format(len(diff_list)))
print('\n'.join([str(f.resolve()) for f in diff_list]))
@staticmethod
def read_output(print_queue):
msg = print_queue.get()
while msg is not None:
print(msg, flush=True)
msg = print_queue.get()
@staticmethod
def open_and_save(asset_file_path, print_queue, diff_list, keep_changes=False, dry_run=False):
# check YAML version header, save original content
with open(str(asset_file_path), 'rb') as fp:
header = fp.read(len(YAML_HEADER))
try:
is_yaml_file = header.decode('utf-8') == YAML_HEADER
except UnicodeDecodeError:
is_yaml_file = False
finally:
if not is_yaml_file:
print_queue.put("Ignoring non-yaml file {}".format(asset_file_path))
return
else:
fp.seek(0)
print_queue.put("Processing {}".format(asset_file_path))
a_file_content = fp.read()
doc = UnityDocument.load_yaml(str(asset_file_path))
if dry_run:
return
try:
doc.dump_yaml()
with open(str(asset_file_path), 'rb') as fp:
b_file_content = fp.read()
# compare
if a_file_content != b_file_content:
diff_list.append(asset_file_path)
if not keep_changes:
with open(str(asset_file_path), 'wb') as fp:
fp.write(a_file_content)
except Exception:
with open(str(asset_file_path), 'wb') as fp:
fp.write(a_file_content)
raise
if __name__ == '__main__':
# None is considered successful
code = UnityProjectTester().run() or 0
exit(code) | en | 0.83087 | Class to run tests on a given Unity project folder # run given function Recurse the whole project folder looking for '.asset' files, load and save them all, and check that there are no modifications # compare regexp against absolute path # signal end of queue with None token # raise the first exception # check YAML version header, save original content # compare # None is considered successful | 2.65053 | 3 |
DP/Leetcode 221. Maximal Square.py | kaizhengny/LeetCode | 31 | 9339 | class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
dp = [[0]*n for _ in range(m)]
res = 0
for i in range(m):
dp[i][0] = int(matrix[i][0])
for j in range(n):
dp[0][j] = int(matrix[0][j])
for i in range(1, m):
for j in range(1, n):
if matrix[i][j] == '1':
dp[i][j] = min(dp[i-1][j],dp[i-1][j-1],dp[i][j-1])+1
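                    # dp[i][j] is the side length of the largest all-'1' square whose
                    # bottom-right corner sits at (i, j); it is limited by the squares
                    # ending directly above, to the left, and diagonally up-left,
                    # hence min(...) + 1.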
res = max(res, dp[i][j])
return res**2 | class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
dp = [[0]*n for _ in range(m)]
res = 0
for i in range(m):
dp[i][0] = int(matrix[i][0])
for j in range(n):
dp[0][j] = int(matrix[0][j])
for i in range(1, m):
for j in range(1, n):
if matrix[i][j] == '1':
dp[i][j] = min(dp[i-1][j],dp[i-1][j-1],dp[i][j-1])+1
res = max(res, dp[i][j])
return res**2 | none | 1 | 3.001089 | 3 |
|
dokang/harvesters/__init__.py | Polyconseil/dokang | 6 | 9340 | # -*- coding: utf-8 -*-
# Copyright (c) Polyconseil SAS. All rights reserved.
import hashlib
import json
import logging
import os
import re
from .html import html_config, HtmlHarvester # pylint: disable=unused-import
from .sphinx import ( # pylint: disable=unused-import
sphinx_config, sphinx_rtd_config,
SphinxHarvester, ReadTheDocsSphinxHarvester
)
logger = logging.getLogger(__name__)
def _must_process_path(path, include, exclude):
for exp in include:
if exp.match(path):
return True
for exp in exclude:
if exp.match(path):
return False
return True
def _compute_hash(path):
h = hashlib.md5()
with open(path, 'rb') as fp:
while 1:
buff = fp.read(8192)
if not buff:
break
h.update(buff)
return h.hexdigest()
def harvest_set(base_dir, doc_set, config, hashes, force):
"""Harvest a document set and return documents as dictionaries.
``config`` is the harvester configuration. It should contain a key
for each supported file extensions. ``hashes`` is a dictionary
that links the path of each indexed file to its hash. It is used
to decide whether the document should be indexed again. ``force``
indicates whether to reindex a document even if it has not ben
modified since the last indexation.
This function is a generator. It yields dictionaries. Each
dictionary should represent a document and contain the following
keys in addition to the keys returned by the harvester itself.
Each text-like value should be a string (in Python 3) or a unicode
object (in Python 2).
path
The path of the document relative to the root of the document
set.
set
The id of the document set. It should be ``doc_set``.
"""
config_copy = config.copy()
include = [re.compile(exp) for exp in config_copy.pop('include') or ()]
exclude = [re.compile(exp) for exp in config_copy.pop('exclude') or ()]
extensions = config_copy
for dir_path, _dir_names, file_names in os.walk(base_dir):
for filename in file_names:
path = os.path.join(dir_path, filename)
relative_path = os.path.relpath(path, base_dir)
if not _must_process_path(relative_path, include, exclude):
logger.debug('Excluded file "%s": include/exclude rules.', relative_path)
continue
_, extension = os.path.splitext(filename)
extension = extension.lstrip('.') # remove leading dot
harvester_class = extensions.get(extension)
if harvester_class is None:
logger.debug('Excluded file "%s": no harvester found for %s.', relative_path, extension)
continue
current_hash = _compute_hash(path)
indexed_hash = hashes.get(relative_path)
if not force and (indexed_hash == current_hash):
logger.debug('Excluded file: "%s": not modified since last indexation.', relative_path)
continue
try:
logger.debug('Indexing file "%s"', relative_path)
doc = harvester_class().harvest_file(path)
except Exception: # pylint: disable=broad-except
logger.exception("Could not index document %s", path)
else:
if doc:
if relative_path == 'index.html':
with open(os.path.join(base_dir, '.dokang'), 'w') as fp:
json.dump({'title': doc['title']}, fp)
doc['path'] = relative_path
doc['set'] = doc_set
doc['hash'] = current_hash
yield doc
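# Illustrative caller (assumed, not from the original package): `config` maps file extensions
# to harvester classes plus the include/exclude patterns popped above, and `hashes` is the
# previously indexed path -> md5 map used to skip unchanged files, e.g.:
#
#   config = {'html': HtmlHarvester, 'include': None, 'exclude': [r'.*/_draft/.*']}
#   for doc in harvest_set('/srv/docs/guide', 'guide', config, hashes={}, force=True):
#       index.add_document(doc)  # `index` is a hypothetical search backend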
| # -*- coding: utf-8 -*-
# Copyright (c) Polyconseil SAS. All rights reserved.
import hashlib
import json
import logging
import os
import re
from .html import html_config, HtmlHarvester # pylint: disable=unused-import
from .sphinx import ( # pylint: disable=unused-import
sphinx_config, sphinx_rtd_config,
SphinxHarvester, ReadTheDocsSphinxHarvester
)
logger = logging.getLogger(__name__)
def _must_process_path(path, include, exclude):
for exp in include:
if exp.match(path):
return True
for exp in exclude:
if exp.match(path):
return False
return True
def _compute_hash(path):
h = hashlib.md5()
with open(path, 'rb') as fp:
while 1:
buff = fp.read(8192)
if not buff:
break
h.update(buff)
return h.hexdigest()
def harvest_set(base_dir, doc_set, config, hashes, force):
"""Harvest a document set and return documents as dictionaries.
``config`` is the harvester configuration. It should contain a key
for each supported file extensions. ``hashes`` is a dictionary
that links the path of each indexed file to its hash. It is used
to decide whether the document should be indexed again. ``force``
indicates whether to reindex a document even if it has not ben
modified since the last indexation.
This function is a generator. It yields dictionaries. Each
dictionary should represent a document and contain the following
keys in addition to the keys returned by the harvester itself.
Each text-like value should be a string (in Python 3) or a unicode
object (in Python 2).
path
The path of the document relative to the root of the document
set.
set
The id of the document set. It should be ``doc_set``.
"""
config_copy = config.copy()
include = [re.compile(exp) for exp in config_copy.pop('include') or ()]
exclude = [re.compile(exp) for exp in config_copy.pop('exclude') or ()]
extensions = config_copy
for dir_path, _dir_names, file_names in os.walk(base_dir):
for filename in file_names:
path = os.path.join(dir_path, filename)
relative_path = os.path.relpath(path, base_dir)
if not _must_process_path(relative_path, include, exclude):
logger.debug('Excluded file "%s": include/exclude rules.', relative_path)
continue
_, extension = os.path.splitext(filename)
extension = extension.lstrip('.') # remove leading dot
harvester_class = extensions.get(extension)
if harvester_class is None:
logger.debug('Excluded file "%s": no harvester found for %s.', relative_path, extension)
continue
current_hash = _compute_hash(path)
indexed_hash = hashes.get(relative_path)
if not force and (indexed_hash == current_hash):
logger.debug('Excluded file: "%s": not modified since last indexation.', relative_path)
continue
try:
logger.debug('Indexing file "%s"', relative_path)
doc = harvester_class().harvest_file(path)
except Exception: # pylint: disable=broad-except
logger.exception("Could not index document %s", path)
else:
if doc:
if relative_path == 'index.html':
with open(os.path.join(base_dir, '.dokang'), 'w') as fp:
json.dump({'title': doc['title']}, fp)
doc['path'] = relative_path
doc['set'] = doc_set
doc['hash'] = current_hash
yield doc
| en | 0.835753 | # -*- coding: utf-8 -*- # Copyright (c) Polyconseil SAS. All rights reserved. # pylint: disable=unused-import # pylint: disable=unused-import Harvest a document set and return documents as dictionaries. ``config`` is the harvester configuration. It should contain a key for each supported file extensions. ``hashes`` is a dictionary that links the path of each indexed file to its hash. It is used to decide whether the document should be indexed again. ``force`` indicates whether to reindex a document even if it has not ben modified since the last indexation. This function is a generator. It yields dictionaries. Each dictionary should represent a document and contain the following keys in addition to the keys returned by the harvester itself. Each text-like value should be a string (in Python 3) or a unicode object (in Python 2). path The path of the document relative to the root of the document set. set The id of the document set. It should be ``doc_set``. # remove leading dot # pylint: disable=broad-except | 2.200569 | 2 |
__init__.py | semccomas/string-method-gmxapi | 6 | 9341 | __all__ = ["stringmethod"]
| __all__ = ["stringmethod"]
| none | 1 | 1.228092 | 1 |
|
carPooling/migrations/0018_auto_20190521_1651.py | yangtao4389/pinche | 1 | 9342 | # Generated by Django 2.0.4 on 2019-05-21 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carPooling', '0017_carpoolingrecunbook'),
]
operations = [
migrations.AlterField(
model_name='carpoolinguserconf',
name='c_name',
field=models.CharField(max_length=128, null=True, verbose_name='真实姓名'),
),
migrations.AlterField(
model_name='carpoolinguserconf',
name='c_phone',
field=models.CharField(db_index=True, max_length=11, verbose_name='电话号码'),
),
migrations.AlterField(
model_name='carpoolinguserconf',
name='c_weixin_id',
field=models.CharField(db_index=True, max_length=128, null=True, verbose_name='微信id'),
),
]
| # Generated by Django 2.0.4 on 2019-05-21 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('carPooling', '0017_carpoolingrecunbook'),
]
operations = [
migrations.AlterField(
model_name='carpoolinguserconf',
name='c_name',
field=models.CharField(max_length=128, null=True, verbose_name='真实姓名'),
),
migrations.AlterField(
model_name='carpoolinguserconf',
name='c_phone',
field=models.CharField(db_index=True, max_length=11, verbose_name='电话号码'),
),
migrations.AlterField(
model_name='carpoolinguserconf',
name='c_weixin_id',
field=models.CharField(db_index=True, max_length=128, null=True, verbose_name='微信id'),
),
]
| en | 0.773586 | # Generated by Django 2.0.4 on 2019-05-21 16:51 | 1.520161 | 2 |
src/fuckbot/ticker.py | Zer0-One/fuckbot | 0 | 9343 | import discord
import logging
TRADING_API_URL='https://cloud.iexapis.com/stable/stock/{0}/quote'
TRADING_API_ICON='https://iextrading.com/favicon.ico'
def ticker_embed(symbol):
ticker = discord.Embed(title=f"{symbol}".upper(), type="rich", color=3029236, url=TRADING_API_URL.format(symbol))
ticker.set_author(name="IEXTrading")
return ticker
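# Illustrative use inside a discord.py command (assumed, not part of the original file):
#
#   @bot.command()
#   async def quote(ctx, symbol: str):
#       await ctx.send(embed=ticker_embed(symbol))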
| import discord
import logging
TRADING_API_URL='https://cloud.iexapis.com/stable/stock/{0}/quote'
TRADING_API_ICON='https://iextrading.com/favicon.ico'
def ticker_embed(symbol):
ticker = discord.Embed(title=f"{symbol}".upper(), type="rich", color=3029236, url=TRADING_API_URL.format(symbol))
ticker.set_author(name="IEXTrading")
return ticker
| none | 1 | 2.25648 | 2 |
|
minos/lib/util/StateSet.py | johny-c/minos | 1 | 9344 | import bz2
import csv
import collections
import math
from enum import Enum
class Select(Enum):
FIRST = 'first'
RANGE_KEY = 'range_key'
RANGE_VALUE = 'range_value'
class SelectPolicy:
def __init__(self, policy, field=None):
self.policy = policy
self.field = field
class StateSet:
""" Wrapper for set of episode val/test states """
def __init__(self, scenes_file=None, states_files=None,
scene_filter=None, episode_filter=None, max_states_per_scene=None,
select_policy=SelectPolicy(Select.FIRST)):
self.states = []
self.scenes = []
self.scenes_by_id = {}
self.states_by_scene = {}
self.select_policy = select_policy
if scenes_file:
self._load_scenes(scenes_file, scene_filter)
if states_files:
if type(states_files) is str:
self._load_states(states_files, max_states_per_scene, episode_filter)
            elif isinstance(states_files, collections.abc.Iterable):
for states_file in states_files:
self._load_states(states_file, max_states_per_scene, episode_filter)
self._embed_states_in_scenes()
def get_splits(self, max_states_per_scene=None):
"""Get dictionary of StateSets keyed by scene 'set' i.e. dataset split"""
scenes_by_split = {}
for scene in self.scenes:
scenes_by_split.setdefault(scene['set'], []).append(scene)
state_sets_dict = {}
for split, scenes in scenes_by_split.items():
ss = StateSet()
ss._populate_from_lists(scenes, self.states_by_scene, max_states_per_scene)
state_sets_dict[split] = ss
return state_sets_dict
def get_scenes(self):
return self.scenes
def get_states(self):
return self.states
def get_states_by_scene_id(self, scene_id):
return self.states_by_scene[scene_id]
def _select_n_states(self, states, n):
# Select n states from big list of states
policy = self.select_policy.policy
field = self.select_policy.field
if n is not None and n < len(states):
if policy == Select.FIRST:
if field is not None:
# sort by field
states = sorted(states, key=lambda x: x[field])
return states[:n]
elif policy == Select.RANGE_KEY:
# sort by field
states = sorted(states, key=lambda x: x[field])
# select by evenly dividing indices
r = len(states)/float(n)
selected = []
for i in range(n):
si = int(math.floor(math.ceil(r*i)/2))
selected.append(states[si])
return selected
elif policy == Select.RANGE_VALUE:
# sort by field and get range (value)
states = sorted(states, key=lambda x: x[field])
fmin = states[0][field]
fmax = states[-1][field]
# print('Range is %f to %f' % (fmin,fmax))
# from range, divide up into n buckets
r = (fmax-fmin)/float(n)
buckets = []
for i in range(n):
buckets.append([])
for state in states:
bi = int(min(math.ceil((state[field] - fmin)/r), n-1))
buckets[bi].append(state)
# make sure all buckets have something
for i, bucket in enumerate(buckets):
if len(bucket) == 0:
# print('Nothing in bucket %d' % i)
# still some from other buckets
pi = max(i-1, 0)
ni = min(i+1, n-1)
nlen = len(buckets[ni])
plen = len(buckets[pi])
if nlen > plen:
# take half from bucket[ni] and put in current bucket
k = math.floor(nlen/2)
buckets[i] = buckets[ni][:k]
buckets[ni] = buckets[ni][k:]
else:
k = math.floor(plen/2)
buckets[i] = buckets[pi][:k]
buckets[pi] = buckets[pi][k:]
selected = []
for bucket in buckets:
bii = math.floor(len(bucket)/2)
selected.append(bucket[bii])
return selected
else:
raise ValueError('Unsupported select_policy ' + policy)
else:
return states
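    # The three selection policies handled above, as implemented:
    #   FIRST       - optionally sort by `field`, keep the first n states.
    #   RANGE_KEY   - sort by `field` and step through the list at stride len/n (the index is
    #                 then halved, so picks come from the lower half of the sorted order).
    #   RANGE_VALUE - sort by `field`, split its min-max range into n equal-width buckets,
    #                 borrow from neighbouring buckets when one is empty, keep one state each.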
def _populate_from_lists(self, my_scenes, my_states_by_scene, max_states_per_scene):
self.scenes = my_scenes
for scene in my_scenes:
scene_id = scene['id']
self.scenes_by_id[scene_id] = scene
if scene_id in my_states_by_scene:
my_states = self._select_n_states(my_states_by_scene[scene_id], max_states_per_scene)
self.states_by_scene[scene_id] = my_states
self.states += my_states
def _load_scenes(self, filename, scene_filter):
with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f:
reader = csv.DictReader(f)
self.scenes = []
for r in reader:
for v in ['nrooms', 'nobjects', 'nlevels']:
if v in r:
r[v] = int(r[v])
for v in ['dimX', 'dimY', 'dimZ', 'floorArea']:
if v in r:
r[v] = float(r[v])
if scene_filter and not scene_filter(r):
continue
self.scenes.append(r)
self.scenes_by_id[r['id']] = r
self.scenes.sort(key=lambda x: x['nobjects'])
def _load_states(self, filename, max_states_per_scene, state_filter):
with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f:
reader = csv.DictReader(f)
all_states = [r for r in reader]
# Convert scene state and group by sceneId
counter = 0
for r in all_states:
for v in ['startX', 'startY', 'startZ', 'startAngle', 'goalX', 'goalY', 'goalZ', 'dist', 'pathDist']:
r[v] = float(r[v]) if v in r else None
for v in ['episodeId', 'pathNumDoors', 'pathNumRooms', 'level']:
r[v] = int(r[v]) if v in r else None
scene_id = r['sceneId']
scene_states = self.states_by_scene.setdefault(scene_id, [])
rec = {
'episode_id': counter,
'scene_id': r['sceneId'],
'room_id': r['roomId'],
'start': {'position': [r['startX'], r['startY'], r['startZ']], 'angle': r['startAngle']},
'goal': {'id': r['goalObjectId'], 'position': [r['goalX'], r['goalY'], r['goalZ']]},
'dist': r['dist']
}
for k in ['pathDist', 'pathNumRooms', 'pathRoomIds', 'pathNumDoors', 'pathDoorIds', 'level']:
if k in r:
rec[k] = r[k]
if not state_filter or state_filter(rec):
scene_states.append(rec)
counter = counter + 1
# Filter down to states per scene and create big list of all scenes
states = []
for scene_id, scene_states in self.states_by_scene.items():
self.states_by_scene[scene_id] = self._select_n_states(scene_states, max_states_per_scene)
states += self.states_by_scene[scene_id]
self.states = states
def _embed_states_in_scenes(self):
for state in self.states:
scene_id = state['scene_id']
if scene_id in self.scenes_by_id:
self.scenes_by_id[scene_id].setdefault('states', []).append(state)
scenes_with_no_states = []
for i, scene in enumerate(self.scenes):
if 'states' not in scene or len(scene['states']) == 0:
scenes_with_no_states.append(scene['id'])
del self.scenes_by_id[scene['id']]
self.scenes = [s for s in self.scenes if s['id'] not in scenes_with_no_states]
#print('Removed scenes with no episode states: ' + ','.join(scenes_with_no_states))
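# Illustrative use (assumed, not from the original module): build a StateSet from the scenes
# and episode-states CSVs, then pull per-split subsets:
#
#   ss = StateSet(scenes_file='scenes.csv', states_files='episode_states.csv.bz2',
#                 max_states_per_scene=10,
#                 select_policy=SelectPolicy(Select.RANGE_VALUE, field='dist'))
#   splits = ss.get_splits(max_states_per_scene=5)  # keyed by the scene 'set' column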
def main():
import argparse
# Argument processing
parser = argparse.ArgumentParser(description='Load state set')
parser.add_argument('-n', '--limit',
type=int,
help='Number of states per scene')
parser.add_argument('--select',
default=Select.FIRST,
type=Select,
                        help='Policy for selecting which states to keep (first, range_key, range_value)')
parser.add_argument('--field',
default=None,
help='Field to use for selection')
parser.add_argument('--scenes',
type=str,
default=None,
help='Scenes file to load')
parser.add_argument('input',
help='Input file to load')
args = parser.parse_args()
state_set = StateSet(scenes_file=args.scenes,
states_files=args.input,
max_states_per_scene=args.limit,
select_policy=SelectPolicy(args.select, args.field))
for state in state_set.states:
print(state)
if __name__ == "__main__":
main()
| import bz2
import csv
import collections
import math
from enum import Enum
class Select(Enum):
FIRST = 'first'
RANGE_KEY = 'range_key'
RANGE_VALUE = 'range_value'
class SelectPolicy:
def __init__(self, policy, field=None):
self.policy = policy
self.field = field
class StateSet:
""" Wrapper for set of episode val/test states """
def __init__(self, scenes_file=None, states_files=None,
scene_filter=None, episode_filter=None, max_states_per_scene=None,
select_policy=SelectPolicy(Select.FIRST)):
self.states = []
self.scenes = []
self.scenes_by_id = {}
self.states_by_scene = {}
self.select_policy = select_policy
if scenes_file:
self._load_scenes(scenes_file, scene_filter)
if states_files:
if type(states_files) is str:
self._load_states(states_files, max_states_per_scene, episode_filter)
            elif isinstance(states_files, collections.abc.Iterable):
for states_file in states_files:
self._load_states(states_file, max_states_per_scene, episode_filter)
self._embed_states_in_scenes()
def get_splits(self, max_states_per_scene=None):
"""Get dictionary of StateSets keyed by scene 'set' i.e. dataset split"""
scenes_by_split = {}
for scene in self.scenes:
scenes_by_split.setdefault(scene['set'], []).append(scene)
state_sets_dict = {}
for split, scenes in scenes_by_split.items():
ss = StateSet()
ss._populate_from_lists(scenes, self.states_by_scene, max_states_per_scene)
state_sets_dict[split] = ss
return state_sets_dict
def get_scenes(self):
return self.scenes
def get_states(self):
return self.states
def get_states_by_scene_id(self, scene_id):
return self.states_by_scene[scene_id]
def _select_n_states(self, states, n):
# Select n states from big list of states
policy = self.select_policy.policy
field = self.select_policy.field
if n is not None and n < len(states):
if policy == Select.FIRST:
if field is not None:
# sort by field
states = sorted(states, key=lambda x: x[field])
return states[:n]
elif policy == Select.RANGE_KEY:
# sort by field
states = sorted(states, key=lambda x: x[field])
# select by evenly dividing indices
r = len(states)/float(n)
selected = []
for i in range(n):
si = int(math.floor(math.ceil(r*i)/2))
selected.append(states[si])
return selected
elif policy == Select.RANGE_VALUE:
# sort by field and get range (value)
states = sorted(states, key=lambda x: x[field])
fmin = states[0][field]
fmax = states[-1][field]
# print('Range is %f to %f' % (fmin,fmax))
# from range, divide up into n buckets
r = (fmax-fmin)/float(n)
buckets = []
for i in range(n):
buckets.append([])
for state in states:
bi = int(min(math.ceil((state[field] - fmin)/r), n-1))
buckets[bi].append(state)
# make sure all buckets have something
for i, bucket in enumerate(buckets):
if len(bucket) == 0:
# print('Nothing in bucket %d' % i)
# still some from other buckets
pi = max(i-1, 0)
ni = min(i+1, n-1)
nlen = len(buckets[ni])
plen = len(buckets[pi])
if nlen > plen:
# take half from bucket[ni] and put in current bucket
k = math.floor(nlen/2)
buckets[i] = buckets[ni][:k]
buckets[ni] = buckets[ni][k:]
else:
k = math.floor(plen/2)
buckets[i] = buckets[pi][:k]
buckets[pi] = buckets[pi][k:]
selected = []
for bucket in buckets:
bii = math.floor(len(bucket)/2)
selected.append(bucket[bii])
return selected
else:
raise ValueError('Unsupported select_policy ' + policy)
else:
return states
def _populate_from_lists(self, my_scenes, my_states_by_scene, max_states_per_scene):
self.scenes = my_scenes
for scene in my_scenes:
scene_id = scene['id']
self.scenes_by_id[scene_id] = scene
if scene_id in my_states_by_scene:
my_states = self._select_n_states(my_states_by_scene[scene_id], max_states_per_scene)
self.states_by_scene[scene_id] = my_states
self.states += my_states
def _load_scenes(self, filename, scene_filter):
with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f:
reader = csv.DictReader(f)
self.scenes = []
for r in reader:
for v in ['nrooms', 'nobjects', 'nlevels']:
if v in r:
r[v] = int(r[v])
for v in ['dimX', 'dimY', 'dimZ', 'floorArea']:
if v in r:
r[v] = float(r[v])
if scene_filter and not scene_filter(r):
continue
self.scenes.append(r)
self.scenes_by_id[r['id']] = r
self.scenes.sort(key=lambda x: x['nobjects'])
def _load_states(self, filename, max_states_per_scene, state_filter):
with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f:
reader = csv.DictReader(f)
all_states = [r for r in reader]
# Convert scene state and group by sceneId
counter = 0
for r in all_states:
for v in ['startX', 'startY', 'startZ', 'startAngle', 'goalX', 'goalY', 'goalZ', 'dist', 'pathDist']:
r[v] = float(r[v]) if v in r else None
for v in ['episodeId', 'pathNumDoors', 'pathNumRooms', 'level']:
r[v] = int(r[v]) if v in r else None
scene_id = r['sceneId']
scene_states = self.states_by_scene.setdefault(scene_id, [])
rec = {
'episode_id': counter,
'scene_id': r['sceneId'],
'room_id': r['roomId'],
'start': {'position': [r['startX'], r['startY'], r['startZ']], 'angle': r['startAngle']},
'goal': {'id': r['goalObjectId'], 'position': [r['goalX'], r['goalY'], r['goalZ']]},
'dist': r['dist']
}
for k in ['pathDist', 'pathNumRooms', 'pathRoomIds', 'pathNumDoors', 'pathDoorIds', 'level']:
if k in r:
rec[k] = r[k]
if not state_filter or state_filter(rec):
scene_states.append(rec)
counter = counter + 1
# Filter down to states per scene and create big list of all scenes
states = []
for scene_id, scene_states in self.states_by_scene.items():
self.states_by_scene[scene_id] = self._select_n_states(scene_states, max_states_per_scene)
states += self.states_by_scene[scene_id]
self.states = states
def _embed_states_in_scenes(self):
for state in self.states:
scene_id = state['scene_id']
if scene_id in self.scenes_by_id:
self.scenes_by_id[scene_id].setdefault('states', []).append(state)
scenes_with_no_states = []
for i, scene in enumerate(self.scenes):
if 'states' not in scene or len(scene['states']) == 0:
scenes_with_no_states.append(scene['id'])
del self.scenes_by_id[scene['id']]
self.scenes = [s for s in self.scenes if s['id'] not in scenes_with_no_states]
#print('Removed scenes with no episode states: ' + ','.join(scenes_with_no_states))
def main():
import argparse
# Argument processing
parser = argparse.ArgumentParser(description='Load state set')
parser.add_argument('-n', '--limit',
type=int,
help='Number of states per scene')
parser.add_argument('--select',
default=Select.FIRST,
type=Select,
                        help='Policy for selecting which states to keep (first, range_key, range_value)')
parser.add_argument('--field',
default=None,
help='Field to use for selection')
parser.add_argument('--scenes',
type=str,
default=None,
help='Scenes file to load')
parser.add_argument('input',
help='Input file to load')
args = parser.parse_args()
state_set = StateSet(scenes_file=args.scenes,
states_files=args.input,
max_states_per_scene=args.limit,
select_policy=SelectPolicy(args.select, args.field))
for state in state_set.states:
print(state)
if __name__ == "__main__":
main()
| en | 0.861988 | Wrapper for set of episode val/test states Get dictionary of StateSets keyed by scene 'set' i.e. dataset split # Select n states from big list of states # sort by field # sort by field # select by evenly dividing indices # sort by field and get range (value) # print('Range is %f to %f' % (fmin,fmax)) # from range, divide up into n buckets # make sure all buckets have something # print('Nothing in bucket %d' % i) # still some from other buckets # take half from bucket[ni] and put in current bucket # Convert scene state and group by sceneId # Filter down to states per scene and create big list of all scenes #print('Removed scenes with no episode states: ' + ','.join(scenes_with_no_states)) # Argument processing | 2.651506 | 3 |
pagetags/configuration/development.py | pmatigakis/pagetags | 0 | 9345 |
DEBUG = True
TESTING = False
| DEBUG = True
TESTING = False | none | 1 | 0.99697 | 1 |
|
hpcrocket/pyfilesystem/factory.py | SvenMarcus/hpc-rocket | 7 | 9346 | from hpcrocket.core.filesystem import Filesystem, FilesystemFactory
from hpcrocket.core.launchoptions import Options
from hpcrocket.pyfilesystem.localfilesystem import LocalFilesystem
from hpcrocket.pyfilesystem.sshfilesystem import SSHFilesystem
class PyFilesystemFactory(FilesystemFactory):
def __init__(self, options: Options) -> None:
self._options = options
def create_local_filesystem(self) -> Filesystem:
return LocalFilesystem(".")
def create_ssh_filesystem(self) -> Filesystem:
connection = self._options.connection
proxyjumps = self._options.proxyjumps
return SSHFilesystem(connection, proxyjumps)
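# Sketch of typical consumption (assumed, not part of the original file):
#
#   factory = PyFilesystemFactory(options)
#   local = factory.create_local_filesystem()
#   remote = factory.create_ssh_filesystem()  # uses options.connection / options.proxyjumps
#   # launcher code can then copy job files between `local` and `remote`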
| from hpcrocket.core.filesystem import Filesystem, FilesystemFactory
from hpcrocket.core.launchoptions import Options
from hpcrocket.pyfilesystem.localfilesystem import LocalFilesystem
from hpcrocket.pyfilesystem.sshfilesystem import SSHFilesystem
class PyFilesystemFactory(FilesystemFactory):
def __init__(self, options: Options) -> None:
self._options = options
def create_local_filesystem(self) -> Filesystem:
return LocalFilesystem(".")
def create_ssh_filesystem(self) -> Filesystem:
connection = self._options.connection
proxyjumps = self._options.proxyjumps
return SSHFilesystem(connection, proxyjumps)
| none | 1 | 2.153482 | 2 |
|
cqlsh_tests/cqlsh_tools.py | vincewhite/cassandra-dtest | 0 | 9347 |
import csv
import random
import cassandra
from cassandra.cluster import ResultSet
from typing import List
class DummyColorMap(object):
def __getitem__(self, *args):
return ''
def csv_rows(filename, delimiter=None):
"""
Given a filename, opens a csv file and yields it line by line.
"""
reader_opts = {}
if delimiter is not None:
reader_opts['delimiter'] = delimiter
    with open(filename, 'r', newline='') as csvfile:
for row in csv.reader(csvfile, **reader_opts):
yield row
def assert_csvs_items_equal(filename1, filename2):
with open(filename1, 'r') as x, open(filename2, 'r') as y:
assert list(x.readlines()) == list(y.readlines())
def random_list(gen=None, n=None):
if gen is None:
def gen():
return random.randint(-1000, 1000)
if n is None:
def length():
return random.randint(1, 5)
else:
def length():
return n
return [gen() for _ in range(length())]
def write_rows_to_csv(filename, data):
    with open(filename, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
for row in data:
writer.writerow(row)
        csvfile.close()
def deserialize_date_fallback_int(byts, protocol_version):
timestamp_ms = cassandra.marshal.int64_unpack(byts)
try:
return cassandra.util.datetime_from_timestamp(timestamp_ms / 1000.0)
except OverflowError:
return timestamp_ms
def monkeypatch_driver():
"""
Monkeypatches the `cassandra` driver module in the same way
that clqsh does. Returns a dictionary containing the original values of
the monkeypatched names.
"""
cache = {'BytesType_deserialize': cassandra.cqltypes.BytesType.deserialize,
'DateType_deserialize': cassandra.cqltypes.DateType.deserialize,
'support_empty_values': cassandra.cqltypes.CassandraType.support_empty_values}
cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts))
cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
cassandra.cqltypes.CassandraType.support_empty_values = True
if hasattr(cassandra, 'deserializers'):
cache['DesDateType'] = cassandra.deserializers.DesDateType
del cassandra.deserializers.DesDateType
return cache
def unmonkeypatch_driver(cache):
"""
Given a dictionary that was used to cache parts of `cassandra` for
monkeypatching, restore those values to the `cassandra` module.
"""
cassandra.cqltypes.BytesType.deserialize = staticmethod(cache['BytesType_deserialize'])
cassandra.cqltypes.DateType.deserialize = staticmethod(cache['DateType_deserialize'])
cassandra.cqltypes.CassandraType.support_empty_values = cache['support_empty_values']
if hasattr(cassandra, 'deserializers'):
cassandra.deserializers.DesDateType = cache['DesDateType']
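# The two helpers above are intended to be used as a pair (illustrative, not from the
# original module):
#
#   cache = monkeypatch_driver()
#   try:
#       pass  # run cqlsh-style assertions that rely on the patched byte/date handling
#   finally:
#       unmonkeypatch_driver(cache)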
def assert_resultset_contains(got: ResultSet, expected: List[tuple]) -> None:
"""
So this is slow. I would hope a ResultSet has the capability of pulling data by PK or clustering,
however I'm not finding it atm. As such, this method isn't intended for use with large datasets.
:param got: ResultSet, expect schema of [a, b]
:param expected: list of tuples with 2 members corresponding with a/b schema of ResultSet
"""
# Adding a touch of sanity check so people don't mis-use this. n^2 is bad.
assert len(expected) <= 1000, 'This is a slow comparison method. Don\'t use for > 1000 tuples.'
# First quick check: if we have a different count, we can just die.
assert len(got.current_rows) == len(expected)
for t in expected:
assert len(t) == 2, 'Got unexpected tuple len. Expected 2, got tuple: {}'.format(t)
found = False
for row in got.current_rows:
if found:
break
if row.a == t[0] and row.b == t[1]:
found = True
assert found, 'Failed to find expected row: {}'.format(t)
| import csv
import random
import cassandra
from cassandra.cluster import ResultSet
from typing import List
class DummyColorMap(object):
def __getitem__(self, *args):
return ''
def csv_rows(filename, delimiter=None):
"""
Given a filename, opens a csv file and yields it line by line.
"""
reader_opts = {}
if delimiter is not None:
reader_opts['delimiter'] = delimiter
    with open(filename, 'r', newline='') as csvfile:
for row in csv.reader(csvfile, **reader_opts):
yield row
def assert_csvs_items_equal(filename1, filename2):
with open(filename1, 'r') as x, open(filename2, 'r') as y:
assert list(x.readlines()) == list(y.readlines())
def random_list(gen=None, n=None):
if gen is None:
def gen():
return random.randint(-1000, 1000)
if n is None:
def length():
return random.randint(1, 5)
else:
def length():
return n
return [gen() for _ in range(length())]
def write_rows_to_csv(filename, data):
    with open(filename, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
for row in data:
writer.writerow(row)
        csvfile.close()
def deserialize_date_fallback_int(byts, protocol_version):
timestamp_ms = cassandra.marshal.int64_unpack(byts)
try:
return cassandra.util.datetime_from_timestamp(timestamp_ms / 1000.0)
except OverflowError:
return timestamp_ms
def monkeypatch_driver():
"""
Monkeypatches the `cassandra` driver module in the same way
that clqsh does. Returns a dictionary containing the original values of
the monkeypatched names.
"""
cache = {'BytesType_deserialize': cassandra.cqltypes.BytesType.deserialize,
'DateType_deserialize': cassandra.cqltypes.DateType.deserialize,
'support_empty_values': cassandra.cqltypes.CassandraType.support_empty_values}
cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts))
cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)
cassandra.cqltypes.CassandraType.support_empty_values = True
if hasattr(cassandra, 'deserializers'):
cache['DesDateType'] = cassandra.deserializers.DesDateType
del cassandra.deserializers.DesDateType
return cache
def unmonkeypatch_driver(cache):
"""
Given a dictionary that was used to cache parts of `cassandra` for
monkeypatching, restore those values to the `cassandra` module.
"""
cassandra.cqltypes.BytesType.deserialize = staticmethod(cache['BytesType_deserialize'])
cassandra.cqltypes.DateType.deserialize = staticmethod(cache['DateType_deserialize'])
cassandra.cqltypes.CassandraType.support_empty_values = cache['support_empty_values']
if hasattr(cassandra, 'deserializers'):
cassandra.deserializers.DesDateType = cache['DesDateType']
def assert_resultset_contains(got: ResultSet, expected: List[tuple]) -> None:
"""
So this is slow. I would hope a ResultSet has the capability of pulling data by PK or clustering,
however I'm not finding it atm. As such, this method isn't intended for use with large datasets.
:param got: ResultSet, expect schema of [a, b]
:param expected: list of tuples with 2 members corresponding with a/b schema of ResultSet
"""
# Adding a touch of sanity check so people don't mis-use this. n^2 is bad.
assert len(expected) <= 1000, 'This is a slow comparison method. Don\'t use for > 1000 tuples.'
# First quick check: if we have a different count, we can just die.
assert len(got.current_rows) == len(expected)
for t in expected:
assert len(t) == 2, 'Got unexpected tuple len. Expected 2, got tuple: {}'.format(t)
found = False
for row in got.current_rows:
if found:
break
if row.a == t[0] and row.b == t[1]:
found = True
assert found, 'Failed to find expected row: {}'.format(t) | en | 0.90376 | Given a filename, opens a csv file and yields it line by line. Monkeypatches the `cassandra` driver module in the same way that clqsh does. Returns a dictionary containing the original values of the monkeypatched names. Given a dictionary that was used to cache parts of `cassandra` for monkeypatching, restore those values to the `cassandra` module. So this is slow. I would hope a ResultSet has the capability of pulling data by PK or clustering, however I'm not finding it atm. As such, this method isn't intended for use with large datasets. :param got: ResultSet, expect schema of [a, b] :param expected: list of tuples with 2 members corresponding with a/b schema of ResultSet # Adding a touch of sanity check so people don't mis-use this. n^2 is bad. # First quick check: if we have a different count, we can just die. | 3.009903 | 3 |
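Not part of the original file above: a minimal, hypothetical sketch of how the monkeypatching helpers are meant to be paired. It assumes the `cassandra` driver package is installed and that the functions above are importable into the current module.

# Hypothetical usage; monkeypatch_driver / unmonkeypatch_driver come from the utilities above.
originals = monkeypatch_driver()      # patch deserializers the way cqlsh does
try:
    pass                              # run queries here, e.g. assert_resultset_contains(rows, [(1, 10)])
finally:
    unmonkeypatch_driver(originals)   # always restore the driver's original deserializers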
tests/unit/sagemaker/tensorflow/test_estimator_init.py | LastRemote/sagemaker-python-sdk | 1,690 | 9348 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from mock import Mock, patch
from packaging import version
import pytest
from sagemaker.tensorflow import TensorFlow
REGION = "us-west-2"
ENV_INPUT = {"env_key1": "env_val1", "env_key2": "env_val2", "env_key3": "env_val3"}
@pytest.fixture()
def sagemaker_session():
return Mock(name="sagemaker_session", boto_region_name=REGION)
def _build_tf(sagemaker_session, **kwargs):
return TensorFlow(
sagemaker_session=sagemaker_session,
entry_point="dummy.py",
role="dummy-role",
instance_count=1,
instance_type="ml.c4.xlarge",
**kwargs,
)
@patch("sagemaker.fw_utils.python_deprecation_warning")
def test_estimator_py2_deprecation_warning(warning, sagemaker_session):
estimator = _build_tf(sagemaker_session, framework_version="2.1.1", py_version="py2")
assert estimator.py_version == "py2"
warning.assert_called_with("tensorflow", "2.1.1")
def test_py2_version_deprecated(sagemaker_session):
with pytest.raises(AttributeError) as e:
_build_tf(sagemaker_session, framework_version="2.1.2", py_version="py2")
msg = (
"Python 2 containers are only available with 2.1.1 and lower versions. "
"Please use a Python 3 container."
)
assert msg in str(e.value)
def test_py2_version_is_not_deprecated(sagemaker_session):
estimator = _build_tf(sagemaker_session, framework_version="1.15.0", py_version="py2")
assert estimator.py_version == "py2"
estimator = _build_tf(sagemaker_session, framework_version="2.0.0", py_version="py2")
assert estimator.py_version == "py2"
def test_framework_name(sagemaker_session):
tf = _build_tf(sagemaker_session, framework_version="1.15.2", py_version="py3")
assert tf._framework_name == "tensorflow"
def test_tf_add_environment_variables(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
environment=ENV_INPUT,
)
assert tf.environment == ENV_INPUT
def test_tf_miss_environment_variables(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
environment=None,
)
assert not tf.environment
def test_enable_sm_metrics(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
enable_sagemaker_metrics=True,
)
assert tf.enable_sagemaker_metrics
def test_disable_sm_metrics(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
enable_sagemaker_metrics=False,
)
assert not tf.enable_sagemaker_metrics
def test_disable_sm_metrics_if_fw_ver_is_less_than_1_15(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) > version.Version("1.14"):
pytest.skip("This test is for TF 1.14 and lower.")
tf = _build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
image_uri="old-image",
)
assert tf.enable_sagemaker_metrics is None
def test_enable_sm_metrics_if_fw_ver_is_at_least_1_15(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) < version.Version("1.15"):
pytest.skip("This test is for TF 1.15 and higher.")
tf = _build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
)
assert tf.enable_sagemaker_metrics
def test_require_image_uri_if_fw_ver_is_less_than_1_11(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) > version.Version("1.10"):
pytest.skip("This test is for TF 1.10 and lower.")
with pytest.raises(ValueError) as e:
_build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
)
expected_msg = (
"TF {version} supports only legacy mode. Please supply the image URI directly with "
"'image_uri=520713654638.dkr.ecr.{region}.amazonaws.com/"
"sagemaker-tensorflow:{version}-cpu-py2' and set 'model_dir=False'. If you are using any "
"legacy parameters (training_steps, evaluation_steps, checkpoint_path, requirements_file), "
"make sure to pass them directly as hyperparameters instead."
).format(version=tensorflow_training_version, region=REGION)
assert expected_msg in str(e.value)
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from mock import Mock, patch
from packaging import version
import pytest
from sagemaker.tensorflow import TensorFlow
REGION = "us-west-2"
ENV_INPUT = {"env_key1": "env_val1", "env_key2": "env_val2", "env_key3": "env_val3"}
@pytest.fixture()
def sagemaker_session():
return Mock(name="sagemaker_session", boto_region_name=REGION)
def _build_tf(sagemaker_session, **kwargs):
return TensorFlow(
sagemaker_session=sagemaker_session,
entry_point="dummy.py",
role="dummy-role",
instance_count=1,
instance_type="ml.c4.xlarge",
**kwargs,
)
@patch("sagemaker.fw_utils.python_deprecation_warning")
def test_estimator_py2_deprecation_warning(warning, sagemaker_session):
estimator = _build_tf(sagemaker_session, framework_version="2.1.1", py_version="py2")
assert estimator.py_version == "py2"
warning.assert_called_with("tensorflow", "2.1.1")
def test_py2_version_deprecated(sagemaker_session):
with pytest.raises(AttributeError) as e:
_build_tf(sagemaker_session, framework_version="2.1.2", py_version="py2")
msg = (
"Python 2 containers are only available with 2.1.1 and lower versions. "
"Please use a Python 3 container."
)
assert msg in str(e.value)
def test_py2_version_is_not_deprecated(sagemaker_session):
estimator = _build_tf(sagemaker_session, framework_version="1.15.0", py_version="py2")
assert estimator.py_version == "py2"
estimator = _build_tf(sagemaker_session, framework_version="2.0.0", py_version="py2")
assert estimator.py_version == "py2"
def test_framework_name(sagemaker_session):
tf = _build_tf(sagemaker_session, framework_version="1.15.2", py_version="py3")
assert tf._framework_name == "tensorflow"
def test_tf_add_environment_variables(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
environment=ENV_INPUT,
)
assert tf.environment == ENV_INPUT
def test_tf_miss_environment_variables(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
environment=None,
)
assert not tf.environment
def test_enable_sm_metrics(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
enable_sagemaker_metrics=True,
)
assert tf.enable_sagemaker_metrics
def test_disable_sm_metrics(sagemaker_session):
tf = _build_tf(
sagemaker_session,
framework_version="1.15.2",
py_version="py3",
enable_sagemaker_metrics=False,
)
assert not tf.enable_sagemaker_metrics
def test_disable_sm_metrics_if_fw_ver_is_less_than_1_15(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) > version.Version("1.14"):
pytest.skip("This test is for TF 1.14 and lower.")
tf = _build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
image_uri="old-image",
)
assert tf.enable_sagemaker_metrics is None
def test_enable_sm_metrics_if_fw_ver_is_at_least_1_15(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) < version.Version("1.15"):
pytest.skip("This test is for TF 1.15 and higher.")
tf = _build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
)
assert tf.enable_sagemaker_metrics
def test_require_image_uri_if_fw_ver_is_less_than_1_11(
sagemaker_session, tensorflow_training_version, tensorflow_training_py_version
):
if version.Version(tensorflow_training_version) > version.Version("1.10"):
pytest.skip("This test is for TF 1.10 and lower.")
with pytest.raises(ValueError) as e:
_build_tf(
sagemaker_session,
framework_version=tensorflow_training_version,
py_version=tensorflow_training_py_version,
)
expected_msg = (
"TF {version} supports only legacy mode. Please supply the image URI directly with "
"'image_uri=520713654638.dkr.ecr.{region}.amazonaws.com/"
"sagemaker-tensorflow:{version}-cpu-py2' and set 'model_dir=False'. If you are using any "
"legacy parameters (training_steps, evaluation_steps, checkpoint_path, requirements_file), "
"make sure to pass them directly as hyperparameters instead."
).format(version=tensorflow_training_version, region=REGION)
assert expected_msg in str(e.value)
| en | 0.89298 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. | 1.75947 | 2 |
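An illustrative sketch of the estimator construction these tests exercise. The session mock mirrors the fixture above, and the framework/py versions are simply values the tests themselves use, not recommendations; `unittest.mock` stands in for the third-party `mock` package imported in the file.

from unittest.mock import Mock          # stdlib stand-in for the `mock` package used above
from sagemaker.tensorflow import TensorFlow

session = Mock(name="sagemaker_session", boto_region_name="us-west-2")
estimator = TensorFlow(
    sagemaker_session=session,
    entry_point="dummy.py",
    role="dummy-role",
    instance_count=1,
    instance_type="ml.c4.xlarge",
    framework_version="1.15.2",   # value taken from the tests above
    py_version="py3",
)
assert estimator.py_version == "py3"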
testing.py | sofwerx/mycroft-articlekeyword-skill | 0 | 9349 |
import subprocess
proc = subprocess.Popen(['python3', 'articlekeywords.py', 'aih.txt' , '5'], stdout=subprocess.PIPE )
#print(type(proc.communicate()[0]))
# path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/'
text = proc.stdout.read()
rows = text.splitlines()
#print(text.splitlines())
count = 0
s = ""
for row in rows:
divide = row.split()
wordCount = len(divide)
if wordCount > 1:
count = count + 1
s += str(count)
s += " "
s += str(divide[1])
s += " "
print(s)
# with open(path + 'out.csv', 'r') as content_file:
# text = content_file.read()
# self.speak_dialog("bitcoin.price", data={'price': str(text)})
#file_path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/out.csv'
#wordCount = 10
#
# text = Path(file_path).read_text()
# #print(exit_code) |
import subprocess
proc = subprocess.Popen(['python3', 'articlekeywords.py', 'aih.txt' , '5'], stdout=subprocess.PIPE )
#print(type(proc.communicate()[0]))
# path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/'
text = proc.stdout.read()
rows = text.splitlines()
#print(text.splitlines())
count = 0
s = ""
for row in rows:
divide = row.split()
wordCount = len(divide)
if wordCount > 1:
count = count + 1
s += str(count)
s += " "
s += str(divide[1])
s += " "
print(s)
# with open(path + 'out.csv', 'r') as content_file:
# text = content_file.read()
# self.speak_dialog("bitcoin.price", data={'price': str(text)})
#file_path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/out.csv'
#wordCount = 10
#
# text = Path(file_path).read_text()
# #print(exit_code) | en | 0.465532 | #print(type(proc.communicate()[0])) # path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/' #print(text.splitlines()) # with open(path + 'out.csv', 'r') as content_file: # text = content_file.read() # self.speak_dialog("bitcoin.price", data={'price': str(text)}) #file_path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/out.csv' #wordCount = 10 # # text = Path(file_path).read_text() # #print(exit_code) | 2.864502 | 3 |
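A hypothetical variant of the keyword-numbering loop above using `check_output`; it assumes `articlekeywords.py` and `aih.txt` exist as in the script, and the same two-column output format that the original parsing relies on.

import subprocess

out = subprocess.check_output(['python3', 'articlekeywords.py', 'aih.txt', '5'])
keywords = [row.split()[1] for row in out.decode().splitlines() if len(row.split()) > 1]
print(' '.join('{} {}'.format(i, kw) for i, kw in enumerate(keywords, start=1)))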
slcyGeneral.py | mirrorcoloured/slcypi | 0 | 9350 | # Python 2.7.1
import RPi.GPIO as GPIO
from twython import Twython
import time
import sys
import os
import pygame
APP_KEY='zmmlyAJzMDIntLpDYmSH98gbw'
APP_SECRET='<KEY>'
OAUTH_TOKEN='<KEY>'
OAUTH_TOKEN_SECRET='<KEY>'
applepislcy = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
### GENERAL ###
def Cleanup():
GPIO.cleanup()
def Sleep(seconds):
"""Puts the program to sleep"""
time.sleep(seconds)
def Alert(channel):
"""Simple alert function for testing event interrupts"""
print('Alert on channel',channel)
def TimeString():
"""Returns the current time"""
t = time.localtime()
return str(t[0])+'.'+str(t[1])+'.'+str(t[2])+'.'+str(t[3])+'.'+str(t[4])+'.'+str(t[5])
def LoadPins(mapping,inp):
"""Organizes an input into a pin mapping dict
mapping <list>, ['IA','IB']
inp <dict>, <list>, <int> {'IA':1,'IB':2}, [1,2]
"""
if type(inp) is int and len(mapping) == 1:
return {mapping[0]:inp}
elif type(inp) is list and len(mapping) == len(inp):
o = {}
for i in range(len(inp)):
o[mapping[i]] = inp[i]
return o
elif type(inp) is dict:
return inp
else:
print('Invalid input for pins:',inp,type(inp))
print('Expected:',mapping)
return {}
def BoolToSign(inp):
"""Converts boolean bits into signed bits
0 -> -1
1 -> 1"""
return (inp * 2) - 1
def SignToBool(inp):
"""Converts signed bits into boolean bits
-1 -> 0
1 -> 1"""
return (inp + 1) / 2
### PYGAME ###
def WindowSetup(size=(300,50),caption='',text='',background=(0,0,0),foreground=(255,255,255)):
"""Sets up a pygame window to take keyboard input
size <tuple>, width by height
caption <str>, window title bar
text <str>, text to display in window, accepts \n
background <tuple>, foreground <tuple>, (r,g,b) color
"""
pygame.init()
screen = pygame.display.set_mode(size,0,32)
pygame.display.set_caption(caption)
myfont = pygame.font.SysFont('Monospace',15)
labels = []
lines = text.split('\n')
for line in lines:
labels.append(myfont.render(line,1,foreground))
screen.fill(background)
y = 0
for label in labels:
screen.blit(label, (0,y))
y += 15
pygame.display.update()
def InputLoop(eventmap):
"""Begins a pygame loop, mapping key inputs to functions
eventmap <dict>, {pygame.K_t:myfunction}
"""
index = 0
while True:
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
#print("{0}: You pressed {1:c}".format ( index , event.key ))
if event.key in eventmap:
eventmap[event.key]()
elif event.type == pygame.QUIT:
pygame.quit()
sys.exit()
def InputLoopDemo():
def dog():
print('woof')
def cat():
print('meow')
def fish():
print('blub')
WindowSetup(caption='pet simulator',text='d for dog\nc for cat\nf for fish')
InputLoop({pygame.K_d:dog, pygame.K_c:cat, pygame.K_f:fish})
### TWITTER ###
def Tweet(twit,statustext):
"""Tweets a message
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
statustext <str>, must be <= 140 characters
"""
if len(statustext) > 140:
print('ERROR: Character limit 140 exceeded:',len(statustext))
else:
twit.update_status(status=statustext)
def TweetPicture(twit,file,statustext):
"""Tweets a message with a picture
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
file <str>, path and filename to picture
statustext <str>, must be <= 140 characters
"""
photo = open(file, 'rb')
    response = twit.upload_media(media=photo)
twit.update_status(status=statustext, media_ids=[response['media_id']])
def TweetVideo(twit,file,statustext):
"""Tweets a message with a video
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
file <str>, path and filename to video
statustext <str>, must be <= 140 characters
"""
video = open(file, 'rb')
    response = twit.upload_video(media=video, media_type='video/mp4')
twit.update_status(status=statustext, media_ids=[response['media_id']])
| # Python 2.7.1
import RPi.GPIO as GPIO
from twython import Twython
import time
import sys
import os
import pygame
APP_KEY='zmmlyAJzMDIntLpDYmSH98gbw'
APP_SECRET='<KEY>'
OAUTH_TOKEN='<KEY>'
OAUTH_TOKEN_SECRET='<KEY>'
applepislcy = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
### GENERAL ###
def Cleanup():
GPIO.cleanup()
def Sleep(seconds):
"""Puts the program to sleep"""
time.sleep(seconds)
def Alert(channel):
"""Simple alert function for testing event interrupts"""
print('Alert on channel',channel)
def TimeString():
"""Returns the current time"""
t = time.localtime()
return str(t[0])+'.'+str(t[1])+'.'+str(t[2])+'.'+str(t[3])+'.'+str(t[4])+'.'+str(t[5])
def LoadPins(mapping,inp):
"""Organizes an input into a pin mapping dict
mapping <list>, ['IA','IB']
inp <dict>, <list>, <int> {'IA':1,'IB':2}, [1,2]
"""
if type(inp) is int and len(mapping) == 1:
return {mapping[0]:inp}
elif type(inp) is list and len(mapping) == len(inp):
o = {}
for i in range(len(inp)):
o[mapping[i]] = inp[i]
return o
elif type(inp) is dict:
return inp
else:
print('Invalid input for pins:',inp,type(inp))
print('Expected:',mapping)
return {}
def BoolToSign(inp):
"""Converts boolean bits into signed bits
0 -> -1
1 -> 1"""
return (inp * 2) - 1
def SignToBool(inp):
"""Converts signed bits into boolean bits
-1 -> 0
1 -> 1"""
return (inp + 1) / 2
### PYGAME ###
def WindowSetup(size=(300,50),caption='',text='',background=(0,0,0),foreground=(255,255,255)):
"""Sets up a pygame window to take keyboard input
size <tuple>, width by height
caption <str>, window title bar
text <str>, text to display in window, accepts \n
background <tuple>, foreground <tuple>, (r,g,b) color
"""
pygame.init()
screen = pygame.display.set_mode(size,0,32)
pygame.display.set_caption(caption)
myfont = pygame.font.SysFont('Monospace',15)
labels = []
lines = text.split('\n')
for line in lines:
labels.append(myfont.render(line,1,foreground))
screen.fill(background)
y = 0
for label in labels:
screen.blit(label, (0,y))
y += 15
pygame.display.update()
def InputLoop(eventmap):
"""Begins a pygame loop, mapping key inputs to functions
eventmap <dict>, {pygame.K_t:myfunction}
"""
index = 0
while True:
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
#print("{0}: You pressed {1:c}".format ( index , event.key ))
if event.key in eventmap:
eventmap[event.key]()
elif event.type == pygame.QUIT:
pygame.quit()
sys.exit()
def InputLoopDemo():
def dog():
print('woof')
def cat():
print('meow')
def fish():
print('blub')
WindowSetup(caption='pet simulator',text='d for dog\nc for cat\nf for fish')
InputLoop({pygame.K_d:dog, pygame.K_c:cat, pygame.K_f:fish})
### TWITTER ###
def Tweet(twit,statustext):
"""Tweets a message
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
statustext <str>, must be <= 140 characters
"""
if len(statustext) > 140:
print('ERROR: Character limit 140 exceeded:',len(statustext))
else:
twit.update_status(status=statustext)
def TweetPicture(twit,file,statustext):
"""Tweets a message with a picture
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
file <str>, path and filename to picture
statustext <str>, must be <= 140 characters
"""
photo = open(file, 'rb')
    response = twit.upload_media(media=photo)
twit.update_status(status=statustext, media_ids=[response['media_id']])
def TweetVideo(twit,file,statustext):
"""Tweets a message with a video
twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
file <str>, path and filename to video
statustext <str>, must be <= 140 characters
"""
video = open(file, 'rb')
    response = twit.upload_video(media=video, media_type='video/mp4')
twit.update_status(status=statustext, media_ids=[response['media_id']])
| en | 0.586788 | # Python 2.7.1 ### GENERAL ### Puts the program to sleep Simple alert function for testing event interrupts Returns the current time Organizes an input into a pin mapping dict mapping <list>, ['IA','IB'] inp <dict>, <list>, <int> {'IA':1,'IB':2}, [1,2] Converts boolean bits into signed bits 0 -> -1 1 -> 1 Converts signed bits into boolean bits -1 -> 0 1 -> 1 ### PYGAME ### Sets up a pygame window to take keyboard input size <tuple>, width by height caption <str>, window title bar text <str>, text to display in window, accepts \n background <tuple>, foreground <tuple>, (r,g,b) color Begins a pygame loop, mapping key inputs to functions eventmap <dict>, {pygame.K_t:myfunction} #print("{0}: You pressed {1:c}".format ( index , event.key )) ### TWITTER ### Tweets a message twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) statustext <str>, must be <= 140 characters Tweets a message with a picture twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) file <str>, path and filename to picture statustext <str>, must be <= 140 characters Tweets a message with a video twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) file <str>, path and filename to video statustext <str>, must be <= 140 characters | 3.002182 | 3 |
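A hypothetical usage sketch for the pin-mapping and input-loop helpers above, assuming they are importable from this module. The GPIO pin numbers and the key binding are placeholder choices, and pygame needs a display to open the window.

import sys
import pygame

pins = LoadPins(['IA', 'IB'], [17, 27])            # -> {'IA': 17, 'IB': 27}
assert SignToBool(BoolToSign(1)) == 1              # round trip between the two bit conventions

WindowSetup(caption='demo', text='press q to quit')
InputLoop({pygame.K_q: lambda: (pygame.quit(), sys.exit())})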
python/pyarmnn/scripts/generate_docs.py | PetervdPerk-NXP/pyarmnn-release | 7 | 9351 | <reponame>PetervdPerk-NXP/pyarmnn-release<gh_stars>1-10
# Copyright © 2019 Arm Ltd. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import os
import tarfile
import pyarmnn as ann
import shutil
from typing import List, Union
from pdoc.cli import main
package_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
def __copy_file_to_dir(file_paths: Union[List[str], str], target_dir_path: str):
"""Copies multiple files to a directory.
Args:
file_paths (Union[List(str)]): List of files to copy
target_dir_path (str): Target directory.
Returns:
None
"""
file_paths = [] + file_paths
if not (os.path.exists(target_dir_path) and os.path.isdir(target_dir_path)):
os.makedirs(target_dir_path)
for file_path in file_paths:
if not (os.path.exists(file_path) and os.path.isfile(file_path)):
raise RuntimeError('Not a file: {}'.format(file_path))
file_name = os.path.basename(file_path)
shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name))
def archive_docs(path: str, version: str):
"""Creates an archive.
Args:
path (str): Path which will be archived.
version (str): Version of Arm NN.
Returns:
None
"""
output_filename = f'pyarmnn_docs-{version}.tar'
with tarfile.open(os.path.join(package_dir, output_filename), "w") as tar:
tar.add(path)
if __name__ == "__main__":
readme_filename = os.path.join(package_dir, '..', '..', 'README.md')
with open(readme_filename, 'r') as readme_file:
top_level_pyarmnn_doc = ''.join(readme_file.readlines())
ann.__doc__ = top_level_pyarmnn_doc
main()
target_path = os.path.join(package_dir, 'docs')
archive_docs(target_path, ann.__version__)
| # Copyright © 2019 Arm Ltd. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import os
import tarfile
import pyarmnn as ann
import shutil
from typing import List, Union
from pdoc.cli import main
package_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
def __copy_file_to_dir(file_paths: Union[List[str], str], target_dir_path: str):
"""Copies multiple files to a directory.
Args:
file_paths (Union[List(str)]): List of files to copy
target_dir_path (str): Target directory.
Returns:
None
"""
file_paths = [] + file_paths
if not (os.path.exists(target_dir_path) and os.path.isdir(target_dir_path)):
os.makedirs(target_dir_path)
for file_path in file_paths:
if not (os.path.exists(file_path) and os.path.isfile(file_path)):
raise RuntimeError('Not a file: {}'.format(file_path))
file_name = os.path.basename(file_path)
shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name))
def archive_docs(path: str, version: str):
"""Creates an archive.
Args:
path (str): Path which will be archived.
version (str): Version of Arm NN.
Returns:
None
"""
output_filename = f'pyarmnn_docs-{version}.tar'
with tarfile.open(os.path.join(package_dir, output_filename), "w") as tar:
tar.add(path)
if __name__ == "__main__":
readme_filename = os.path.join(package_dir, '..', '..', 'README.md')
with open(readme_filename, 'r') as readme_file:
top_level_pyarmnn_doc = ''.join(readme_file.readlines())
ann.__doc__ = top_level_pyarmnn_doc
main()
target_path = os.path.join(package_dir, 'docs')
archive_docs(target_path, ann.__version__) | en | 0.663716 | # Copyright © 2019 Arm Ltd. All rights reserved. # Copyright 2020 NXP # SPDX-License-Identifier: MIT Copies multiple files to a directory.
Args:
file_paths (Union[List(str)]): List of files to copy
target_dir_path (str): Target directory.
Returns:
None Creates an archive.
Args:
path (str): Path which will be archived.
version (str): Version of Arm NN.
Returns:
None | 2.372192 | 2 |
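A small, hypothetical example of the archiving helper above, assuming `archive_docs` is importable; the temporary docs directory and the '0.0.0' version string are placeholders only.

import os
import tempfile

docs_dir = tempfile.mkdtemp()                              # stand-in for the generated docs folder
open(os.path.join(docs_dir, 'index.html'), 'w').close()
archive_docs(docs_dir, '0.0.0')                            # writes pyarmnn_docs-0.0.0.tar under package_dir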
tests/gejun_sum.py | jeffzhengye/pylearn | 2 | 9352 | __author__ = 'jeffye'
def sum_consecutives(s):
i = 1
li = []
if i < len(s):
n = 1
while s[i] != s[i + 1] and s[i] != s[i - 1]:
sum = s[i]
i = i + 1
return sum
while s[i] == s[i + 1]:
n = n + 1
sum = s[i] * n
i = i + 1
return sum
li.append(sum)
return li
def sum_consecutives_corrected(s):
start = 0
li = []
n = 1
while start < len(s):
if start == len(s) - 1: # last element
li.append(s[start])
break
elif s[start] == s[start + n]: # equal, just record the length
n += 1
else: # first not equal, sum all previous equal elements and append to li
li.append(sum(s[start: start + n]))
start += n
n = 1
return li
if __name__ == '__main__':
test_li = [-5, -5, 7, 7, 12, 0] # should return [-10,14,12,0]
print sum_consecutives_corrected(test_li)
| __author__ = 'jeffye'
def sum_consecutives(s):
i = 1
li = []
if i < len(s):
n = 1
while s[i] != s[i + 1] and s[i] != s[i - 1]:
sum = s[i]
i = i + 1
return sum
while s[i] == s[i + 1]:
n = n + 1
sum = s[i] * n
i = i + 1
return sum
li.append(sum)
return li
def sum_consecutives_corrected(s):
start = 0
li = []
n = 1
while start < len(s):
if start == len(s) - 1: # last element
li.append(s[start])
break
elif s[start] == s[start + n]: # equal, just record the length
n += 1
else: # first not equal, sum all previous equal elements and append to li
li.append(sum(s[start: start + n]))
start += n
n = 1
return li
if __name__ == '__main__':
test_li = [-5, -5, 7, 7, 12, 0] # should return [-10,14,12,0]
print sum_consecutives_corrected(test_li)
| en | 0.703839 | # last element # equal, just record the length # first not equal, sum all previous equal elements and append to li # should return [-10,14,12,0] | 3.397416 | 3 |
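Illustrative checks for the corrected implementation above, using the expected output noted in the original comment. Inputs whose final run is longer than one element are avoided here, since `s[start + n]` can still index past the end in that case.

assert sum_consecutives_corrected([-5, -5, 7, 7, 12, 0]) == [-10, 14, 12, 0]
assert sum_consecutives_corrected([3, 3, 1]) == [6, 1]
assert sum_consecutives_corrected([4]) == [4]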
PySDDP/term.py | tscher/PySDDP | 9 | 9353 | <filename>PySDDP/term.py
class term(object):
# Dados de cadastro das usinas termeletrica (presentes no TERM.DAT)
Codigo = None
Nome = None
Potencia = None
FCMax = None
TEIF = None
IP = None
GTMin = None
# Dados Adicionais Especificados no arquivo de configuracao termica (CONFT)
Sist = None
Status = None
Classe = None
# Dados Adicionais Especificados no arquivo de classe termica (CLAST)
Custo = None
NomeClasse = None
TipoComb = None
def insere(self, custo, gmax):
self.custo = custo
self.gmax = gmax
| <filename>PySDDP/term.py
class term(object):
# Dados de cadastro das usinas termeletrica (presentes no TERM.DAT)
Codigo = None
Nome = None
Potencia = None
FCMax = None
TEIF = None
IP = None
GTMin = None
# Dados Adicionais Especificados no arquivo de configuracao termica (CONFT)
Sist = None
Status = None
Classe = None
# Dados Adicionais Especificados no arquivo de classe termica (CLAST)
Custo = None
NomeClasse = None
TipoComb = None
def insere(self, custo, gmax):
self.custo = custo
self.gmax = gmax
| pt | 0.52758 | # Dados de cadastro das usinas termeletrica (presentes no TERM.DAT) # Dados Adicionais Especificados no arquivo de configuracao termica (CONFT) # Dados Adicionais Especificados no arquivo de classe termica (CLAST) | 2.323335 | 2 |
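A hypothetical sketch instantiating the thermal plant record above; the values are illustrative placeholders, not real registry data.

ut = term()
ut.Codigo, ut.Nome, ut.Potencia = 1, 'UTE Exemplo', 350.0   # minimal illustrative registration data
ut.insere(custo=142.5, gmax=320.0)
print(ut.Nome, ut.custo, ut.gmax)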
plerr/__main__.py | b2bs-team/pylint-errors | 2 | 9354 | """plerr entrypoint"""
from plerr import cli
if __name__ == '__main__':
cli.main()
| """plerr entrypoint"""
from plerr import cli
if __name__ == '__main__':
cli.main()
| eu | 0.192871 | plerr entrypoint | 1.205007 | 1 |
code/send.py | CamouOkau/messenger_new_years_bot | 0 | 9355 | <filename>code/send.py
import sys
import time
from datetime import datetime
from bot import FbMessengerBot
if __name__ == "__main__":
if len(sys.argv) < 3:
print("No email or password provided")
else:
bot = FbMessengerBot(sys.argv[1], sys.argv[2])
with open("users.txt", "r") as file:
users = dict.fromkeys(file.read().split("\n"))
for user in users:
users[user] = bot.uid(user)
with open("message.txt", "r") as file:
message = file.read()
time_now = datetime.now()
send_time = datetime(time_now.year + 1, 1, 1)
wait_time = (send_time - time_now).total_seconds()
print("Waiting...")
time.sleep(wait_time)
for uid in users.values():
bot.send_message(message, uid)
bot.logout()
| <filename>code/send.py
import sys
import time
from datetime import datetime
from bot import FbMessengerBot
if __name__ == "__main__":
if len(sys.argv) < 3:
print("No email or password provided")
else:
bot = FbMessengerBot(sys.argv[1], sys.argv[2])
with open("users.txt", "r") as file:
users = dict.fromkeys(file.read().split("\n"))
for user in users:
users[user] = bot.uid(user)
with open("message.txt", "r") as file:
message = file.read()
time_now = datetime.now()
send_time = datetime(time_now.year + 1, 1, 1)
wait_time = (send_time - time_now).total_seconds()
print("Waiting...")
time.sleep(wait_time)
for uid in users.values():
bot.send_message(message, uid)
bot.logout()
| none | 1 | 2.603909 | 3 |
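An isolated illustration of the wait-until-midnight computation the script above performs before sending; it needs no Facebook credentials.

from datetime import datetime

now = datetime.now()
new_year = datetime(now.year + 1, 1, 1)
seconds_left = (new_year - now).total_seconds()     # value the script passes to time.sleep()
print('Sleeping {:.0f}s until {:%Y-%m-%d %H:%M}'.format(seconds_left, new_year))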
|
senlin_tempest_plugin/api/policies/test_policy_update_negative.py | ghanshyammann/senlin-tempest-plugin | 0 | 9356 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from senlin_tempest_plugin.api import base
from senlin_tempest_plugin.common import utils
class TestPolicyUpdateNegativeNotFound(base.BaseSenlinAPITest):
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5df90d82-9889-4c6f-824c-30272bcfa767')
def test_policy_update_policy_not_found(self):
ex = self.assertRaises(exceptions.NotFound,
self.client.update_obj, 'policies',
'5df90d82-9889-4c6f-824c-30272bcfa767',
{'policy': {'name': 'new-name'}})
message = ex.resp_body['error']['message']
self.assertEqual(
"The policy '5df90d82-9889-4c6f-824c-30272bcfa767' "
"could not be found.", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('29414add-9cba-4b72-a7bb-36718671dcab')
def test_policy_update_policy_invalid_param(self):
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj, 'policies',
'5df90d82-9889-4c6f-824c-30272bcfa767',
{'policy': {'boo': 'foo'}})
message = ex.resp_body['error']['message']
self.assertEqual(
"Additional properties are not allowed (u'boo' was "
"unexpected)", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('bf26ed1e-1d26-4472-b4c8-0bcca1c0a838')
def test_policy_update_policy_empty_param(self):
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj, 'policies',
'5df90d82-9889-4c6f-824c-30272bcfa767',
{})
message = ex.resp_body['error']['message']
self.assertEqual(
"Malformed request data, missing 'policy' key in "
"request body.", str(message))
class TestPolicyUpdateNegativeBadRequest(base.BaseSenlinAPITest):
def setUp(self):
super(TestPolicyUpdateNegativeBadRequest, self).setUp()
# Create a policy
policy_id = utils.create_a_policy(self)
self.addCleanup(utils.delete_a_policy, self, policy_id)
self.policy_id = policy_id
@decorators.attr(type=['negative'])
@decorators.idempotent_id('31242de5-55ac-4589-87a1-a9940e4beca2')
def test_policy_update_no_property_updated(self):
# No property is updated.
params = {
'policy': {}
}
# Verify badrequest exception(400) is raised.
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj,
'policies', self.policy_id, params)
message = ex.resp_body['error']['message']
self.assertEqual(
"'name' is a required property", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d2ca7de6-0069-48c9-b3de-ee975a2428dc')
def test_policy_update_spec_not_updatable(self):
# Try to update spec of policy.
# Note: name is the only property that can be updated
# after policy is created.
params = {
'policy': {
'name': 'new-name',
'spec': {'k1': 'v1'}
}
}
# Verify badrequest exception(400) is raised.
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj,
'policies', self.policy_id, params)
message = ex.resp_body['error']['message']
self.assertEqual(
"Additional properties are not allowed (u'spec' was "
"unexpected)", str(message))
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from senlin_tempest_plugin.api import base
from senlin_tempest_plugin.common import utils
class TestPolicyUpdateNegativeNotFound(base.BaseSenlinAPITest):
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5df90d82-9889-4c6f-824c-30272bcfa767')
def test_policy_update_policy_not_found(self):
ex = self.assertRaises(exceptions.NotFound,
self.client.update_obj, 'policies',
'5df90d82-9889-4c6f-824c-30272bcfa767',
{'policy': {'name': 'new-name'}})
message = ex.resp_body['error']['message']
self.assertEqual(
"The policy '5df90d82-9889-4c6f-824c-30272bcfa767' "
"could not be found.", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('29414add-9cba-4b72-a7bb-36718671dcab')
def test_policy_update_policy_invalid_param(self):
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj, 'policies',
'5df90d82-9889-4c6f-824c-30272bcfa767',
{'policy': {'boo': 'foo'}})
message = ex.resp_body['error']['message']
self.assertEqual(
"Additional properties are not allowed (u'boo' was "
"unexpected)", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('bf26ed1e-1d26-4472-b4c8-0bcca1c0a838')
def test_policy_update_policy_empty_param(self):
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj, 'policies',
'5df90d82-9889-4c6f-824c-30272bcfa767',
{})
message = ex.resp_body['error']['message']
self.assertEqual(
"Malformed request data, missing 'policy' key in "
"request body.", str(message))
class TestPolicyUpdateNegativeBadRequest(base.BaseSenlinAPITest):
def setUp(self):
super(TestPolicyUpdateNegativeBadRequest, self).setUp()
# Create a policy
policy_id = utils.create_a_policy(self)
self.addCleanup(utils.delete_a_policy, self, policy_id)
self.policy_id = policy_id
@decorators.attr(type=['negative'])
@decorators.idempotent_id('31242de5-55ac-4589-87a1-a9940e4beca2')
def test_policy_update_no_property_updated(self):
# No property is updated.
params = {
'policy': {}
}
# Verify badrequest exception(400) is raised.
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj,
'policies', self.policy_id, params)
message = ex.resp_body['error']['message']
self.assertEqual(
"'name' is a required property", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d2ca7de6-0069-48c9-b3de-ee975a2428dc')
def test_policy_update_spec_not_updatable(self):
# Try to update spec of policy.
# Note: name is the only property that can be updated
# after policy is created.
params = {
'policy': {
'name': 'new-name',
'spec': {'k1': 'v1'}
}
}
# Verify badrequest exception(400) is raised.
ex = self.assertRaises(exceptions.BadRequest,
self.client.update_obj,
'policies', self.policy_id, params)
message = ex.resp_body['error']['message']
self.assertEqual(
"Additional properties are not allowed (u'spec' was "
"unexpected)", str(message))
| en | 0.863182 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Create a policy # No property is updated. # Verify badrequest exception(400) is raised. # Try to update spec of policy. # Note: name is the only property that can be updated # after policy is created. # Verify badrequest exception(400) is raised. | 1.72174 | 2 |
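Per the negative tests above, `name` is the only property accepted in a policy update; a sketch of the one well-formed body shape follows (values are placeholders).

valid_update_body = {
    'policy': {
        'name': 'new-name',   # sole updatable field; adding 'spec' or unknown keys yields a 400
    }
}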
boa3_test/test_sc/interop_test/contract/DestroyContract.py | hal0x2328/neo3-boa | 25 | 9357 | <gh_stars>10-100
from boa3.builtin import public
from boa3.builtin.interop.contract import destroy_contract
@public
def Main():
destroy_contract()
| from boa3.builtin import public
from boa3.builtin.interop.contract import destroy_contract
@public
def Main():
destroy_contract() | none | 1 | 1.292105 | 1 |
|
tests/test_vmax.py | qinfeng2011/wltp | 0 | 9358 | <reponame>qinfeng2011/wltp<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
import functools as fnt
import logging
import random
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pandas import IndexSlice as _ix
from wltp import engine, vehicle, downscale, vmax
from wltp.io import gear_names, veh_names
from . import vehdb
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
def test_v_max(h5_accdb):
from . import conftest
veh_samples = None
# DEBUG: to reduce clutter in the console.
# veh_samples = 12
# DEBUG: to study buggy cars.
# veh_samples = [76] # diff det_by_nlim
# veh_samples = [3, 21, 22, 104, ] # diff gear
# veh_samples = [38] # diff vmax order higher 1st
# veh_samples = [31] # [23]
def make_v_maxes(vehnum):
props, wot, n2vs = vehdb.load_vehicle_accdb(h5_accdb, vehnum)
wot = wot.rename({"Pwot": "p"}, axis=1)
wot["n"] = wot.index
gwots = engine.interpolate_wot_on_v_grid(wot, n2vs)
gwots = engine.calc_p_avail_in_gwots(gwots, SM=0.1)
gwots["p_resist"] = vehicle.calc_road_load_power(
gwots.index, props.f0, props.f1, props.f2
)
rec = vmax.calc_v_max(gwots)
return (props["v_max"], rec.v_max, props["gear_v_max"], rec.g_vmax, rec.wot)
def _package_wots_df(gear_wot_dfs):
assert gear_wot_dfs
## Merge all index values into the index of the 1st DF,
# or else, themerged-df contains n-gear dupes in each index-value.
#
# first_df, *rest_dfs = gear_wot_dfs.values()
# full_index = np.unique(np.hstack(df.index for df in gear_wot_dfs))
# first_df = first_df.reindex(full_index)
wots_df = pd.concat(
# [first_df] + rest_dfs,
gear_wot_dfs.values(),
axis=1,
# join="inner",
keys=gear_names(gear_wot_dfs.keys()),
names=["item", "gear"],
verify_integrity=True,
)
return wots_df
veh_nums = vehdb.all_vehnums(h5_accdb)
if not isinstance(veh_samples, (list, tuple)):
veh_samples = random.sample(veh_nums, veh_samples) if veh_samples else veh_nums
recs = [make_v_maxes(vehnum) for vehnum in veh_samples]
vehres = pd.DataFrame(
recs,
columns="vmax_accdb vmax_python gmax_accdb gmax_python wot".split(),
index=veh_names(veh_samples),
).astype({"gmax_accdb": "Int64", "gmax_python": "Int64"})
wots_df = pd.concat(
vehres["wot"].values, keys=veh_names(veh_samples), names=["vehicle"]
)
vehres = vehres.drop("wot", axis=1)
vehres["vmax_diff"] = (vehres["vmax_python"] - vehres["vmax_accdb"]).abs()
vehres["gmax_diff"] = (vehres["gmax_python"] - vehres["gmax_accdb"]).abs()
with pd.option_context(
"display.max_rows",
130,
"display.max_columns",
20,
"display.width",
120,
# "display.precision",
# 4,
# "display.chop_threshold",
# 1e-8,
"display.float_format",
"{:0.2f}".format,
):
print(
f"++ nones: {vehres.vmax_python.sum()} (out of {len(veh_samples)})"
f"\n++++\n{vehres}"
# f"\n++++\n{wots_df.sample(80, axis=0)}"
)
with pd.option_context(
"display.max_columns",
20,
"display.width",
120,
"display.float_format",
"{:0.4f}".format,
):
print(f"\n++++\n{vehres.describe().T}")
vehres = vehres.dropna(axis=1)
# npt.assert_array_equal(vmaxes["vmax_python"], vmaxes["vmax_accdb"])
aggregate_tol = 1e-4 # The digits copied from terminal.
assert (
vehres["vmax_diff"].describe()
- [125.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]
< aggregate_tol
).all()
assert (
vehres["gmax_diff"].describe()
- [125.0000, 0.1040, 0.3552, 0.0000, 0.0000, 0.0000, 0.0000, 2.0000]
< aggregate_tol
).all()
assert (vehres["vmax_diff"] == 0).sum() == 125 and (
vehres["gmax_diff"] == 0
).sum() == 125
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
import functools as fnt
import logging
import random
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pandas import IndexSlice as _ix
from wltp import engine, vehicle, downscale, vmax
from wltp.io import gear_names, veh_names
from . import vehdb
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
def test_v_max(h5_accdb):
from . import conftest
veh_samples = None
# DEBUG: to reduce clutter in the console.
# veh_samples = 12
# DEBUG: to study buggy cars.
# veh_samples = [76] # diff det_by_nlim
# veh_samples = [3, 21, 22, 104, ] # diff gear
# veh_samples = [38] # diff vmax order higher 1st
# veh_samples = [31] # [23]
def make_v_maxes(vehnum):
props, wot, n2vs = vehdb.load_vehicle_accdb(h5_accdb, vehnum)
wot = wot.rename({"Pwot": "p"}, axis=1)
wot["n"] = wot.index
gwots = engine.interpolate_wot_on_v_grid(wot, n2vs)
gwots = engine.calc_p_avail_in_gwots(gwots, SM=0.1)
gwots["p_resist"] = vehicle.calc_road_load_power(
gwots.index, props.f0, props.f1, props.f2
)
rec = vmax.calc_v_max(gwots)
return (props["v_max"], rec.v_max, props["gear_v_max"], rec.g_vmax, rec.wot)
def _package_wots_df(gear_wot_dfs):
assert gear_wot_dfs
## Merge all index values into the index of the 1st DF,
# or else, themerged-df contains n-gear dupes in each index-value.
#
# first_df, *rest_dfs = gear_wot_dfs.values()
# full_index = np.unique(np.hstack(df.index for df in gear_wot_dfs))
# first_df = first_df.reindex(full_index)
wots_df = pd.concat(
# [first_df] + rest_dfs,
gear_wot_dfs.values(),
axis=1,
# join="inner",
keys=gear_names(gear_wot_dfs.keys()),
names=["item", "gear"],
verify_integrity=True,
)
return wots_df
veh_nums = vehdb.all_vehnums(h5_accdb)
if not isinstance(veh_samples, (list, tuple)):
veh_samples = random.sample(veh_nums, veh_samples) if veh_samples else veh_nums
recs = [make_v_maxes(vehnum) for vehnum in veh_samples]
vehres = pd.DataFrame(
recs,
columns="vmax_accdb vmax_python gmax_accdb gmax_python wot".split(),
index=veh_names(veh_samples),
).astype({"gmax_accdb": "Int64", "gmax_python": "Int64"})
wots_df = pd.concat(
vehres["wot"].values, keys=veh_names(veh_samples), names=["vehicle"]
)
vehres = vehres.drop("wot", axis=1)
vehres["vmax_diff"] = (vehres["vmax_python"] - vehres["vmax_accdb"]).abs()
vehres["gmax_diff"] = (vehres["gmax_python"] - vehres["gmax_accdb"]).abs()
with pd.option_context(
"display.max_rows",
130,
"display.max_columns",
20,
"display.width",
120,
# "display.precision",
# 4,
# "display.chop_threshold",
# 1e-8,
"display.float_format",
"{:0.2f}".format,
):
print(
f"++ nones: {vehres.vmax_python.sum()} (out of {len(veh_samples)})"
f"\n++++\n{vehres}"
# f"\n++++\n{wots_df.sample(80, axis=0)}"
)
with pd.option_context(
"display.max_columns",
20,
"display.width",
120,
"display.float_format",
"{:0.4f}".format,
):
print(f"\n++++\n{vehres.describe().T}")
vehres = vehres.dropna(axis=1)
# npt.assert_array_equal(vmaxes["vmax_python"], vmaxes["vmax_accdb"])
aggregate_tol = 1e-4 # The digits copied from terminal.
assert (
vehres["vmax_diff"].describe()
- [125.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]
< aggregate_tol
).all()
assert (
vehres["gmax_diff"].describe()
- [125.0000, 0.1040, 0.3552, 0.0000, 0.0000, 0.0000, 0.0000, 2.0000]
< aggregate_tol
).all()
assert (vehres["vmax_diff"] == 0).sum() == 125 and (
vehres["gmax_diff"] == 0
).sum() == 125 | en | 0.653608 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 European Commission (JRC); # Licensed under the EUPL (the 'Licence'); # You may not use this work except in compliance with the Licence. # You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl # DEBUG: to reduce clutter in the console. # veh_samples = 12 # DEBUG: to study buggy cars. # veh_samples = [76] # diff det_by_nlim # veh_samples = [3, 21, 22, 104, ] # diff gear # veh_samples = [38] # diff vmax order higher 1st # veh_samples = [31] # [23] ## Merge all index values into the index of the 1st DF, # or else, themerged-df contains n-gear dupes in each index-value. # # first_df, *rest_dfs = gear_wot_dfs.values() # full_index = np.unique(np.hstack(df.index for df in gear_wot_dfs)) # first_df = first_df.reindex(full_index) # [first_df] + rest_dfs, # join="inner", # "display.precision", # 4, # "display.chop_threshold", # 1e-8, # f"\n++++\n{wots_df.sample(80, axis=0)}" # npt.assert_array_equal(vmaxes["vmax_python"], vmaxes["vmax_accdb"]) # The digits copied from terminal. | 2.146297 | 2 |
util/canonicaljson.py | giuseppe/quay | 2,027 | 9359 | <reponame>giuseppe/quay
import collections
def canonicalize(json_obj, preserve_sequence_order=True):
"""
This function canonicalizes a Python object that will be serialized as JSON.
Example usage: json.dumps(canonicalize(my_obj))
Args:
json_obj (object): the Python object that will later be serialized as JSON.
Returns:
object: json_obj now sorted to its canonical form.
"""
if isinstance(json_obj, collections.MutableMapping):
sorted_obj = sorted(
{
key: canonicalize(val, preserve_sequence_order) for key, val in json_obj.items()
}.items()
)
return collections.OrderedDict(sorted_obj)
elif isinstance(json_obj, (list, tuple)):
seq = [canonicalize(val, preserve_sequence_order) for val in json_obj]
return seq if preserve_sequence_order else sorted(seq)
return json_obj
| import collections
def canonicalize(json_obj, preserve_sequence_order=True):
"""
This function canonicalizes a Python object that will be serialized as JSON.
Example usage: json.dumps(canonicalize(my_obj))
Args:
json_obj (object): the Python object that will later be serialized as JSON.
Returns:
object: json_obj now sorted to its canonical form.
"""
if isinstance(json_obj, collections.MutableMapping):
sorted_obj = sorted(
{
key: canonicalize(val, preserve_sequence_order) for key, val in json_obj.items()
}.items()
)
return collections.OrderedDict(sorted_obj)
elif isinstance(json_obj, (list, tuple)):
seq = [canonicalize(val, preserve_sequence_order) for val in json_obj]
return seq if preserve_sequence_order else sorted(seq)
return json_obj | en | 0.799362 | This function canonicalizes a Python object that will be serialized as JSON. Example usage: json.dumps(canonicalize(my_obj)) Args: json_obj (object): the Python object that will later be serialized as JSON. Returns: object: json_obj now sorted to its canonical form. | 3.803853 | 4 |
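A round-trip example for the helper above, following the 'Example usage' line in its docstring and assuming `canonicalize` is importable.

import json

obj = {'b': [3, 1, 2], 'a': {'y': 1, 'x': 2}}
print(json.dumps(canonicalize(obj)))                                  # keys sorted, list order kept
print(json.dumps(canonicalize(obj, preserve_sequence_order=False)))   # list elements sorted as well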
datasette_plugin_geo/inspect.py | russss/datasette-geo | 9 | 9360 | from datasette import hookimpl
from datasette.utils import detect_spatialite
from shapely import wkt
def get_spatial_tables(conn):
if not detect_spatialite(conn):
return {}
spatial_tables = {}
c = conn.cursor()
c.execute(
"""SELECT f_table_name, f_geometry_column, srid, spatial_index_enabled
FROM geometry_columns"""
)
for row in c.fetchall():
if row[3] != 1:
print(
"Column {column} in table {table} has no spatial index; datasette-geo will ignore it.".format(
column=row[1], table=row[0]
)
)
continue
spatial_tables[row[0]] = row[1]
return spatial_tables
def get_bounds(conn, spatial_tables):
c = conn.cursor()
res = {}
for table, column in spatial_tables.items():
c.execute(
"SELECT AsText(Envelope(GUnion({column}))) FROM {table}".format(
table=table, column=column
)
)
data = c.fetchone()[0]
if data is None:
continue
bbox = wkt.loads(data)
res[table] = bbox.bounds
return res
| from datasette import hookimpl
from datasette.utils import detect_spatialite
from shapely import wkt
def get_spatial_tables(conn):
if not detect_spatialite(conn):
return {}
spatial_tables = {}
c = conn.cursor()
c.execute(
"""SELECT f_table_name, f_geometry_column, srid, spatial_index_enabled
FROM geometry_columns"""
)
for row in c.fetchall():
if row[3] != 1:
print(
"Column {column} in table {table} has no spatial index; datasette-geo will ignore it.".format(
column=row[1], table=row[0]
)
)
continue
spatial_tables[row[0]] = row[1]
return spatial_tables
def get_bounds(conn, spatial_tables):
c = conn.cursor()
res = {}
for table, column in spatial_tables.items():
c.execute(
"SELECT AsText(Envelope(GUnion({column}))) FROM {table}".format(
table=table, column=column
)
)
data = c.fetchone()[0]
if data is None:
continue
bbox = wkt.loads(data)
res[table] = bbox.bounds
return res
| en | 0.162458 | SELECT f_table_name, f_geometry_column, srid, spatial_index_enabled FROM geometry_columns | 2.358982 | 2 |
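A hedged sketch of driving the inspection helpers above directly; the database path is a placeholder, and loading `mod_spatialite` assumes SpatiaLite is installed on the system.

import sqlite3

conn = sqlite3.connect('places.db')          # placeholder path to a SpatiaLite database
conn.enable_load_extension(True)
conn.load_extension('mod_spatialite')        # assumption: extension available on this system
tables = get_spatial_tables(conn)            # {'table_name': 'geometry_column', ...}
print(get_bounds(conn, tables))              # {'table_name': (minx, miny, maxx, maxy), ...}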
microcosm_pubsub/context.py | Sinon/microcosm-pubsub | 5 | 9361 | """
Message context.
"""
from typing import Dict
from microcosm.api import defaults, typed
from microcosm.config.types import boolean
from microcosm_logging.decorators import logger
from microcosm_pubsub.constants import TTL_KEY, URI_KEY
from microcosm_pubsub.message import SQSMessage
@defaults(
enable_ttl=typed(boolean, default_value=True),
initial_ttl=typed(int, default_value=32),
)
@logger
class SQSMessageContext:
"""
Factory for per-message contexts.
"""
def __init__(self, graph):
self.enable_ttl = graph.config.sqs_message_context.enable_ttl
self.initial_ttl = graph.config.sqs_message_context.initial_ttl
def __call__(self, context: SQSMessage, **kwargs) -> Dict[str, str]:
"""
Create a new context from a message.
"""
return self.from_sqs_message(context, **kwargs)
def from_sqs_message(self, message: SQSMessage, **kwargs):
context: Dict = dict(message.opaque_data)
context.update(
# include the message id
message_id=message.message_id,
**kwargs,
)
# include the TTL (if enabled)
if self.enable_ttl:
ttl = message.ttl if message.ttl is not None else self.initial_ttl
context[TTL_KEY] = str(ttl - 1)
# include the URI (if there is one)
if message.uri:
context[URI_KEY] = message.uri
return context
| """
Message context.
"""
from typing import Dict
from microcosm.api import defaults, typed
from microcosm.config.types import boolean
from microcosm_logging.decorators import logger
from microcosm_pubsub.constants import TTL_KEY, URI_KEY
from microcosm_pubsub.message import SQSMessage
@defaults(
enable_ttl=typed(boolean, default_value=True),
initial_ttl=typed(int, default_value=32),
)
@logger
class SQSMessageContext:
"""
Factory for per-message contexts.
"""
def __init__(self, graph):
self.enable_ttl = graph.config.sqs_message_context.enable_ttl
self.initial_ttl = graph.config.sqs_message_context.initial_ttl
def __call__(self, context: SQSMessage, **kwargs) -> Dict[str, str]:
"""
Create a new context from a message.
"""
return self.from_sqs_message(context, **kwargs)
def from_sqs_message(self, message: SQSMessage, **kwargs):
context: Dict = dict(message.opaque_data)
context.update(
# include the message id
message_id=message.message_id,
**kwargs,
)
# include the TTL (if enabled)
if self.enable_ttl:
ttl = message.ttl if message.ttl is not None else self.initial_ttl
context[TTL_KEY] = str(ttl - 1)
# include the URI (if there is one)
if message.uri:
context[URI_KEY] = message.uri
return context
| en | 0.748185 | Message context. Factory for per-message contexts. Create a new context from a message. # include the message id # include the TTL (if enabled) # include the URI (if there is one) | 2.289917 | 2 |
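An isolated illustration of the TTL handling in `from_sqs_message` above, separated from the microcosm graph wiring.

def next_ttl(message_ttl, initial_ttl=32):
    ttl = message_ttl if message_ttl is not None else initial_ttl
    return str(ttl - 1)                      # mirrors context[TTL_KEY] = str(ttl - 1)

assert next_ttl(None) == '31'                # first hop consumes one unit of the default budget
assert next_ttl(5) == '4'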
azure_ml/pytorch_classifier/train_parameterized.py | murdockcrc/python-tricks | 0 | 9362 | <gh_stars>0
import os
import argparse
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from model import Net
from azureml.core import Run
run = Run.get_context()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_path',
type=str,
help='Path to the training data'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.001,
help='Learning rate for SGD'
)
parser.add_argument(
'--momentum',
type=float,
default=0.9,
help='Momentum for SGD'
)
args = parser.parse_args()
print("===== DATA =====")
print("DATA PATH: " + args.data_path)
print("LIST FILES IN DATA PATH...")
print(os.listdir(args.data_path))
print("================")
# prepare DataLoader for CIFAR10 data
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
trainset = torchvision.datasets.CIFAR10(
root=args.data_path,
train=True,
download=False,
transform=transform,
)
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=4,
shuffle=True,
num_workers=2
)
# define convolutional network
net = Net()
# set up pytorch loss / optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(
net.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
)
# train the network
for epoch in range(2):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# unpack the data
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999:
loss = running_loss / 2000
run.log('loss', loss) # log loss metric to AML
print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')
running_loss = 0.0
print('Finished Training') | import os
import argparse
import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from model import Net
from azureml.core import Run
run = Run.get_context()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_path',
type=str,
help='Path to the training data'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.001,
help='Learning rate for SGD'
)
parser.add_argument(
'--momentum',
type=float,
default=0.9,
help='Momentum for SGD'
)
args = parser.parse_args()
print("===== DATA =====")
print("DATA PATH: " + args.data_path)
print("LIST FILES IN DATA PATH...")
print(os.listdir(args.data_path))
print("================")
# prepare DataLoader for CIFAR10 data
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
trainset = torchvision.datasets.CIFAR10(
root=args.data_path,
train=True,
download=False,
transform=transform,
)
trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=4,
shuffle=True,
num_workers=2
)
# define convolutional network
net = Net()
# set up pytorch loss / optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(
net.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
)
# train the network
for epoch in range(2):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# unpack the data
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999:
loss = running_loss / 2000
run.log('loss', loss) # log loss metric to AML
print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')
running_loss = 0.0
print('Finished Training') | en | 0.465789 | # prepare DataLoader for CIFAR10 data # define convolutional network # set up pytorch loss / optimizer # train the network # unpack the data # zero the parameter gradients # forward + backward + optimize # print statistics # log loss metric to AML | 2.51908 | 3 |
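A quick local smoke test of the script above: Run.get_context() falls back to an offline run outside Azure ML, so run.log still works. The ./data path is an assumption — CIFAR-10 must already be downloaded there because the script constructs the dataset with download=False.

import subprocess, sys

# Local invocation; './data' must already contain cifar-10-batches-py/
# since the script passes download=False to torchvision.
subprocess.run(
    [sys.executable, "train_parameterized.py",
     "--data_path", "./data",
     "--learning_rate", "0.001",
     "--momentum", "0.9"],
    check=True,
)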
src/tests/test_app_db.py | kazqvaizer/arq-sqlalchemy-boilerplate | 6 | 9363 | import pytest
from app.db import session_scope
pytestmark = pytest.mark.asyncio
async def test_engine_configured(env):
async with session_scope() as session:
assert str(session.bind.engine.url) == env("SQLALCHEMY_DATABASE_URI")
| import pytest
from app.db import session_scope
pytestmark = pytest.mark.asyncio
async def test_engine_configured(env):
async with session_scope() as session:
assert str(session.bind.engine.url) == env("SQLALCHEMY_DATABASE_URI")
| none | 1 | 2.056562 | 2 |
|
catalyst/core/callbacks/formatters.py | cgarciae/catalyst | 1 | 9364 | <gh_stars>1-10
from abc import ABC, abstractmethod
from datetime import datetime
import json
import logging
from catalyst import utils
from catalyst.core import _State
class MetricsFormatter(ABC, logging.Formatter):
"""
Abstract metrics formatter
"""
def __init__(self, message_prefix):
"""
Args:
message_prefix: logging format string
that will be prepended to message
"""
super().__init__(f"{message_prefix}{{message}}", style="{")
@abstractmethod
def _format_message(self, state: _State):
pass
def format(self, record: logging.LogRecord):
"""
Format message string
"""
# noinspection PyUnresolvedReferences
state = record.state
record.msg = self._format_message(state)
return super().format(record)
class TxtMetricsFormatter(MetricsFormatter):
"""
Translate batch metrics in human-readable format.
This class is used by ``logging.Logger`` to make a string from record.
For details refer to official docs for 'logging' module.
Note:
This is inner class used by Logger callback,
no need to use it directly!
"""
def __init__(self):
"""
Initializes the ``TxtMetricsFormatter``
"""
super().__init__("[{asctime}] ")
def _format_metrics(self, metrics):
# metrics : dict[str: dict[str: float]]
metrics_formatted = {}
for key, value in metrics.items():
metrics_formatted_ = [
utils.format_metric(m_name, m_value)
for m_name, m_value in sorted(value.items())
]
metrics_formatted_ = " | ".join(metrics_formatted_)
metrics_formatted[key] = metrics_formatted_
return metrics_formatted
def _format_message(self, state: _State):
message = [""]
metrics = self._format_metrics(state.metric_manager.epoch_values)
for key, value in metrics.items():
message.append(
f"{state.stage_epoch_log}/{state.num_epochs} "
f"* Epoch {state.epoch_log} ({key}): {value}"
)
message = "\n".join(message)
return message
class JsonMetricsFormatter(MetricsFormatter):
"""
Translate batch metrics in json format.
This class is used by ``logging.Logger`` to make a string from record.
For details refer to official docs for 'logging' module.
Note:
This is inner class used by Logger callback,
no need to use it directly!
"""
def __init__(self):
"""
Initializes the ``JsonMetricsFormatter``
"""
super().__init__("")
def _format_message(self, state: _State):
res = dict(
metrics=state.metric_manager.epoch_values.copy(),
epoch=state.epoch,
time=datetime.now().isoformat()
)
return json.dumps(res, indent=True, ensure_ascii=False)
__all__ = ["MetricsFormatter", "TxtMetricsFormatter", "JsonMetricsFormatter"]
| from abc import ABC, abstractmethod
from datetime import datetime
import json
import logging
from catalyst import utils
from catalyst.core import _State
class MetricsFormatter(ABC, logging.Formatter):
"""
Abstract metrics formatter
"""
def __init__(self, message_prefix):
"""
Args:
message_prefix: logging format string
that will be prepended to message
"""
super().__init__(f"{message_prefix}{{message}}", style="{")
@abstractmethod
def _format_message(self, state: _State):
pass
def format(self, record: logging.LogRecord):
"""
Format message string
"""
# noinspection PyUnresolvedReferences
state = record.state
record.msg = self._format_message(state)
return super().format(record)
class TxtMetricsFormatter(MetricsFormatter):
"""
Translate batch metrics in human-readable format.
This class is used by ``logging.Logger`` to make a string from record.
For details refer to official docs for 'logging' module.
Note:
This is inner class used by Logger callback,
no need to use it directly!
"""
def __init__(self):
"""
Initializes the ``TxtMetricsFormatter``
"""
super().__init__("[{asctime}] ")
def _format_metrics(self, metrics):
# metrics : dict[str: dict[str: float]]
metrics_formatted = {}
for key, value in metrics.items():
metrics_formatted_ = [
utils.format_metric(m_name, m_value)
for m_name, m_value in sorted(value.items())
]
metrics_formatted_ = " | ".join(metrics_formatted_)
metrics_formatted[key] = metrics_formatted_
return metrics_formatted
def _format_message(self, state: _State):
message = [""]
metrics = self._format_metrics(state.metric_manager.epoch_values)
for key, value in metrics.items():
message.append(
f"{state.stage_epoch_log}/{state.num_epochs} "
f"* Epoch {state.epoch_log} ({key}): {value}"
)
message = "\n".join(message)
return message
class JsonMetricsFormatter(MetricsFormatter):
"""
Translate batch metrics in json format.
This class is used by ``logging.Logger`` to make a string from record.
For details refer to official docs for 'logging' module.
Note:
This is inner class used by Logger callback,
no need to use it directly!
"""
def __init__(self):
"""
Initializes the ``JsonMetricsFormatter``
"""
super().__init__("")
def _format_message(self, state: _State):
res = dict(
metrics=state.metric_manager.epoch_values.copy(),
epoch=state.epoch,
time=datetime.now().isoformat()
)
return json.dumps(res, indent=True, ensure_ascii=False)
__all__ = ["MetricsFormatter", "TxtMetricsFormatter", "JsonMetricsFormatter"] | en | 0.703113 | Abstract metrics formatter Args: message_prefix: logging format string that will be prepended to message Format message string # noinspection PyUnresolvedReferences Translate batch metrics in human-readable format. This class is used by ``logging.Logger`` to make a string from record. For details refer to official docs for 'logging' module. Note: This is inner class used by Logger callback, no need to use it directly! Initializes the ``TxtMetricsFormatter`` # metrics : dict[str: dict[str: float]] Translate batch metrics in json format. This class is used by ``logging.Logger`` to make a string from record. For details refer to official docs for 'logging' module. Note: This is inner class used by Logger callback, no need to use it directly! Initializes the ``JsonMetricsFormatter`` | 2.949586 | 3 |
fuzzy/fuzzy.py | Suraj1127/fuzzy-matcher | 0 | 9365 | #!/usr/bin/env python3
"""
Description: Python script to append the common columns in one sheet from another sheet using fuzzy matching.
"""
import pip
def import_or_install(package):
try:
__import__(package)
except ImportError:
pip.main(['install', package])
import os
import sys
import argparse
import_or_install('numpy')
import_or_install('pandas')
import_or_install('fuzzywuzzy')
import numpy as np
import pandas as pd
from fuzzywuzzy import process, fuzz
class FuzzyMatcher:
"""
FuzzyMatcher class to perform the fuzzy matching.
"""
def __init__(self, df_1, df_2, columns_1, columns_2, append_in='second'):
"""
The constructor takes five arguments. The last argument 'append_in' is optional.
Parameters:
df_1: the first table in pandas.DataFrame format or the name of the CSV file for the first table
df_2: the second table in pandas.DataFrame format or the name of the CSV file for the second table
columns_1: list of common columns in the first table
columns_2: list of common columns in the second table
append_in (optional):
'first' if the common columns are to be appended in the first table
'second' if the common columns are to be appended in the second table
"""
if type(df_1) == str:
df_1 = pd.read_csv(df_1)
if type(df_2) == str:
df_2 = pd.read_csv(df_2)
df_1.columns = df_1.columns.str.lower().str.strip()
df_2.columns = df_2.columns.str.lower().str.strip()
columns_1 = [i.lower().strip() for i in columns_1]
columns_2 = [i.lower().strip() for i in columns_2]
if append_in == 'first':
temp = df_1
df_1 = df_2
df_2 = temp
temp = columns_1
columns_1 = columns_2
columns_2 = temp
self.df_1 = df_1.rename(columns=dict(zip(columns_1, columns_2)))
self.columns = columns_2
self.df_2 = self._fuzzy_match(self.df_1, df_2, self.columns[0])
@staticmethod
def _string_matching(name, collection, mapping_):
"""
Returns similar name using fuzzy matching.
"""
if name in collection:
return name
if name in mapping_:
return mapping_[name]
similar = process.extractOne(name, collection, scorer=fuzz.ratio)[0]
mapping_[name] = similar
return similar
def _fuzzy_match(self, df_1_t, df_2_t, common_column_t):
"""
Returns dataframe with the common column appended.
Notice that the appended columns end with '_t'.
"""
collection = set(df_1_t[common_column_t])
mapping_ = {}
df_2_t[common_column_t + '_t'] = df_2_t[common_column_t].apply(self._string_matching, args=(collection, mapping_))
return df_2_t
@property
def fuzzy_match(self):
"""
Returns the dataframe consisting of all the appended columns.
"""
for i_t, common_column in enumerate(self.columns[1:], start=1):
self.df_2[common_column + '_t'] = np.nan
group_1 = self.df_1.groupby(self.columns[:i_t])
group_2 = self.df_2.groupby([i + '_t' for i in self.columns[:i_t]])
for key, df_slice_2 in group_2:
df_slice_1 = group_1.get_group(key)
df_slice_2 = self._fuzzy_match(df_slice_1, df_slice_2, common_column)
self.df_2.loc[df_slice_2.index, common_column + '_t'] = df_slice_2.loc[:, common_column + '_t']
return self.df_2
def save(self, filename):
"""
Saves the result dataframe to a CSV file, filename.
"""
self.df_2.to_csv(filename)
def parse_args(parser):
"""
Parsing and configuration of the command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--firstcsv', type=str, required=True, help='CSV file for first table.')
parser.add_argument('--secondcsv', type=str, required=True, help='CSV file for second table.')
parser.add_argument('--destination', type=str, default='output.csv', help='Destination filename.')
parser.add_argument('--commoncolumns1', type=str, required=True, help='Common columns for first table.')
parser.add_argument('--commoncolumns2', type=str, required=True, help='Common columns for second table in the same order.')
parser.add_argument("--in", dest="_in", default='second', choices=['second', 'first'], help='Table to append the columns. ')
return check_args(parser.parse_args())
def check_args(args):
"""
Checking the arguments if they are entered properly.
Validations performed:
1. Compulsory arguments are entered.
2. The entered filenames are present in the current folder.
3. The entered column names are present in the corresponding files.
4. If the destination filename is already present in the directory, ask the user if it can be overwritten.
"""
# for --firstcsv and --secondcsv
for filename in [args.firstcsv, args.secondcsv]:
if not os.path.isfile(filename):
raise Exception("File {} is not present in the currrent folder.".format(filename))
# --commoncolumns1
commoncolumns1 = [i.strip().lower() for i in args.commoncolumns1.split(',')]
temp = set(commoncolumns1) - set(pd.read_csv(args.firstcsv, nrows=1).columns.str.lower().str.strip())
if temp:
raise Exception("The following columns are not present in the file, {}:\n{}".format(args.firstcsv, temp))
# --commoncolumns2
commoncolumns2 = [i.strip().lower() for i in args.commoncolumns2.split(',')]
temp = set(commoncolumns2) - set(pd.read_csv(args.secondcsv, nrows=1).columns.str.lower().str.strip())
if temp:
raise Exception("The following columns are not present in the file, {}:\n{}".format(args.secondcsv, temp))
# --destination
if os.path.isfile(args.destination):
print("The file {} already exists. Do you want to overwrite it? y/n".format(args.destination))
ans = input().strip().lower()
if ans == 'n':
print("Please enter different destination filename and run the script again.")
sys.exit()
return args
if __name__ == "__main__":
# instantiate the ArgumentParser class and parse the arguments
parser = argparse.ArgumentParser()
arguments = parse_args(parser)
# save the arguments as some variables which later would be passed to FuzzyMatcher class
filename_1 = arguments.firstcsv
filename_2 = arguments.secondcsv
result_filename = arguments.destination
# clean and lowercase-ize the columns names
common_columns_1 = [i.strip().lower() for i in arguments.commoncolumns1.split(',')]
common_columns_2 = [i.strip().lower() for i in arguments.commoncolumns2.split(',')]
# instantiate the FuzzyMatcher object, perform the fuzzy match, and save the result to the destination CSV file
fuzzy_matcher = FuzzyMatcher(filename_1, filename_2, common_columns_1, common_columns_2, append_in=arguments._in)
fuzzy_matcher.fuzzy_match
fuzzy_matcher.save(result_filename)
| #!/usr/bin/env python3
"""
Description: Python script to append the common columns in one sheet from another sheet using fuzzy matching.
"""
import pip
def import_or_install(package):
try:
__import__(package)
except ImportError:
pip.main(['install', package])
import os
import sys
import argparse
import_or_install('numpy')
import_or_install('pandas')
import_or_install('fuzzywuzzy')
import numpy as np
import pandas as pd
from fuzzywuzzy import process, fuzz
class FuzzyMatcher:
"""
FuzzyMatcher class to perform the fuzzy matching.
"""
def __init__(self, df_1, df_2, columns_1, columns_2, append_in='second'):
"""
The constructor takes five arguments. The last argument 'append_in' is optional.
Parameters:
df_1: the first table in pandas.DataFrame format or the name of the CSV file for the first table
df_2: the second table in pandas.DataFrame format or the name of the CSV file for the second table
columns_1: list of common columns in the first table
columns_2: list of common columns in the second table
append_in (optional):
'first' if the common columns are to be appended in the first table
'second' if the common columns are to be appended in the second table
"""
if type(df_1) == str:
df_1 = pd.read_csv(df_1)
if type(df_2) == str:
df_2 = pd.read_csv(df_2)
df_1.columns = df_1.columns.str.lower().str.strip()
df_2.columns = df_2.columns.str.lower().str.strip()
columns_1 = [i.lower().strip() for i in columns_1]
columns_2 = [i.lower().strip() for i in columns_2]
if append_in == 'first':
temp = df_1
df_1 = df_2
df_2 = temp
temp = columns_1
columns_1 = columns_2
columns_2 = temp
self.df_1 = df_1.rename(columns=dict(zip(columns_1, columns_2)))
self.columns = columns_2
self.df_2 = self._fuzzy_match(self.df_1, df_2, self.columns[0])
@staticmethod
def _string_matching(name, collection, mapping_):
"""
Returns similar name using fuzzy matching.
"""
if name in collection:
return name
if name in mapping_:
return mapping_[name]
similar = process.extractOne(name, collection, scorer=fuzz.ratio)[0]
mapping_[name] = similar
return similar
def _fuzzy_match(self, df_1_t, df_2_t, common_column_t):
"""
Returns dataframe with the common column appended.
Notice that the appended columns end with '_t'.
"""
collection = set(df_1_t[common_column_t])
mapping_ = {}
df_2_t[common_column_t + '_t'] = df_2_t[common_column_t].apply(self._string_matching, args=(collection, mapping_))
return df_2_t
@property
def fuzzy_match(self):
"""
Returns the dataframe consisting of all the appended columns.
"""
for i_t, common_column in enumerate(self.columns[1:], start=1):
self.df_2[common_column + '_t'] = np.nan
group_1 = self.df_1.groupby(self.columns[:i_t])
group_2 = self.df_2.groupby([i + '_t' for i in self.columns[:i_t]])
for key, df_slice_2 in group_2:
df_slice_1 = group_1.get_group(key)
df_slice_2 = self._fuzzy_match(df_slice_1, df_slice_2, common_column)
self.df_2.loc[df_slice_2.index, common_column + '_t'] = df_slice_2.loc[:, common_column + '_t']
return self.df_2
def save(self, filename):
"""
Saves the result dataframe to a CSV file, filename.
"""
self.df_2.to_csv(filename)
def parse_args(parser):
"""
Parsing and configuration of the command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--firstcsv', type=str, required=True, help='CSV file for first table.')
parser.add_argument('--secondcsv', type=str, required=True, help='CSV file for second table.')
parser.add_argument('--destination', type=str, default='output.csv', help='Destination filename.')
parser.add_argument('--commoncolumns1', type=str, required=True, help='Common columns for first table.')
parser.add_argument('--commoncolumns2', type=str, required=True, help='Common columns for second table in the same order.')
parser.add_argument("--in", dest="_in", default='second', choices=['second', 'first'], help='Table to append the columns. ')
return check_args(parser.parse_args())
def check_args(args):
"""
Checking the arguments if they are entered properly.
Validations performed:
1. Compulsory arguments are entered.
2. The entered filenames are present in the current folder.
3. The entered column names are present in the corresponding files.
4. If the destination filename is already present in the directory, ask the user if it can be overwritten.
"""
# for --firstcsv and --secondcsv
for filename in [args.firstcsv, args.secondcsv]:
if not os.path.isfile(filename):
raise Exception("File {} is not present in the currrent folder.".format(filename))
# --commoncolumns1
commoncolumns1 = [i.strip().lower() for i in args.commoncolumns1.split(',')]
temp = set(commoncolumns1) - set(pd.read_csv(args.firstcsv, nrows=1).columns.str.lower().str.strip())
if temp:
raise Exception("The following columns are not present in the file, {}:\n{}".format(args.firstcsv, temp))
# --commoncolumns2
commoncolumns2 = [i.strip().lower() for i in args.commoncolumns2.split(',')]
temp = set(commoncolumns2) - set(pd.read_csv(args.secondcsv, nrows=1).columns.str.lower().str.strip())
if temp:
raise Exception("The following columns are not present in the file, {}:\n{}".format(args.secondcsv, temp))
# --destination
if os.path.isfile(args.destination):
print("The file {} already exists. Do you want to overwrite it? y/n".format(args.destination))
ans = input().strip().lower()
if ans == 'n':
print("Please enter different destination filename and run the script again.")
sys.exit()
return args
if __name__ == "__main__":
# instantiate the ArgumentParser class and parse the arguments
parser = argparse.ArgumentParser()
arguments = parse_args(parser)
# save the arguments as some variables which later would be passed to FuzzyMatcher class
filename_1 = arguments.firstcsv
filename_2 = arguments.secondcsv
result_filename = arguments.destination
# clean and lowercase-ize the columns names
common_columns_1 = [i.strip().lower() for i in arguments.commoncolumns1.split(',')]
common_columns_2 = [i.strip().lower() for i in arguments.commoncolumns2.split(',')]
# instantiate the FuzzyMatcher object, perform the fuzzy match, and save the result to the destination CSV file
fuzzy_matcher = FuzzyMatcher(filename_1, filename_2, common_columns_1, common_columns_2, append_in=arguments._in)
fuzzy_matcher.fuzzy_match
fuzzy_matcher.save(result_filename)
| en | 0.69875 | #!/usr/bin/env python3 Description: Python script to append the common columns in one sheet from another sheet using fuzzy matching. FuzzyMatcher class to perform the fuzzy matching. The constructor takes five arguments. The last argument 'append_in' is optional. Parameters: df_1: the first table in pandas.DataFrame format or the name of the CSV file for the first table df_2: the second table in pandas.DataFrame format or the name of the CSV file for the second table columns_1: list of common columns in the first table columns_2: list of common columns in the second table append_in (optional): 'first' if the common columns are to be appended in the first table 'second' if the common columns are to be appended in the second table Returns similar name using fuzzy matching. Returns dataframe with the common column appended. Notice that the appended columns end with '_t'. Returns the dataframe consisting of all the appended columns. Saves the result dataframe to a CSV file, filename. Parsing and configuration of the command line arguments. Checking the arguments if they are entered properly. Validations performed: 1. Compulsory arguments are entered. 2. The entered filenames are present in the current folder. 3. The entered column names are present in the corresponding files. 4. If the destination filename is already present in the directory, ask the user if it can be overwritten. # for --firstcsv and --secondcsv # --commoncolumns1 # --commoncolumns2 # --destination # instantiate the ArgumentParser class and parse the arguments # save the arguments as some variables which later would be passed to FuzzyMatcher class # clean and lowercase-ize the columns names # instantiate the FuzzyMatcher object, perform the fuzzy match, and save the result to the destination CSV file | 3.705154 | 4 |
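The class also works directly on in-memory DataFrames, as the constructor docstring notes; a small programmatic example with toy data (the district names and numbers are invented, with the right-hand spellings deliberately mangled so the fuzzy match has something to do):

import pandas as pd

left = pd.DataFrame({"District": ["Kathmandu", "Lalitpur"], "Population": [975453, 468132]})
right = pd.DataFrame({"district": ["Kathamndu", "Laltipur"], "schools": [120, 80]})   # misspelled on purpose

matcher = FuzzyMatcher(left, right, ["District"], ["district"])
result = matcher.fuzzy_match            # property call appends the matched 'district_t' column
print(result[["district", "district_t", "schools"]])
matcher.save("matched.csv")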
exoatlas/visualizations/panels/BubblePanel.py | zkbt/exopop | 4 | 9366 | <filename>exoatlas/visualizations/panels/BubblePanel.py<gh_stars>1-10
from .Panel import *
__all__ = ['BubblePanel']
default_size = plt.matplotlib.rcParams['lines.markersize']**2
class BubblePanel(Panel):
'''
BubblePanel is a general wrapper for making scatter plots
where planets are represented as bubbles that can have
informative sizes and/or colors.
'''
def __init__(self,
xaxis=None,
yaxis=None,
size=None, size_normalization=None,
color=None, cmap='plasma', vmin=None, vmax=None, color_normalization=None,
**kw):
'''
Initialize a plotting panel.
Parameters
----------
size : PlottableAxis, str, float, None
What should the sizes of points be or encode?
size_normalization : float
If sizes depend on quantities,
how should they be normalized?
color : PlottableAxis, str, float, None
What should the colors of points be or encode?
cmap : str, cmap from plt.matplotlib.cm
If the colors depend on quantities,
what cmap should be used for them?
vmin : float, astropy.units.quantity.Quantity
If the colors depend on quantities,
what should the bottom of the cmap be?
vmax : float, astropy.units.quantity.Quantity
If the colors depend on quantities,
what should the top of the cmap be?
color_normalization : matplotlib.colors.Normalize
If color depend on quantities, how should
the values be normalized. If color_normalization
is defined, any values provided here for
vmin and vmax will be ignored.
**kw : dict
Other keywords will be passed on to *all*
Panel/Plottable initializations (which may
include x, y, size, and color). If you need
more fine-grained control over which axis
gets which keyword, consider initializing
those panels one-by-one.
'''
# initialize the basics of the panel with the plottable axes
Panel.__init__(self, xaxis=xaxis, yaxis=yaxis, **kw)
# set up how we should scale the sizes of points
size = clean_axis(size)
try:
# try to make a variable size axis
self.plottable['size'] = size(panel=self, **kw)
default_size_normalization = self.plottable['size'].size_normalization
except TypeError:
# otherwise, use a single size for all points
self.plottable['size'] = size
default_size_normalization = 1
#self.plottable['x'].panel = self
#self.plottable['y'].panel = self
# make sure a size normalization has been defined
self.size_normalization = size_normalization or default_size_normalization
# set up how we should set the colors of points
color = clean_axis(color)
try:
# try to make a variable color axis
self.plottable['color'] = color(panel=self, **kw)
default_lim = self.plottable['color'].lim
except TypeError:
# otherwise, use a single color for all points
self.plottable['color'] = color
default_lim = [None, None]
# if an actual cmap was provided, use it
if isinstance(cmap, plt.matplotlib.colors.Colormap):
self.cmap = cmap
# otherwise, treat the cmap as a string key
else:
self.cmap = plt.matplotlib.cm.cmap_d[cmap]
# make sure the color map limits are set
self.vmin = vmin or default_lim[0]
self.vmax = vmax or default_lim[1]
# if a custom normalization is used, reset vmin + vmax
self.color_normalization = color_normalization
if isinstance(self.color_normalization,
plt.matplotlib.colors.Normalize):
# pull the normalization's min/max for information
self.vmin = color_normalization.vmin
self.vmax = color_normalization.vmax
# apply (x,y) axis labels, scales, limits appropriately
for axis in 'xy':
for attribute in ['label', 'scale', 'lim']:
setattr(self,
f'{axis}{attribute}',
getattr(self.plottable[axis],
attribute))
#DEBUG
self.summarize()
def get_sizes(self):
'''
The sizes of the bubbles.
Returns
-------
s : an input for plt.scatter
Either a single scalar, or an array with variable
sizes for each bubble according to some quantity.
'''
# should we ignore any variable size instructions?
if self.pop.respond_to_size == False:
size = self.pop.plotkw.get('s', None)
# if desired, set variable sizes
elif isinstance(self.plottable['size'], PlottableAxis):
# get the raw values for the sizes
x = self.plottable['size'].value()
# calculate the normalized size
size = default_size*x/self.size_normalization
# otherwise, set a single size
else:
# get default, first from pop and then from panel
size = self.pop.plotkw.get('s', self.plottable['size'])
# return a valid input to plt.scatter(s=...)
return size
def get_colors(self):
'''
The colors of the bubbles.
Returns
-------
c : an input for plt.scatter
Either a single color, or an array with variable
colors for each bubble according to some quantity.
'''
# should we ignore any variable color instructions?
if self.pop.respond_to_color == False:
color = self.pop.color
# should we use a variable color?
elif isinstance(self.plottable['color'], PlottableAxis):
# get the raw values to go into the color
x = self.plottable['color'].value()
# FIXME - make sure to check vmin/vmax are valid
#if (self.vmin is None) or (self.vmax is None):
# raise AtlasError(f'''
# It looks like you're trying to use
# {self.plottable['color']} to set variable
# colors for bubbles. To do so, please make
# sure it has finite values defined for its
# .vmin and .vmax attributes.
# ''')
# make sure we have *some* normalizer defined
f = plt.matplotlib.colors.Normalize
self.color_normalization = (self.color_normalization
or f(vmin=self.vmin, vmax=self.vmax))
normalized = self.color_normalization(x)
color = self.cmap(normalized)
# finally, should we just use a default color?
else:
# get default, first from pop and then from panel
color = self.pop.color
if color is None:
color = self.plottable['color']
# return a valid input to any one of the following:
# plt.scatter(c=...)
# plt.scatter(edgecolors=...)
# plt.scatter(facecolors=...)
return color
def kw(self, key=None, **kwargs):
'''
Do a little decision-making about the plotting keyword
arguments, pulling defaults from each population where
needed.
Parameter
---------
key : str
The population for which we should pull keywords.
If None, go with the current population.
**kwargs : dict
All other keywords will be directed toward
overwriting individual population defaults.
'''
# identify the population we're working with
if key is None:
key = self.key
#else:
self.point_at(key)
# define some default keywords, which can be over-written
default = dict(s=self.get_sizes(),
marker=self.pop.marker,
linewidth=self.pop.linewidth,
alpha=self.pop.alpha,
zorder=self.pop.zorder,
label=self.pop.label)
# sort out whether faces and/or edges should get color
c=self.get_colors()
if self.pop.filled:
default['facecolors'] = c
else:
default['facecolors'] = 'none'
if self.pop.outlined:
default['edgecolors'] = c
else:
default['edgecolors'] = 'none'
# if any other keywords are provided, overwrite these defaults
for k, v in kwargs.items():
default[k] = v
return default
def plot(self, key, ax=None, labelkw={}, **kwargs):
'''
Add the points for a particular population to this panel.
Parameters
----------
key : str
The population (as an item in the self.pops dictionary) to add.
ax :
Into what ax should we place this plot?
If None, use default.
labelkw : dict
Keywords for labeling the planet names.
**kwargs : dict
Any extra keywords will be passed on to `scatter`
'''
# focus attention on that population
self.point_at(key)
# make sure we're plotting into the appropriate axes
try:
plt.sca(self.ax)
except AttributeError:
self.setup(ax=ax)
# add the scattered points
self.scattered[key] = self.ax.scatter(self.x, self.y, **self.kw(key,**kwargs))
# set the scales, limits, labels
self.finish_plot(labelkw=labelkw)
| <filename>exoatlas/visualizations/panels/BubblePanel.py<gh_stars>1-10
from .Panel import *
__all__ = ['BubblePanel']
default_size = plt.matplotlib.rcParams['lines.markersize']**2
class BubblePanel(Panel):
'''
BubblePanel is a general wrapper for making scatter plots
where planets are represented as bubbles that can have
informative sizes and/or colors.
'''
def __init__(self,
xaxis=None,
yaxis=None,
size=None, size_normalization=None,
color=None, cmap='plasma', vmin=None, vmax=None, color_normalization=None,
**kw):
'''
Initialize a plotting panel.
Parameters
----------
size : PlottableAxis, str, float, None
What should the sizes of points be or encode?
size_normalization : float
If sizes depend on quantities,
how should they be normalized?
color : PlottableAxis, str, float, None
What should the colors of points be or encode?
cmap : str, cmap from plt.matplotlib.cm
If the colors depend on quantities,
what cmap should be used for them?
vmin : float, astropy.units.quantity.Quantity
If the colors depend on quantities,
what should the bottom of the cmap be?
vmax : float, astropy.units.quantity.Quantity
If the colors depend on quantities,
what should the top of the cmap be?
color_normalization : matplotlib.colors.Normalize
If color depend on quantities, how should
the values be normalized. If color_normalization
is defined, any values provided here for
vmin and vmax will be ignored.
**kw : dict
Other keywords will be passed on to *all*
Panel/Plottable initializations (which may
include x, y, size, and color). If you need
more fine-grained control over which axis
gets which keyword, consider initializing
those panels one-by-one.
'''
# initialize the basics of the panel with the plottable axes
Panel.__init__(self, xaxis=xaxis, yaxis=yaxis, **kw)
# set up how we should scale the sizes of points
size = clean_axis(size)
try:
# try to make a variable size axis
self.plottable['size'] = size(panel=self, **kw)
default_size_normalization = self.plottable['size'].size_normalization
except TypeError:
# otherwise, use a single size for all points
self.plottable['size'] = size
default_size_normalization = 1
#self.plottable['x'].panel = self
#self.plottable['y'].panel = self
# make sure a size normalization has been defined
self.size_normalization = size_normalization or default_size_normalization
# set up how we should set the colors of points
color = clean_axis(color)
try:
# try to make a variable color axis
self.plottable['color'] = color(panel=self, **kw)
default_lim = self.plottable['color'].lim
except TypeError:
# otherwise, use a single color for all points
self.plottable['color'] = color
default_lim = [None, None]
# if an actual cmap was provided, use it
if isinstance(cmap, plt.matplotlib.colors.Colormap):
self.cmap = cmap
# otherwise, treat the cmap as a string key
else:
self.cmap = plt.matplotlib.cm.cmap_d[cmap]
# make sure the color map limits are set
self.vmin = vmin or default_lim[0]
self.vmax = vmax or default_lim[1]
# if a custom normalization is used, reset vmin + vmax
self.color_normalization = color_normalization
if isinstance(self.color_normalization,
plt.matplotlib.colors.Normalize):
# pull the normalization's min/max for information
self.vmin = color_normalization.vmin
self.vmax = color_normalization.vmax
# apply (x,y) axis labels, scales, limits appropriately
for axis in 'xy':
for attribute in ['label', 'scale', 'lim']:
setattr(self,
f'{axis}{attribute}',
getattr(self.plottable[axis],
attribute))
#DEBUG
self.summarize()
def get_sizes(self):
'''
The sizes of the bubbles.
Returns
-------
s : an input for plt.scatter
Either a single scalar, or an array with variable
sizes for each bubble according to some quantity.
'''
# should we ignore any variable size instructions?
if self.pop.respond_to_size == False:
size = self.pop.plotkw.get('s', None)
# if desired, set variable sizes
elif isinstance(self.plottable['size'], PlottableAxis):
# get the raw values for the sizes
x = self.plottable['size'].value()
# calculate the normalized size
size = default_size*x/self.size_normalization
# otherwise, set a single size
else:
# get default, first from pop and then from panel
size = self.pop.plotkw.get('s', self.plottable['size'])
# return a valid input to plt.scatter(s=...)
return size
def get_colors(self):
'''
The colors of the bubbles.
Returns
-------
c : an input for plt.scatter
Either a single color, or an array with variable
colors for each bubble according to some quantity.
'''
# should we ignore any variable color instructions?
if self.pop.respond_to_color == False:
color = self.pop.color
# should we use a variable color?
elif isinstance(self.plottable['color'], PlottableAxis):
# get the raw values to go into the color
x = self.plottable['color'].value()
# FIXME - make sure to check vmin/vmax are valid
#if (self.vmin is None) or (self.vmax is None):
# raise AtlasError(f'''
# It looks like you're trying to use
# {self.plottable['color']} to set variable
# colors for bubbles. To do so, please make
# sure it has finite values defined for its
# .vmin and .vmax attributes.
# ''')
# make sure we have *some* normalizer defined
f = plt.matplotlib.colors.Normalize
self.color_normalization = (self.color_normalization
or f(vmin=self.vmin, vmax=self.vmax))
normalized = self.color_normalization(x)
color = self.cmap(normalized)
# finally, should we just use a default color?
else:
# get default, first from pop and then from panel
color = self.pop.color
if color is None:
color = self.plottable['color']
# return a valid input to any one of the following:
# plt.scatter(c=...)
# plt.scatter(edgecolors=...)
# plt.scatter(facecolors=...)
return color
def kw(self, key=None, **kwargs):
'''
Do a little decision-making about the plotting keyword
arguments, pulling defaults from each population where
needed.
Parameter
---------
key : str
The population for which we should pull keywords.
If None, go with the current population.
**kwargs : dict
All other keywords will be directed toward
overwriting individual population defaults.
'''
# identify the population we're working with
if key is None:
key = self.key
#else:
self.point_at(key)
# define some default keywords, which can be over-written
default = dict(s=self.get_sizes(),
marker=self.pop.marker,
linewidth=self.pop.linewidth,
alpha=self.pop.alpha,
zorder=self.pop.zorder,
label=self.pop.label)
# sort out whether faces and/or edges should get color
c=self.get_colors()
if self.pop.filled:
default['facecolors'] = c
else:
default['facecolors'] = 'none'
if self.pop.outlined:
default['edgecolors'] = c
else:
default['edgecolors'] = 'none'
# if any other keywords are provided, overwrite these defaults
for k, v in kwargs.items():
default[k] = v
return default
def plot(self, key, ax=None, labelkw={}, **kwargs):
'''
Add the points for a particular population to this panel.
Parameters
----------
key : str
The population (as an item in the self.pops dictionary) to add.
ax :
Into what ax should we place this plot?
If None, use default.
labelkw : dict
Keywords for labeling the planet names.
**kwargs : dict
Any extra keywords will be passed on to `scatter`
'''
# focus attention on that population
self.point_at(key)
# make sure we're plotting into the appropriate axes
try:
plt.sca(self.ax)
except AttributeError:
self.setup(ax=ax)
# add the scattered points
self.scattered[key] = self.ax.scatter(self.x, self.y, **self.kw(key,**kwargs))
# set the scales, limits, labels
self.finish_plot(labelkw=labelkw)
| en | 0.739136 | BubblePanel is a general wrapper for making scatter plots where planets are represented as bubbles that can have informative sizes and/or colors. Initialize a plotting panel. Parameters ---------- size : PlottableAxis, str, float, None What should the sizes of points be or encode? size_normalization : float If sizes depend on quantities, how should they be normalized? color : PlottableAxis, str, float, None What should the colors of points be or encode? cmap : str, cmap from plt.matplotlib.cm If the colors depend on quantities, what cmap should be used for them? vmin : float, astropy.units.quantity.Quantity If the colors depend on quantities, what should the bottom of the cmap be? vmax : float, astropy.units.quantity.Quantity If the colors depend on quantities, what should the top of the cmap be? color_normalization : matplotlib.colors.Normalize If color depend on quantities, how should the values be normalized. If color_normalization is defined, any values provided here for vmin and vmax will be ignored. **kw : dict Other keywords will be passed on to *all* Panel/Plottable initializations (which may include x, y, size, and color). If you need more fine-grained control over which axis gets which keyword, consider initializing those panels one-by-one. # initialize the basics of the panel with the plottable axes # set up how we should scale the sizes of points # try to make a variable size axis # otherwise, use a single size for all points #self.plottable['x'].panel = self #self.plottable['y'].panel = self # make sure a size normalization has been defined # set up how we should set the colors of points # try to make a variable color axis # otherwise, use a single color for all points # if an actual cmap was provided, use it # otherwise, treat the cmap as a string key # make sure the color map limits are set # if a custom normalization is used, reset vmin + vmax # pull the normalization's min/max for information # apply (x,y) axis labels, scales, limits appropriately #DEBUG The sizes of the bubbles. Returns ------- s : an input for plt.scatter Either a single scalar, or an array with variable sizes for each bubble according to some quantity. # should we ignore any variable size instructions? # if desired, set variable sizes # get the raw values for the sizes # calculate the normalized size # otherwise, set a single size # get default, first from pop and then from panel # return a valid input to plt.scatter(s=...) The colors of the bubbles. Returns ------- c : an input for plt.scatter Either a single color, or an array with variable colors for each bubble according to some quantity. # should we ignore any variable color instructions? # should we use a variable color? # get the raw values to go into the color # FIXME - make sure to check vmin/vmax are valid #if (self.vmin is None) or (self.vmax is None): # raise AtlasError(f''' # It looks like you're trying to use # {self.plottable['color']} to set variable # colors for bubbles. To do so, please make # sure it has finite values defined for its # .vmin and .vmax attributes. # ''') # make sure we have *some* normalizer defined # finally, should we just use a default color? # get default, first from pop and then from panel # return a valid input to any one of the following: # plt.scatter(c=...) # plt.scatter(edgecolors=...) # plt.scatter(facecolors=...) Do a little decision-making about the plotting keyword arguments, pulling defaults from each population where needed. 
Parameter --------- key : str The population for which we should pull keywords. If None, go with the current population. **kwargs : dict All other keywords will be directed toward overwriting individual population defaults. # identify the population we're working with #else: # define some default keywords, which can be over-written # sort out whether faces and/or edges should get color # if any other keywords are provided, overwrite these defaults Add the points for a particular population to this panel. Parameters ---------- key : str The population (as an item in the self.pops dictionary) to add. ax : Into what ax should we place this plot? If None, use default. labelkw : dict Keywords for labeling the planet names. **kwargs : dict Any extra keywords will be passed on to `scatter` # focus attention on that population # make sure we're plotting into the appropriate axes # add the scattered points # set the scales, limits, labels | 3.119005 | 3 |
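The easiest piece of this to show in isolation is the size rule from get_sizes(): bubble area is matplotlib's default marker area scaled by value / size_normalization. A self-contained illustration with toy radii (no exoatlas objects involved):

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

# Same default area as the module above; area scales as value / size_normalization.
default_size = mpl.rcParams['lines.markersize'] ** 2
radii = np.array([0.5, 1.0, 2.0, 4.0, 11.0])     # toy values
size_normalization = 1.0
sizes = default_size * radii / size_normalization

plt.scatter(np.arange(len(radii)), radii, s=sizes, facecolors='none', edgecolors='k')
plt.xlabel('planet index')
plt.ylabel('radius (toy units)')
plt.show()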
venv/lib/python2.7/dist-packages/landscape/sysinfo/load.py | pengwu/scapy_env | 0 | 9367 | import os
from twisted.internet.defer import succeed
class Load(object):
def register(self, sysinfo):
self._sysinfo = sysinfo
def run(self):
self._sysinfo.add_header("System load", str(os.getloadavg()[0]))
return succeed(None)
| import os
from twisted.internet.defer import succeed
class Load(object):
def register(self, sysinfo):
self._sysinfo = sysinfo
def run(self):
self._sysinfo.add_header("System load", str(os.getloadavg()[0]))
return succeed(None)
| none | 1 | 2.063761 | 2 |
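The plugin contract implied above is tiny: register() receives a sysinfo object exposing add_header(), and run() returns an already-fired Deferred. A sketch with a stand-in registry (FakeSysInfo is invented; Landscape's real object does more):

# Stand-in sysinfo registry, just enough to observe the header being added.
class FakeSysInfo:
    def __init__(self):
        self.headers = []
    def add_header(self, name, value):
        self.headers.append((name, value))

plugin = Load()
plugin.register(FakeSysInfo())
plugin.run()                        # fires immediately via succeed(None)
print(plugin._sysinfo.headers)      # e.g. [('System load', '0.42')], host-dependent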
|
src/boh_api/viewsets.py | dougmorato/bag-of-holding | 0 | 9368 | <filename>src/boh_api/viewsets.py
from rest_framework import viewsets
from boh import models
from . import serializers
class OrganizationViewSet(viewsets.ModelViewSet):
queryset = models.Organization.objects.all()
serializer_class = serializers.OrganizationSerializer
class ApplicationViewSet(viewsets.ModelViewSet):
queryset = models.Application.objects.all()
serializer_class = serializers.ApplicationSerializer
class TagViewSet(viewsets.ModelViewSet):
queryset = models.Tag.objects.all()
serializer_class = serializers.TagSerializer
class PersonViewSet(viewsets.ModelViewSet):
queryset = models.Person.objects.all()
serializer_class = serializers.PersonSerializer
| <filename>src/boh_api/viewsets.py
from rest_framework import viewsets
from boh import models
from . import serializers
class OrganizationViewSet(viewsets.ModelViewSet):
queryset = models.Organization.objects.all()
serializer_class = serializers.OrganizationSerializer
class ApplicationViewSet(viewsets.ModelViewSet):
queryset = models.Application.objects.all()
serializer_class = serializers.ApplicationSerializer
class TagViewSet(viewsets.ModelViewSet):
queryset = models.Tag.objects.all()
serializer_class = serializers.TagSerializer
class PersonViewSet(viewsets.ModelViewSet):
queryset = models.Person.objects.all()
serializer_class = serializers.PersonSerializer
| none | 1 | 1.870128 | 2 |
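These viewsets only become routable once registered with a router; a typical urls.py wiring is sketched below (the URL prefixes are assumptions, not taken from the project):

from rest_framework import routers
from . import viewsets

router = routers.DefaultRouter()
router.register(r'organizations', viewsets.OrganizationViewSet)
router.register(r'applications', viewsets.ApplicationViewSet)
router.register(r'tags', viewsets.TagViewSet)
router.register(r'people', viewsets.PersonViewSet)

urlpatterns = router.urls    # include in the project's URLconf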
|
githubintro-fe2d832af2bad7d6b27d036c205cc9d8414b2183/CommunicationAnimation.py | TatendaNoreen/Python | 0 | 9369 | import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot
import agentframework
import csv
import matplotlib.animation
#create environment in which agents will operate
environment=[]
#read csv downloaded file
f = open('in.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
rowlist=[] # A list of rows
environment.append(rowlist)
for value in row: # A list of value
#print(value) # Floats
rowlist.append(value)
f.close() # Don't close until you are done with the reader;
# the data is read on request.
#def distance_between(agents_row_a, agents_row_b):
# return (((agents_row_a.x - agents_row_b.x)**2) +
# ((agents_row_a.y - agents_row_b.y)**2))**0.5
num_of_agents = 10
num_of_iterations = 10
neighbourhood = 20
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
# Make the agents and connecting with the environment.
agents = []
def update(frame_number):
fig.clear()
for i in range(num_of_agents):
agents.append(agentframework.Agent(environment,agents))
# Move and eat agents with every move or iteration.
for j in range(num_of_iterations):
for i in range(num_of_agents):
agents[i].move()
agents[i].eat()
agents[i].share_with_neighbours(neighbourhood)
# Loop through the agents in self.agents .
# Calculate the distance between self and the current other agent:
# distance = self.distance_between(agent)
# If distance is less than or equal to the neighbourhood
# Sum self.store and agent.store .
# Divide sum by two to calculate average.
# self.store = average
# agent.store = average
# End if
# End loop
# plot
matplotlib.pyplot.xlim(0, 299)
matplotlib.pyplot.ylim(0, 299)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i].x,agents[i].y)
matplotlib.pyplot.imshow(environment)
animation = matplotlib.animation.FuncAnimation(fig, update, interval=1)
matplotlib.pyplot.show()
| import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot
import agentframework
import csv
import matplotlib.animation
#create environment in which agents will operate
environment=[]
#read csv downloaded file
f = open('in.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
rowlist=[] # A list of rows
environment.append(rowlist)
for value in row: # A list of value
#print(value) # Floats
rowlist.append(value)
f.close() # Don't close until you are done with the reader;
# the data is read on request.
#def distance_between(agents_row_a, agents_row_b):
# return (((agents_row_a.x - agents_row_b.x)**2) +
# ((agents_row_a.y - agents_row_b.y)**2))**0.5
num_of_agents = 10
num_of_iterations = 10
neighbourhood = 20
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
# Make the agents and connecting with the environment.
agents = []
def update(frame_number):
fig.clear()
for i in range(num_of_agents):
agents.append(agentframework.Agent(environment,agents))
# Move and eat agents with every move or iteration.
for j in range(num_of_iterations):
for i in range(num_of_agents):
agents[i].move()
agents[i].eat()
agents[i].share_with_neighbours(neighbourhood)
# Loop through the agents in self.agents .
# Calculate the distance between self and the current other agent:
# distance = self.distance_between(agent)
# If distance is less than or equal to the neighbourhood
# Sum self.store and agent.store .
# Divide sum by two to calculate average.
# self.store = average
# agent.store = average
# End if
# End loop
# plot
matplotlib.pyplot.xlim(0, 299)
matplotlib.pyplot.ylim(0, 299)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i].x,agents[i].y)
matplotlib.pyplot.imshow(environment)
animation = matplotlib.animation.FuncAnimation(fig, update, interval=1)
matplotlib.pyplot.show()
| en | 0.864358 | #create environment in which agents will operate #read csv downloaded file # A list of rows # A list of value #print(value) # Floats # Don't close until you are done with the reader; # the data is read on request. #def distance_between(agents_row_a, agents_row_b): # return (((agents_row_a.x - agents_row_b.x)**2) + # ((agents_row_a.y - agents_row_b.y)**2))**0.5 # Make the agents and connecting with the environment. # Move and eat agents with every move or iteration. # Loop through the agents in self.agents . # Calculate the distance between self and the current other agent: # distance = self.distance_between(agent) # If distance is less than or equal to the neighbourhood # Sum self.store and agent.store . # Divide sum by two to calculate average. # self.store = average # agent.store = average # End if # End loop # plot | 3.709414 | 4 |
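The commented pseudo-code inside update() describes what share_with_neighbours is expected to do on the agentframework side; a sketch of such a method under those assumptions (this is not the actual agentframework source):

# Hypothetical Agent method implementing the pseudo-code above.
def share_with_neighbours(self, neighbourhood):
    for agent in self.agents:
        if agent is self:
            continue
        # Euclidean distance between self and the other agent.
        distance = ((self.x - agent.x) ** 2 + (self.y - agent.y) ** 2) ** 0.5
        if distance <= neighbourhood:
            # Average the stores and give both agents the average.
            average = (self.store + agent.store) / 2
            self.store = average
            agent.store = average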
external/model-preparation-algorithm/tests/conftest.py | opencv/openvino_training_extensions | 775 | 9370 | <reponame>opencv/openvino_training_extensions<filename>external/model-preparation-algorithm/tests/conftest.py
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
try:
import e2e.fixtures
from e2e.conftest_utils import * # noqa
from e2e.conftest_utils import pytest_addoption as _e2e_pytest_addoption # noqa
from e2e import config # noqa
from e2e.utils import get_plugins_from_packages
pytest_plugins = get_plugins_from_packages([e2e])
except ImportError:
_e2e_pytest_addoption = None
pass
import config
import pytest
from ote_sdk.test_suite.pytest_insertions import *
from ote_sdk.test_suite.training_tests_common import REALLIFE_USECASE_CONSTANT
pytest_plugins = get_pytest_plugins_from_ote()
ote_conftest_insertion(default_repository_name='ote/training_extensions/external/model-preparation-algorithm')
@pytest.fixture
def ote_test_domain_fx():
return 'model-preparation-algorithm'
@pytest.fixture
def ote_test_scenario_fx(current_test_parameters_fx):
assert isinstance(current_test_parameters_fx, dict)
if current_test_parameters_fx.get('usecase') == REALLIFE_USECASE_CONSTANT:
return 'performance'
else:
return 'integration'
@pytest.fixture(scope='session')
def ote_templates_root_dir_fx():
import os.path as osp
import logging
logger = logging.getLogger(__name__)
root = osp.dirname(osp.dirname(osp.realpath(__file__)))
root = f'{root}/configs/'
logger.debug(f'overloaded ote_templates_root_dir_fx: return {root}')
return root
@pytest.fixture(scope='session')
def ote_reference_root_dir_fx():
import os.path as osp
import logging
logger = logging.getLogger(__name__)
root = osp.dirname(osp.dirname(osp.realpath(__file__)))
root = f'{root}/tests/reference/'
logger.debug(f'overloaded ote_reference_root_dir_fx: return {root}')
return root
# pytest magic
def pytest_generate_tests(metafunc):
ote_pytest_generate_tests_insertion(metafunc)
def pytest_addoption(parser):
ote_pytest_addoption_insertion(parser)
| # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
try:
import e2e.fixtures
from e2e.conftest_utils import * # noqa
from e2e.conftest_utils import pytest_addoption as _e2e_pytest_addoption # noqa
from e2e import config # noqa
from e2e.utils import get_plugins_from_packages
pytest_plugins = get_plugins_from_packages([e2e])
except ImportError:
_e2e_pytest_addoption = None
pass
import config
import pytest
from ote_sdk.test_suite.pytest_insertions import *
from ote_sdk.test_suite.training_tests_common import REALLIFE_USECASE_CONSTANT
pytest_plugins = get_pytest_plugins_from_ote()
ote_conftest_insertion(default_repository_name='ote/training_extensions/external/model-preparation-algorithm')
@pytest.fixture
def ote_test_domain_fx():
return 'model-preparation-algorithm'
@pytest.fixture
def ote_test_scenario_fx(current_test_parameters_fx):
assert isinstance(current_test_parameters_fx, dict)
if current_test_parameters_fx.get('usecase') == REALLIFE_USECASE_CONSTANT:
return 'performance'
else:
return 'integration'
@pytest.fixture(scope='session')
def ote_templates_root_dir_fx():
import os.path as osp
import logging
logger = logging.getLogger(__name__)
root = osp.dirname(osp.dirname(osp.realpath(__file__)))
root = f'{root}/configs/'
logger.debug(f'overloaded ote_templates_root_dir_fx: return {root}')
return root
@pytest.fixture(scope='session')
def ote_reference_root_dir_fx():
import os.path as osp
import logging
logger = logging.getLogger(__name__)
root = osp.dirname(osp.dirname(osp.realpath(__file__)))
root = f'{root}/tests/reference/'
logger.debug(f'overloaded ote_reference_root_dir_fx: return {root}')
return root
# pytest magic
def pytest_generate_tests(metafunc):
ote_pytest_generate_tests_insertion(metafunc)
def pytest_addoption(parser):
ote_pytest_addoption_insertion(parser) | en | 0.321773 | # Copyright (C) 2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # # noqa # noqa # noqa # pytest magic | 1.79058 | 2 |
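Tests in this package pick these fixtures up by name through normal pytest injection; a made-up example of consuming two of them:

# Hypothetical test module relying on the fixtures defined above.
def test_templates_dir_points_at_configs(ote_templates_root_dir_fx):
    assert ote_templates_root_dir_fx.endswith('/configs/')

def test_domain_name(ote_test_domain_fx):
    assert ote_test_domain_fx == 'model-preparation-algorithm'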
ibis/udf/validate.py | rtpsw/ibis | 986 | 9371 | """Validation for UDFs.
Warning: This is an experimental module and API here can change without notice.
DO NOT USE DIRECTLY.
"""
from inspect import Parameter, Signature, signature
from typing import Any, Callable, List
import ibis.common.exceptions as com
from ibis.expr.datatypes import DataType
def _parameter_count(funcsig: Signature) -> int:
"""Get the number of positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters
"""
return sum(
param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
for param in funcsig.parameters.values()
if param.default is Parameter.empty
)
def validate_input_type(
input_type: List[DataType], func: Callable
) -> Signature:
"""Check that the declared number of inputs (the length of `input_type`)
and the number of inputs to `func` are equal.
If the signature of `func` uses *args, then no check is done (since no
check can be done).
Parameters
----------
input_type : List[DataType]
func : callable
Returns
-------
inspect.Signature
"""
funcsig = signature(func)
params = funcsig.parameters.values()
# We can only do validation if all the positional arguments are explicit
# (i.e. no *args)
if not any(param.kind is Parameter.VAR_POSITIONAL for param in params):
declared_parameter_count = len(input_type)
function_parameter_count = _parameter_count(funcsig)
if declared_parameter_count != function_parameter_count:
raise TypeError(
'Function signature {!r} has {:d} parameters, '
'input_type has {:d}. These must match. Non-column '
'parameters must be defined as keyword only, i.e., '
'def foo(col, *, function_param).'.format(
func.__name__,
function_parameter_count,
declared_parameter_count,
)
)
return funcsig
def validate_output_type(output_type: Any) -> None:
"""Check that the output type is a single datatype."""
if isinstance(output_type, list):
raise com.IbisTypeError(
'The output type of a UDF must be a single datatype.'
)
| """Validation for UDFs.
Warning: This is an experimental module and API here can change without notice.
DO NOT USE DIRECTLY.
"""
from inspect import Parameter, Signature, signature
from typing import Any, Callable, List
import ibis.common.exceptions as com
from ibis.expr.datatypes import DataType
def _parameter_count(funcsig: Signature) -> int:
"""Get the number of positional-or-keyword or position-only parameters in a
function signature.
Parameters
----------
funcsig : inspect.Signature
A UDF signature
Returns
-------
int
The number of parameters
"""
return sum(
param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
for param in funcsig.parameters.values()
if param.default is Parameter.empty
)
def validate_input_type(
input_type: List[DataType], func: Callable
) -> Signature:
"""Check that the declared number of inputs (the length of `input_type`)
and the number of inputs to `func` are equal.
If the signature of `func` uses *args, then no check is done (since no
check can be done).
Parameters
----------
input_type : List[DataType]
func : callable
Returns
-------
inspect.Signature
"""
funcsig = signature(func)
params = funcsig.parameters.values()
# We can only do validation if all the positional arguments are explicit
# (i.e. no *args)
if not any(param.kind is Parameter.VAR_POSITIONAL for param in params):
declared_parameter_count = len(input_type)
function_parameter_count = _parameter_count(funcsig)
if declared_parameter_count != function_parameter_count:
raise TypeError(
'Function signature {!r} has {:d} parameters, '
'input_type has {:d}. These must match. Non-column '
'parameters must be defined as keyword only, i.e., '
'def foo(col, *, function_param).'.format(
func.__name__,
function_parameter_count,
declared_parameter_count,
)
)
return funcsig
def validate_output_type(output_type: Any) -> None:
"""Check that the output type is a single datatype."""
if isinstance(output_type, list):
raise com.IbisTypeError(
'The output type of a UDF must be a single datatype.'
)
| en | 0.463312 | Validation for UDFs. Warning: This is an experimental module and API here can change without notice. DO NOT USE DIRECTLY. Get the number of positional-or-keyword or position-only parameters in a function signature. Parameters ---------- funcsig : inspect.Signature A UDF signature Returns ------- int The number of parameters Check that the declared number of inputs (the length of `input_type`) and the number of inputs to `func` are equal. If the signature of `func` uses *args, then no check is done (since no check can be done). Parameters ---------- input_type : List[DataType] func : callable Returns ------- inspect.Signature # We can only do validation if all the positional arguments are explicit # (i.e. no *args) Check that the output type is a single datatype. | 3.176635 | 3 |
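What the two validators amount to in practice; a small sketch using ibis datatypes (dt.double / dt.int64 assumed available in the usual way):

import ibis.expr.datatypes as dt

def add_one(col):
    return col + 1

# One declared input, one positional parameter -> the Signature is returned.
sig = validate_input_type([dt.double], add_one)

# Declared two inputs but the function takes one -> TypeError.
try:
    validate_input_type([dt.double, dt.int64], add_one)
except TypeError as exc:
    print(exc)

validate_output_type(dt.double)        # fine: a single datatype
# validate_output_type([dt.double])    # would raise IbisTypeError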
packages/stattik/stattik/schema/schema.py | stattikcms/stattik | 1 | 9372 | import inspect
from ariadne import make_executable_schema, QueryType, MutationType, SubscriptionType
from .resolver import *
#
# Schema
#
class GrammarError(Exception):
pass
keywords = ['query', 'mutation', 'subscription', 'source']
class SchemaMetaDict(dict):
'''
Dictionary that allows decorated schema entry functions to be overloaded
'''
def __setitem__(self, key, value):
if key in self and callable(value) and hasattr(value, 'name'):
value.next_func = self[key]
if not hasattr(value.next_func, 'name'):
raise GrammarError(f'Redefinition of {key}. Perhaps an earlier {key} is missing @_')
super().__setitem__(key, value)
def __getitem__(self, key):
#if key not in self and key.isupper() and key[:1] != '_':
if key not in self and key.isupper() and not key[:1] in keywords:
return key.upper()
else:
return super().__getitem__(key)
def _query_decorator(name):
def decorate(func):
func.tag = 'query'
func.name = name
return func
return decorate
def _mutation_decorator(name):
def decorate(func):
func.tag = 'mutation'
func.name = name
return func
return decorate
def _subscription_decorator(name):
def decorate(func):
func.tag = 'subscription'
func.name = name
return func
return decorate
def _source_decorator(name):
def decorate(func):
func.tag = 'source'
func.name = name
return func
return decorate
class SchemaMeta(type):
@classmethod
def __prepare__(meta, *args, **kwargs):
d = SchemaMetaDict()
d['query'] = _query_decorator
d['mutation'] = _mutation_decorator
d['subscription'] = _subscription_decorator
d['source'] = _source_decorator
return d
def __new__(meta, selfname, bases, attributes):
#del attributes['_']
for key in keywords:
del attributes[key]
self = super().__new__(meta, selfname, bases, attributes)
self._build(list(attributes.items()))
return self
class Schema(metaclass=SchemaMeta):
def __init__(self, parent=None):
self.parent = parent
self.children = []
if parent:
parent.add_child(self)
self.db = parent.db
else:
self.db = self
self.entries = self.__class__.entries
@classmethod
def produce(self, parent=None):
schema = self(parent)
return schema
def add_child(self, schema):
self.children.append(schema)
def get_gql(self):
gql = [inspect.getdoc(self)]
for child in self.children:
gql.append(child.get_gql())
return "\n".join(gql)
def register(self):
for entry in self.entries:
entry.register(self)
for child in self.children:
child.register()
def add(self, r):
self.entries.append(r)
@classmethod
def __collect_functions(self, definitions):
'''
Collect all of the tagged grammar entries
'''
entries = [ (name, value) for name, value in definitions
if callable(value) and hasattr(value, 'name') ]
return entries
@classmethod
def _build(self, definitions):
if vars(self).get('_build', False):
return
# Collect all of the entry functions from the class definition
functions = self.__collect_functions(definitions)
self.entries = self.__build_entries(functions)
@classmethod
def __build_entries(self, functions):
entries = []
errors = ''
for name, func in functions:
entry = self._build_entry(func)
entries.append(entry)
return entries
@classmethod
def _build_entry(self, func):
tag = func.tag
name = func.name
prodname = func.__name__
unwrapped = inspect.unwrap(func)
filename = unwrapped.__code__.co_filename
lineno = unwrapped.__code__.co_firstlineno
logger.debug(f"_build_entry:tag: {tag}")
logger.debug(f"_build_entry:name: {name}")
logger.debug(f"_build_entry:prodname: {prodname}")
logger.debug(f"_build_entry:unwrapped: {unwrapped}")
#entry = Resolver(name, func, prodname=prodname, filename=filename, lineno=lineno)
entry = entry_factories[tag](self, name, func, prodname=prodname, filename=filename, lineno=lineno)
logger.debug(f"_build_entry:entry: {entry}")
return entry
# This is for testing or in case you don't want a database as the root schema
class RootSchema(Schema):
"""
type Query {
dummy: Int!
}
type Mutation {
setDummy(val: Int!): Int
}
type Subscription {
dummy: Int
}
"""
instance = None
def __init__(self, parent=None):
super().__init__(parent)
Schema.instance = self
self.query_type = QueryType()
self.mutation_type = MutationType()
self.subscription_type = SubscriptionType()
@classmethod
def produce(self):
if self.instance:
return self.instance
self.instance = schema = self()
return schema
def make_executable(self):
self.register()
#return make_executable_schema(type_defs, self.query)
return make_executable_schema(
self.get_gql(),
self.query_type,
self.mutation_type,
self.subscription_type
) | import inspect
from ariadne import make_executable_schema, QueryType, MutationType, SubscriptionType
from .resolver import *
#
# Schema
#
class GrammarError(Exception):
pass
keywords = ['query', 'mutation', 'subscription', 'source']
class SchemaMetaDict(dict):
'''
Dictionary that allows decorated schema entry functions to be overloaded
'''
def __setitem__(self, key, value):
if key in self and callable(value) and hasattr(value, 'name'):
value.next_func = self[key]
if not hasattr(value.next_func, 'name'):
raise GrammarError(f'Redefinition of {key}. Perhaps an earlier {key} is missing @_')
super().__setitem__(key, value)
def __getitem__(self, key):
#if key not in self and key.isupper() and key[:1] != '_':
if key not in self and key.isupper() and not key[:1] in keywords:
return key.upper()
else:
return super().__getitem__(key)
def _query_decorator(name):
def decorate(func):
func.tag = 'query'
func.name = name
return func
return decorate
def _mutation_decorator(name):
def decorate(func):
func.tag = 'mutation'
func.name = name
return func
return decorate
def _subscription_decorator(name):
def decorate(func):
func.tag = 'subscription'
func.name = name
return func
return decorate
def _source_decorator(name):
def decorate(func):
func.tag = 'source'
func.name = name
return func
return decorate
class SchemaMeta(type):
@classmethod
def __prepare__(meta, *args, **kwargs):
d = SchemaMetaDict()
d['query'] = _query_decorator
d['mutation'] = _mutation_decorator
d['subscription'] = _subscription_decorator
d['source'] = _source_decorator
return d
def __new__(meta, selfname, bases, attributes):
#del attributes['_']
for key in keywords:
del attributes[key]
self = super().__new__(meta, selfname, bases, attributes)
self._build(list(attributes.items()))
return self
class Schema(metaclass=SchemaMeta):
def __init__(self, parent=None):
self.parent = parent
self.children = []
if parent:
parent.add_child(self)
self.db = parent.db
else:
self.db = self
self.entries = self.__class__.entries
@classmethod
def produce(self, parent=None):
schema = self(parent)
return schema
def add_child(self, schema):
self.children.append(schema)
def get_gql(self):
gql = [inspect.getdoc(self)]
for child in self.children:
gql.append(child.get_gql())
return "\n".join(gql)
def register(self):
for entry in self.entries:
entry.register(self)
for child in self.children:
child.register()
def add(self, r):
self.entries.append(r)
@classmethod
def __collect_functions(self, definitions):
'''
Collect all of the tagged grammar entries
'''
entries = [ (name, value) for name, value in definitions
if callable(value) and hasattr(value, 'name') ]
return entries
@classmethod
def _build(self, definitions):
if vars(self).get('_build', False):
return
# Collect all of the entry functions from the class definition
functions = self.__collect_functions(definitions)
self.entries = self.__build_entries(functions)
@classmethod
def __build_entries(self, functions):
entries = []
errors = ''
for name, func in functions:
entry = self._build_entry(func)
entries.append(entry)
return entries
@classmethod
def _build_entry(self, func):
tag = func.tag
name = func.name
prodname = func.__name__
unwrapped = inspect.unwrap(func)
filename = unwrapped.__code__.co_filename
lineno = unwrapped.__code__.co_firstlineno
logger.debug(f"_build_entry:tag: {tag}")
logger.debug(f"_build_entry:name: {name}")
logger.debug(f"_build_entry:prodname: {prodname}")
logger.debug(f"_build_entry:unwrapped: {unwrapped}")
#entry = Resolver(name, func, prodname=prodname, filename=filename, lineno=lineno)
entry = entry_factories[tag](self, name, func, prodname=prodname, filename=filename, lineno=lineno)
logger.debug(f"_build_entry:entry: {entry}")
return entry
# This is for testing or in case you don't want a database as the root schema
class RootSchema(Schema):
"""
type Query {
dummy: Int!
}
type Mutation {
setDummy(val: Int!): Int
}
type Subscription {
dummy: Int
}
"""
instance = None
def __init__(self, parent=None):
super().__init__(parent)
Schema.instance = self
self.query_type = QueryType()
self.mutation_type = MutationType()
self.subscription_type = SubscriptionType()
@classmethod
def produce(self):
if self.instance:
return self.instance
self.instance = schema = self()
return schema
def make_executable(self):
self.register()
#return make_executable_schema(type_defs, self.query)
return make_executable_schema(
self.get_gql(),
self.query_type,
self.mutation_type,
self.subscription_type
) | en | 0.671376 | # # Schema # Dictionary that allows decorated schema entry functions to be overloaded #if key not in self and key.isupper() and key[:1] != '_': #del attributes['_'] Collect all of the tagged grammar entries # Collect all of the entry functions from the class definition #entry = Resolver(name, func, prodname=prodname, filename=filename, lineno=lineno) # This is for testing or in case you don't want a database as the root schema type Query { dummy: Int! } type Mutation { setDummy(val: Int!): Int } type Subscription { dummy: Int } #return make_executable_schema(type_defs, self.query) | 2.320187 | 2 |
toontown/battle/DistributedBattleBaseAI.py | DankMickey/Project-Altis-Educational-Source | 1 | 9373 | import random
from otp.ai.AIBase import *
from direct.distributed.ClockDelta import *
from toontown.battle.BattleBase import *
from toontown.battle.BattleCalculatorAI import *
from toontown.toonbase.ToontownBattleGlobals import *
from toontown.battle.SuitBattleGlobals import *
from pandac.PandaModules import *
from toontown.battle import BattleExperienceAI
from direct.distributed import DistributedObjectAI
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from toontown.ai import DatabaseObject
from toontown.toon import DistributedToonAI
from toontown.toon import InventoryBase
from toontown.toonbase import ToontownGlobals
from toontown.toon import NPCToons
from otp.ai.MagicWordGlobal import *
from toontown.pets import DistributedPetProxyAI
class DistributedBattleBaseAI(DistributedObjectAI.DistributedObjectAI, BattleBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleBaseAI')
def __init__(self, air, zoneId, finishCallback = None, maxSuits = 4, bossBattle = 0, tutorialFlag = 0, interactivePropTrackBonus = -1):
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
self.serialNum = 0
self.zoneId = zoneId
self.maxSuits = maxSuits
self.setBossBattle(bossBattle)
self.tutorialFlag = tutorialFlag
self.interactivePropTrackBonus = interactivePropTrackBonus
self.finishCallback = finishCallback
self.avatarExitEvents = []
self.responses = {}
self.adjustingResponses = {}
self.joinResponses = {}
self.adjustingSuits = []
self.adjustingToons = []
self.numSuitsEver = 0
BattleBase.__init__(self)
self.streetBattle = 1
self.pos = Point3(0, 0, 0)
self.initialSuitPos = Point3(0, 0, 0)
self.toonExp = {}
self.toonOrigQuests = {}
self.toonItems = {}
self.toonOrigMerits = {}
self.toonMerits = {}
self.toonParts = {}
self.battleCalc = BattleCalculatorAI(self, tutorialFlag)
if self.air.suitInvasionManager.getInvading():
mult = getInvasionMultiplier()
self.battleCalc.setSkillCreditMultiplier(mult)
if self.air.holidayManager.isMoreXpHolidayRunning():
mult = getMoreXpHolidayMultiplier()
self.battleCalc.setSkillCreditMultiplier(mult)
self.fsm = None
self.clearAttacks()
self.ignoreFaceOffDone = 0
self.needAdjust = 0
self.movieHasBeenMade = 0
self.movieHasPlayed = 0
self.rewardHasPlayed = 0
self.movieRequested = 0
self.ignoreResponses = 0
self.ignoreAdjustingResponses = 0
self.taskNames = []
self.exitedToons = []
self.suitsKilled = []
self.suitsKilledThisBattle = []
self.suitsKilledPerFloor = []
self.suitsEncountered = []
self.newToons = []
self.newSuits = []
self.numNPCAttacks = 0
self.npcAttacks = {}
self.pets = {}
self.fireCount = 0
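        # Main battle FSM: FaceOff / WaitForJoin -> WaitForInput -> MakeMovie ->
        # PlayMovie -> (Reward) -> Resume.  The separate mini-FSMs below track
        # whether the battle is joinable, runnable, and currently adjusting positions.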
self.fsm = ClassicFSM.ClassicFSM('DistributedBattleAI', [State.State('FaceOff', self.enterFaceOff, self.exitFaceOff, ['WaitForInput', 'Resume']),
State.State('WaitForJoin', self.enterWaitForJoin, self.exitWaitForJoin, ['WaitForInput', 'Resume']),
State.State('WaitForInput', self.enterWaitForInput, self.exitWaitForInput, ['MakeMovie', 'Resume']),
State.State('MakeMovie', self.enterMakeMovie, self.exitMakeMovie, ['PlayMovie', 'Resume']),
State.State('PlayMovie', self.enterPlayMovie, self.exitPlayMovie, ['WaitForJoin', 'Reward', 'Resume']),
State.State('Reward', self.enterReward, self.exitReward, ['Resume']),
State.State('Resume', self.enterResume, self.exitResume, []),
State.State('Off', self.enterOff, self.exitOff, ['FaceOff', 'WaitForJoin'])], 'Off', 'Off')
self.joinableFsm = ClassicFSM.ClassicFSM('Joinable', [State.State('Joinable', self.enterJoinable, self.exitJoinable, ['Unjoinable']), State.State('Unjoinable', self.enterUnjoinable, self.exitUnjoinable, ['Joinable'])], 'Unjoinable', 'Unjoinable')
self.joinableFsm.enterInitialState()
self.runableFsm = ClassicFSM.ClassicFSM('Runable', [State.State('Runable', self.enterRunable, self.exitRunable, ['Unrunable']), State.State('Unrunable', self.enterUnrunable, self.exitUnrunable, ['Runable'])], 'Unrunable', 'Unrunable')
self.runableFsm.enterInitialState()
self.adjustFsm = ClassicFSM.ClassicFSM('Adjust', [State.State('Adjusting', self.enterAdjusting, self.exitAdjusting, ['NotAdjusting', 'Adjusting']), State.State('NotAdjusting', self.enterNotAdjusting, self.exitNotAdjusting, ['Adjusting'])], 'NotAdjusting', 'NotAdjusting')
self.adjustFsm.enterInitialState()
self.fsm.enterInitialState()
self.startTime = globalClock.getRealTime()
self.adjustingTimer = Timer()
def clearAttacks(self):
self.toonAttacks = {}
self.suitAttacks = getDefaultSuitAttacks()
def requestDelete(self):
if hasattr(self, 'fsm'):
self.fsm.request('Off')
self.__removeTaskName(self.uniqueName('make-movie'))
DistributedObjectAI.DistributedObjectAI.requestDelete(self)
def delete(self):
self.notify.debug('deleting battle')
self.fsm.request('Off')
self.ignoreAll()
self.__removeAllTasks()
del self.fsm
del self.joinableFsm
del self.runableFsm
del self.adjustFsm
self.__cleanupJoinResponses()
self.timer.stop()
del self.timer
self.adjustingTimer.stop()
del self.adjustingTimer
self.battleCalc.cleanup()
del self.battleCalc
for suit in self.suits:
del suit.battleTrap
del self.finishCallback
for petProxy in self.pets.values():
petProxy.requestDelete()
DistributedObjectAI.DistributedObjectAI.delete(self)
def pause(self):
self.timer.stop()
self.adjustingTimer.stop()
def unpause(self):
self.timer.resume()
self.adjustingTimer.resume()
def abortBattle(self):
self.notify.debug('%s.abortBattle() called.' % self.doId)
toonsCopy = self.toons[:]
for toonId in toonsCopy:
self.__removeToon(toonId)
if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie':
self.exitedToons.append(toonId)
self.d_setMembers()
self.b_setState('Resume')
self.__removeAllTasks()
self.timer.stop()
self.adjustingTimer.stop()
def findSuit(self, id):
for s in self.suits:
if s.doId == id:
return s
return None
def __removeTaskName(self, name):
if self.taskNames.count(name):
self.taskNames.remove(name)
self.notify.debug('removeTaskName() - %s' % name)
taskMgr.remove(name)
def __removeAllTasks(self):
for n in self.taskNames:
self.notify.debug('removeAllTasks() - %s' % n)
taskMgr.remove(n)
self.taskNames = []
def __removeToonTasks(self, toonId):
name = self.taskName('running-toon-%d' % toonId)
self.__removeTaskName(name)
name = self.taskName('to-pending-av-%d' % toonId)
self.__removeTaskName(name)
def getLevelDoId(self):
return 0
def getBattleCellId(self):
return 0
def getPosition(self):
self.notify.debug('getPosition() - %s' % self.pos)
return [self.pos[0], self.pos[1], self.pos[2]]
def getInitialSuitPos(self):
p = []
p.append(self.initialSuitPos[0])
p.append(self.initialSuitPos[1])
p.append(self.initialSuitPos[2])
return p
def setBossBattle(self, bossBattle):
self.bossBattle = bossBattle
def getBossBattle(self):
return self.bossBattle
def b_setState(self, state):
self.notify.debug('network:setState(%s)' % state)
stime = globalClock.getRealTime() + SERVER_BUFFER_TIME
self.sendUpdate('setState', [state, globalClockDelta.localToNetworkTime(stime)])
self.setState(state)
def setState(self, state):
self.fsm.request(state)
def getState(self):
return [self.fsm.getCurrentState().getName(), globalClockDelta.getRealNetworkTime()]
def d_setMembers(self):
self.notify.debug('network:setMembers()')
self.sendUpdate('setMembers', self.getMembers())
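    # Serializes the battle roster for clients: suit and toon doIds plus the
    # joining/pending/active/lured/running subsets encoded as strings of indices
    # into those lists, and per-suit trap levels ('9' meaning no usable trap).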
def getMembers(self):
suits = []
for s in self.suits:
suits.append(s.doId)
joiningSuits = ''
for s in self.joiningSuits:
joiningSuits += str(suits.index(s.doId))
pendingSuits = ''
for s in self.pendingSuits:
pendingSuits += str(suits.index(s.doId))
activeSuits = ''
for s in self.activeSuits:
activeSuits += str(suits.index(s.doId))
luredSuits = ''
for s in self.luredSuits:
luredSuits += str(suits.index(s.doId))
suitTraps = ''
for s in self.suits:
if s.battleTrap == NO_TRAP:
suitTraps += '9'
elif s.battleTrap == BattleCalculatorAI.TRAP_CONFLICT:
suitTraps += '9'
else:
suitTraps += str(s.battleTrap)
toons = []
for t in self.toons:
toons.append(t)
joiningToons = ''
for t in self.joiningToons:
joiningToons += str(toons.index(t))
pendingToons = ''
for t in self.pendingToons:
pendingToons += str(toons.index(t))
activeToons = ''
for t in self.activeToons:
activeToons += str(toons.index(t))
runningToons = ''
for t in self.runningToons:
runningToons += str(toons.index(t))
self.notify.debug('getMembers() - suits: %s joiningSuits: %s pendingSuits: %s activeSuits: %s luredSuits: %s suitTraps: %s toons: %s joiningToons: %s pendingToons: %s activeToons: %s runningToons: %s' % (suits,
joiningSuits,
pendingSuits,
activeSuits,
luredSuits,
suitTraps,
toons,
joiningToons,
pendingToons,
activeToons,
runningToons))
return [suits,
joiningSuits,
pendingSuits,
activeSuits,
luredSuits,
suitTraps,
toons,
joiningToons,
pendingToons,
activeToons,
runningToons,
globalClockDelta.getRealNetworkTime()]
def d_adjust(self):
self.notify.debug('network:adjust()')
self.sendUpdate('adjust', [globalClockDelta.getRealNetworkTime()])
def getInteractivePropTrackBonus(self):
return self.interactivePropTrackBonus
def getZoneId(self):
return self.zoneId
def getTaskZoneId(self):
return self.zoneId
def d_setMovie(self):
self.notify.debug('network:setMovie()')
self.sendUpdate('setMovie', self.getMovie())
self.__updateEncounteredCogs()
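    # Flattens the chosen toon attacks and the calculated suit attacks into the
    # flat parameter list the client-side movie player consumes.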
def getMovie(self):
suitIds = []
for s in self.activeSuits:
suitIds.append(s.doId)
p = [self.movieHasBeenMade]
p.append(self.activeToons)
p.append(suitIds)
for t in self.activeToons:
if t in self.toonAttacks:
ta = self.toonAttacks[t]
index = -1
id = ta[TOON_ID_COL]
if id != -1:
index = self.activeToons.index(id)
track = ta[TOON_TRACK_COL]
if (track == NO_ATTACK or attackAffectsGroup(track, ta[TOON_LVL_COL])) and track != NPCSOS and track != PETSOS:
target = -1
if track == HEAL:
if ta[TOON_LVL_COL] == 1:
ta[TOON_HPBONUS_COL] = random.randint(0, 10000)
elif track == SOS or track == NPCSOS or track == PETSOS:
target = ta[TOON_TGT_COL]
elif track == HEAL:
if self.activeToons.count(ta[TOON_TGT_COL]) != 0:
target = self.activeToons.index(ta[TOON_TGT_COL])
else:
target = -1
elif suitIds.count(ta[TOON_TGT_COL]) != 0:
target = suitIds.index(ta[TOON_TGT_COL])
else:
target = -1
p = p + [index,
track,
ta[TOON_LVL_COL],
target]
p = p + ta[4:]
else:
index = self.activeToons.index(t)
attack = getToonAttack(index)
p = p + attack
for i in range(4 - len(self.activeToons)):
p = p + getToonAttack(-1)
for sa in self.suitAttacks:
index = -1
id = sa[SUIT_ID_COL]
if id != -1:
index = suitIds.index(id)
if sa[SUIT_ATK_COL] == -1:
targetIndex = -1
else:
targetIndex = sa[SUIT_TGT_COL]
if targetIndex == -1:
self.notify.debug('suit attack: %d must be group' % sa[SUIT_ATK_COL])
else:
toonId = self.activeToons[targetIndex]
p = p + [index, sa[SUIT_ATK_COL], targetIndex]
sa[SUIT_TAUNT_COL] = 0
if sa[SUIT_ATK_COL] != -1:
suit = self.findSuit(id)
sa[SUIT_TAUNT_COL] = getAttackTauntIndexFromIndex(suit, sa[SUIT_ATK_COL])
p = p + sa[3:]
return p
def d_setChosenToonAttacks(self):
self.notify.debug('network:setChosenToonAttacks()')
self.sendUpdate('setChosenToonAttacks', self.getChosenToonAttacks())
def getChosenToonAttacks(self):
ids = []
tracks = []
levels = []
targets = []
for t in self.activeToons:
if t in self.toonAttacks:
ta = self.toonAttacks[t]
else:
ta = getToonAttack(t)
ids.append(t)
tracks.append(ta[TOON_TRACK_COL])
levels.append(ta[TOON_LVL_COL])
targets.append(ta[TOON_TGT_COL])
return [ids,
tracks,
levels,
targets]
def d_setBattleExperience(self):
self.notify.debug('network:setBattleExperience()')
self.sendUpdate('setBattleExperience', self.getBattleExperience())
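    # Delegates to BattleExperienceAI to pack the per-toon experience, merits,
    # items and quest progress earned so far in this battle.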
def getBattleExperience(self):
returnValue = BattleExperienceAI.getBattleExperience(4, self.activeToons, self.toonExp, self.battleCalc.toonSkillPtsGained, self.toonOrigQuests, self.toonItems, self.toonOrigMerits, self.toonMerits, self.toonParts, self.suitsKilled, self.helpfulToons)
return returnValue
    def getToonUberStatus(self):
        fieldList = []
        uberIndex = LAST_REGULAR_GAG_LEVEL + 1
        for toonId in self.activeToons:
            toon = self.getToon(toonId)
            toonList = []
            for trackIndex in range(MAX_TRACK_INDEX):
                toonList.append(toon.inventory.numItem(trackIndex, uberIndex))
            fieldList.append(encodeUber(toonList))
        return fieldList
def addSuit(self, suit):
self.notify.debug('addSuit(%d)' % suit.doId)
self.newSuits.append(suit)
self.suits.append(suit)
suit.battleTrap = NO_TRAP
self.numSuitsEver += 1
def __joinSuit(self, suit):
self.joiningSuits.append(suit)
toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME
taskName = self.taskName('to-pending-av-%d' % suit.doId)
self.__addJoinResponse(suit.doId, taskName)
self.taskNames.append(taskName)
taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(suit.doId, taskName))
def __serverJoinDone(self, avId, taskName):
self.notify.debug('join for av: %d timed out on server' % avId)
self.__removeTaskName(taskName)
self.__makeAvPending(avId)
return Task.done
def __makeAvPending(self, avId):
self.notify.debug('__makeAvPending(%d)' % avId)
self.__removeJoinResponse(avId)
self.__removeTaskName(self.taskName('to-pending-av-%d' % avId))
if self.toons.count(avId) > 0:
self.joiningToons.remove(avId)
self.pendingToons.append(avId)
else:
suit = self.findSuit(avId)
if suit != None:
if not suit.isEmpty():
if not self.joiningSuits.count(suit) == 1:
self.notify.warning('__makeAvPending(%d) in zone: %d' % (avId, self.zoneId))
self.notify.warning('toons: %s' % self.toons)
self.notify.warning('joining toons: %s' % self.joiningToons)
self.notify.warning('pending toons: %s' % self.pendingToons)
self.notify.warning('suits: %s' % self.suits)
self.notify.warning('joining suits: %s' % self.joiningSuits)
self.notify.warning('pending suits: %s' % self.pendingSuits)
self.joiningSuits.remove(suit)
self.pendingSuits.append(suit)
else:
self.notify.warning('makeAvPending() %d not in toons or suits' % avId)
return
self.d_setMembers()
self.needAdjust = 1
self.__requestAdjust()
def suitRequestJoin(self, suit):
self.notify.debug('suitRequestJoin(%d)' % suit.getDoId())
if self.suitCanJoin():
self.addSuit(suit)
self.__joinSuit(suit)
self.d_setMembers()
suit.prepareToJoinBattle()
return 1
else:
self.notify.warning('suitRequestJoin() - not joinable - joinable state: %s max suits: %d' % (self.joinableFsm.getCurrentState().getName(), self.maxSuits))
return 0
def addToon(self, avId):
self.notify.debug('addToon(%d)' % avId)
toon = self.getToon(avId)
if toon == None:
return 0
toon.stopToonUp()
event = simbase.air.getAvatarExitEvent(avId)
self.avatarExitEvents.append(event)
self.accept(event, self.__handleUnexpectedExit, extraArgs=[avId])
event = 'inSafezone-%s' % avId
self.avatarExitEvents.append(event)
self.accept(event, self.__handleSuddenExit, extraArgs=[avId, 0])
self.newToons.append(avId)
self.toons.append(avId)
toon = simbase.air.doId2do.get(avId)
if toon:
if hasattr(self, 'doId'):
toon.b_setBattleId(self.doId)
else:
toon.b_setBattleId(-1)
messageToonAdded = 'Battle adding toon %s' % avId
messenger.send(messageToonAdded, [avId])
if self.fsm != None and self.fsm.getCurrentState().getName() == 'PlayMovie':
self.responses[avId] = 1
else:
self.responses[avId] = 0
self.adjustingResponses[avId] = 0
if avId not in self.toonExp:
p = []
for t in Tracks:
p.append(toon.experience.getExp(t))
self.toonExp[avId] = p
if avId not in self.toonOrigMerits:
self.toonOrigMerits[avId] = toon.cogMerits[:]
if avId not in self.toonMerits:
self.toonMerits[avId] = [0,
0,
0,
0,
0]
if avId not in self.toonOrigQuests:
flattenedQuests = []
for quest in toon.quests:
flattenedQuests.extend(quest)
self.toonOrigQuests[avId] = flattenedQuests
if avId not in self.toonItems:
self.toonItems[avId] = ([], [])
return 1
def __joinToon(self, avId, pos):
self.joiningToons.append(avId)
toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME
taskName = self.taskName('to-pending-av-%d' % avId)
self.__addJoinResponse(avId, taskName, toon=1)
taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(avId, taskName))
self.taskNames.append(taskName)
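    # Records which suit types each newly arrived toon has now met; the data is
    # handed to the cog page manager when the toon leaves the battle.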
def __updateEncounteredCogs(self):
for toon in self.activeToons:
if toon in self.newToons:
for suit in self.activeSuits:
if hasattr(suit, 'dna'):
self.suitsEncountered.append({'type': suit.dna.name,
'activeToons': self.activeToons[:]})
else:
self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
return
self.newToons.remove(toon)
for suit in self.activeSuits:
if suit in self.newSuits:
if hasattr(suit, 'dna'):
self.suitsEncountered.append({'type': suit.dna.name,
'activeToons': self.activeToons[:]})
else:
self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
return
self.newSuits.remove(suit)
def __makeToonRun(self, toonId, updateAttacks):
self.activeToons.remove(toonId)
self.toonGone = 1
self.runningToons.append(toonId)
taskName = self.taskName('running-toon-%d' % toonId)
taskMgr.doMethodLater(TOON_RUN_T, self.__serverRunDone, taskName, extraArgs=(toonId, updateAttacks, taskName))
self.taskNames.append(taskName)
def __serverRunDone(self, toonId, updateAttacks, taskName):
self.notify.debug('run for toon: %d timed out on server' % toonId)
self.__removeTaskName(taskName)
self.__removeToon(toonId)
self.d_setMembers()
if len(self.toons) == 0:
self.notify.debug('last toon is gone - battle is finished')
self.b_setState('Resume')
else:
if updateAttacks == 1:
self.d_setChosenToonAttacks()
self.needAdjust = 1
self.__requestAdjust()
return Task.done
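    # Adjusting (sliding pending toons/suits into position) only happens while
    # waiting for joins or input, and never while another adjustment is in flight.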
def __requestAdjust(self):
if not self.fsm:
return
cstate = self.fsm.getCurrentState().getName()
if cstate == 'WaitForInput' or cstate == 'WaitForJoin':
if self.adjustFsm.getCurrentState().getName() == 'NotAdjusting':
if self.needAdjust == 1:
self.d_adjust()
self.adjustingSuits = []
for s in self.pendingSuits:
self.adjustingSuits.append(s)
self.adjustingToons = []
for t in self.pendingToons:
self.adjustingToons.append(t)
self.adjustFsm.request('Adjusting')
else:
self.notify.debug('requestAdjust() - dont need to')
else:
self.notify.debug('requestAdjust() - already adjusting')
else:
self.notify.debug('requestAdjust() - in state: %s' % cstate)
def __handleUnexpectedExit(self, avId):
#TODO: fixme
#disconnectCode = self.air.getAvatarDisconnectReason(avId)
disconnectCode = "placeHolder dc code, need self.air.getAvatarDisconnectReason(avId)"
self.notify.warning('toon: %d exited unexpectedly, reason %s' % (avId, disconnectCode))
#userAborted = disconnectCode == ToontownGlobals.DisconnectCloseWindow
#TODO: fixme
userAborted = False
self.__handleSuddenExit(avId, userAborted)
def __handleSuddenExit(self, avId, userAborted):
self.__removeToon(avId, userAborted=userAborted)
if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie':
self.exitedToons.append(avId)
self.d_setMembers()
if len(self.toons) == 0:
self.notify.debug('last toon is gone - battle is finished')
self.__removeAllTasks()
self.timer.stop()
self.adjustingTimer.stop()
self.b_setState('Resume')
else:
self.needAdjust = 1
self.__requestAdjust()
def __removeSuit(self, suit):
self.notify.debug('__removeSuit(%d)' % suit.doId)
self.suits.remove(suit)
self.activeSuits.remove(suit)
if self.luredSuits.count(suit) == 1:
self.luredSuits.remove(suit)
self.suitGone = 1
del suit.battleTrap
def __removeToon(self, toonId, userAborted = 0):
self.notify.debug('__removeToon(%d)' % toonId)
if self.toons.count(toonId) == 0:
return
self.battleCalc.toonLeftBattle(toonId)
self.__removeToonTasks(toonId)
self.toons.remove(toonId)
if self.joiningToons.count(toonId) == 1:
self.joiningToons.remove(toonId)
if self.pendingToons.count(toonId) == 1:
self.pendingToons.remove(toonId)
if self.activeToons.count(toonId) == 1:
activeToonIdx = self.activeToons.index(toonId)
self.notify.debug('removing activeToons[%d], updating suitAttacks SUIT_HP_COL to match' % activeToonIdx)
for i in range(len(self.suitAttacks)):
if activeToonIdx < len(self.suitAttacks[i][SUIT_HP_COL]):
del self.suitAttacks[i][SUIT_HP_COL][activeToonIdx]
else:
self.notify.warning("suitAttacks %d doesn't have an HP column for active toon index %d" % (i, activeToonIdx))
self.activeToons.remove(toonId)
if self.runningToons.count(toonId) == 1:
self.runningToons.remove(toonId)
if self.adjustingToons.count(toonId) == 1:
self.notify.warning('removeToon() - toon: %d was adjusting!' % toonId)
self.adjustingToons.remove(toonId)
self.toonGone = 1
if toonId in self.pets:
self.pets[toonId].requestDelete()
del self.pets[toonId]
self.__removeResponse(toonId)
self.__removeAdjustingResponse(toonId)
self.__removeJoinResponses(toonId)
event = simbase.air.getAvatarExitEvent(toonId)
self.avatarExitEvents.remove(event)
self.ignore(event)
event = 'inSafezone-%s' % toonId
self.avatarExitEvents.remove(event)
self.ignore(event)
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.b_setBattleId(0)
messageToonReleased = 'Battle releasing toon %s' % toon.doId
messenger.send(messageToonReleased, [toon.doId])
if not userAborted:
toon = self.getToon(toonId)
if toon != None:
toon.hpOwnedByBattle = 0
toon.d_setHp(toon.hp)
toon.d_setInventory(toon.inventory.makeNetString())
self.air.cogPageManager.toonEncounteredCogs(toon, self.suitsEncountered, self.getTaskZoneId())
elif len(self.suits) > 0 and not self.streetBattle:
self.notify.info('toon %d aborted non-street battle; clearing inventory and hp.' % toonId)
toon = DistributedToonAI.DistributedToonAI(self.air)
toon.doId = toonId
empty = InventoryBase.InventoryBase(toon)
toon.b_setInventory(empty.makeNetString())
toon.b_setHp(0)
db = DatabaseObject.DatabaseObject(self.air, toonId)
db.storeObject(toon, ['setInventory', 'setHp'])
self.notify.info('killing mem leak from temporary DistributedToonAI %d' % toonId)
toon.deleteDummy()
def getToon(self, toonId):
if toonId in self.air.doId2do:
return self.air.doId2do[toonId]
else:
self.notify.warning('getToon() - toon: %d not in repository!' % toonId)
return
def toonRequestRun(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('ignoring response from toon: %d' % toonId)
return
self.notify.debug('toonRequestRun(%d)' % toonId)
if not self.isRunable():
self.notify.warning('toonRequestRun() - not runable')
return
updateAttacks = 0
if self.activeToons.count(toonId) == 0:
self.notify.warning('toon tried to run, but not found in activeToons: %d' % toonId)
return
for toon in self.activeToons:
if toon in self.toonAttacks:
ta = self.toonAttacks[toon]
track = ta[TOON_TRACK_COL]
level = ta[TOON_LVL_COL]
if ta[TOON_TGT_COL] == toonId or track == HEAL and attackAffectsGroup(track, level) and len(self.activeToons) <= 2:
healerId = ta[TOON_ID_COL]
self.notify.debug('resetting toon: %ds attack' % healerId)
self.toonAttacks[toon] = getToonAttack(toon, track=UN_ATTACK)
self.responses[healerId] = 0
updateAttacks = 1
self.__makeToonRun(toonId, updateAttacks)
self.d_setMembers()
self.needAdjust = 1
self.__requestAdjust()
def toonRequestJoin(self, x, y, z):
toonId = self.air.getAvatarIdFromSender()
self.notify.debug('toonRequestJoin(%d)' % toonId)
self.signupToon(toonId, x, y, z)
def toonDied(self):
toonId = self.air.getAvatarIdFromSender()
self.notify.debug('toonDied(%d)' % toonId)
if toonId in self.toons:
toon = self.getToon(toonId)
if toon:
toon.hp = -1
toon.inventory.zeroInv(1)
self.__handleSuddenExit(toonId, 0)
def signupToon(self, toonId, x, y, z):
if self.toons.count(toonId):
return
if self.toonCanJoin():
if self.addToon(toonId):
self.__joinToon(toonId, Point3(x, y, z))
self.d_setMembers()
else:
self.notify.warning('toonRequestJoin() - not joinable')
self.d_denyLocalToonJoin(toonId)
def d_denyLocalToonJoin(self, toonId):
self.notify.debug('network: denyLocalToonJoin(%d)' % toonId)
self.sendUpdateToAvatarId(toonId, 'denyLocalToonJoin', [])
def resetResponses(self):
self.responses = {}
for t in self.toons:
self.responses[t] = 0
self.ignoreResponses = 0
def allToonsResponded(self):
for t in self.toons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allPendingActiveToonsResponded(self):
for t in self.pendingToons + self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allActiveToonsResponded(self):
for t in self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __removeResponse(self, toonId):
del self.responses[toonId]
if self.ignoreResponses == 0 and len(self.toons) > 0:
currStateName = self.fsm.getCurrentState().getName()
if currStateName == 'WaitForInput':
if self.__allActiveToonsResponded():
self.notify.debug('removeResponse() - dont wait for movie')
self.__requestMovie()
elif currStateName == 'PlayMovie':
if self.__allPendingActiveToonsResponded():
self.notify.debug('removeResponse() - surprise movie done')
self.__movieDone()
elif currStateName == 'Reward' or currStateName == 'BuildingReward':
if self.__allActiveToonsResponded():
self.notify.debug('removeResponse() - surprise reward done')
self.handleRewardDone()
def __resetAdjustingResponses(self):
self.adjustingResponses = {}
for t in self.toons:
self.adjustingResponses[t] = 0
self.ignoreAdjustingResponses = 0
def __allAdjustingToonsResponded(self):
for t in self.toons:
if self.adjustingResponses[t] == 0:
return 0
self.ignoreAdjustingResponses = 1
return 1
def __removeAdjustingResponse(self, toonId):
if toonId in self.adjustingResponses:
del self.adjustingResponses[toonId]
if self.ignoreAdjustingResponses == 0 and len(self.toons) > 0:
if self.__allAdjustingToonsResponded():
self.__adjustDone()
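    # joinResponses[avId] maps every toon in the battle to a flag saying whether
    # that client finished playing avId's join; 'taskName' is the join timeout task.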
def __addJoinResponse(self, avId, taskName, toon = 0):
if toon == 1:
for jr in self.joinResponses.values():
jr[avId] = 0
self.joinResponses[avId] = {}
for t in self.toons:
self.joinResponses[avId][t] = 0
self.joinResponses[avId]['taskName'] = taskName
def __removeJoinResponses(self, avId):
self.__removeJoinResponse(avId)
removedOne = 0
for j in self.joinResponses.values():
if avId in j:
del j[avId]
removedOne = 1
if removedOne == 1:
for t in self.joiningToons:
if self.__allToonsRespondedJoin(t):
self.__makeAvPending(t)
def __removeJoinResponse(self, avId):
if avId in self.joinResponses:
taskMgr.remove(self.joinResponses[avId]['taskName'])
del self.joinResponses[avId]
def __allToonsRespondedJoin(self, avId):
jr = self.joinResponses[avId]
for t in self.toons:
if jr[t] == 0:
return 0
return 1
def __cleanupJoinResponses(self):
for jr in self.joinResponses.values():
taskMgr.remove(jr['taskName'])
del jr
def adjustDone(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreAdjustingResponses == 1:
self.notify.debug('adjustDone() - ignoring toon: %d' % toonId)
return
elif self.adjustFsm.getCurrentState().getName() != 'Adjusting':
self.notify.warning('adjustDone() - in state %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('adjustDone() - toon: %d not in toon list' % toonId)
return
self.adjustingResponses[toonId] += 1
self.notify.debug('toon: %d done adjusting' % toonId)
if self.__allAdjustingToonsResponded():
self.__adjustDone()
def timeout(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('timeout() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'WaitForInput':
self.notify.warning('timeout() - in state: %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('timeout() - toon: %d not in toon list' % toonId)
return
self.toonAttacks[toonId] = getToonAttack(toonId)
self.d_setChosenToonAttacks()
self.responses[toonId] += 1
self.notify.debug('toon: %d timed out' % toonId)
if self.__allActiveToonsResponded():
self.__requestMovie(timeout=1)
def movieDone(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('movieDone() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'PlayMovie':
self.notify.warning('movieDone() - in state %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('movieDone() - toon: %d not in toon list' % toonId)
return
self.responses[toonId] += 1
self.notify.debug('toon: %d done with movie' % toonId)
if self.__allPendingActiveToonsResponded():
self.__movieDone()
else:
self.timer.stop()
self.timer.startCallback(TIMEOUT_PER_USER, self.__serverMovieDone)
def rewardDone(self):
toonId = self.air.getAvatarIdFromSender()
stateName = self.fsm.getCurrentState().getName()
if self.ignoreResponses == 1:
self.notify.debug('rewardDone() - ignoring toon: %d' % toonId)
return
elif stateName not in ('Reward', 'BuildingReward', 'FactoryReward', 'MintReward', 'StageReward', 'CountryClubReward'):
self.notify.warning('rewardDone() - in state %s' % stateName)
return
elif self.toons.count(toonId) == 0:
self.notify.warning('rewardDone() - toon: %d not in toon list' % toonId)
return
self.responses[toonId] += 1
self.notify.debug('toon: %d done with reward' % toonId)
if self.__allActiveToonsResponded():
self.handleRewardDone()
else:
self.timer.stop()
self.timer.startCallback(TIMEOUT_PER_USER, self.serverRewardDone)
def assignRewards(self):
if self.rewardHasPlayed == 1:
self.notify.debug('handleRewardDone() - reward has already played')
return
self.rewardHasPlayed = 1
BattleExperienceAI.assignRewards(self.activeToons, self.battleCalc.toonSkillPtsGained, self.suitsKilled, self.getTaskZoneId(), self.helpfulToons)
def joinDone(self, avId):
toonId = self.air.getAvatarIdFromSender()
if self.toons.count(toonId) == 0:
self.notify.warning('joinDone() - toon: %d not in toon list' % toonId)
return
if avId not in self.joinResponses:
self.notify.debug('joinDone() - no entry for: %d - ignoring: %d' % (avId, toonId))
return
jr = self.joinResponses[avId]
if toonId in jr:
jr[toonId] += 1
self.notify.debug('client with localToon: %d done joining av: %d' % (toonId, avId))
if self.__allToonsRespondedJoin(avId):
self.__makeAvPending(avId)
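    # Validates and records a toon's chosen attack for the round; once every
    # active toon has responded, the movie for the round is requested.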
def requestAttack(self, track, level, av):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('requestAttack() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'WaitForInput':
self.notify.warning('requestAttack() - in state: %s' % self.fsm.getCurrentState().getName())
return
elif self.activeToons.count(toonId) == 0:
self.notify.warning('requestAttack() - toon: %d not in toon list' % toonId)
return
self.notify.debug('requestAttack(%d, %d, %d, %d)' % (toonId,
track,
level,
av))
toon = self.getToon(toonId)
if toon == None:
self.notify.warning('requestAttack() - no toon: %d' % toonId)
return
validResponse = 1
if track == SOS:
self.notify.debug('toon: %d calls for help' % toonId)
self.air.writeServerEvent('friendSOS', toonId, '%s' % av)
self.toonAttacks[toonId] = getToonAttack(toonId, track=SOS, target=av)
elif track == NPCSOS:
self.notify.debug('toon: %d calls for help' % toonId)
self.air.writeServerEvent('NPCSOS', toonId, '%s' % av)
toon = self.getToon(toonId)
if toon == None:
return
if av in toon.NPCFriendsDict:
npcCollision = 0
if av in self.npcAttacks:
callingToon = self.npcAttacks[av]
if self.activeToons.count(callingToon) == 1:
self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
npcCollision = 1
if npcCollision == 0:
self.toonAttacks[toonId] = getToonAttack(toonId, track=NPCSOS, level=5, target=av)
self.numNPCAttacks += 1
self.npcAttacks[av] = toonId
elif track == PETSOS:
self.notify.debug('toon: %d calls for pet: %d' % (toonId, av))
self.air.writeServerEvent('PETSOS', toonId, '%s' % av)
toon = self.getToon(toonId)
if toon == None:
return
if not self.validate(toonId, level in toon.petTrickPhrases, 'requestAttack: invalid pet trickId: %s' % level):
return
self.toonAttacks[toonId] = getToonAttack(toonId, track=PETSOS, level=level, target=av)
elif track == UN_ATTACK:
self.notify.debug('toon: %d changed its mind' % toonId)
self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK)
if toonId in self.responses:
self.responses[toonId] = 0
validResponse = 0
elif track == PASS:
self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
elif track == FIRE:
if simbase.air.doId2do[toonId].getPinkSlips() < self.getFireCount() + 1:
#Not allowed to fire, force them to pass >:D
self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
else:
#Allowed to fire
self.setFireCount(self.fireCount + 1)
self.toonAttacks[toonId] = getToonAttack(toonId, track=FIRE, target=av)
else:
if not self.validate(toonId, track >= 0 and track <= MAX_TRACK_INDEX, 'requestAttack: invalid track %s' % track):
return
if not self.validate(toonId, level >= 0 and level <= MAX_LEVEL_INDEX, 'requestAttack: invalid level %s' % level):
return
if toon.inventory.numItem(track, level) == 0:
self.notify.warning('requestAttack() - toon has no item track: %d level: %d' % (track, level))
self.toonAttacks[toonId] = getToonAttack(toonId)
return
if track == HEAL:
if self.runningToons.count(av) == 1 or attackAffectsGroup(track, level) and len(self.activeToons) < 2:
self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK)
validResponse = 0
else:
self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av)
else:
self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av)
if av == -1 and not attackAffectsGroup(track, level):
validResponse = 0
self.d_setChosenToonAttacks()
if validResponse == 1:
self.responses[toonId] += 1
self.notify.debug('toon: %d chose an attack' % toonId)
if self.__allActiveToonsResponded():
self.__requestMovie()
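    # Creates a DistributedPetProxyAI copy of the toon's doodle so pet-trick
    # (PETSOS) attacks can be resolved inside the battle.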
def requestPetProxy(self, av):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('requestPetProxy() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'WaitForInput':
self.notify.warning('requestPetProxy() - in state: %s' % self.fsm.getCurrentState().getName())
return
elif self.activeToons.count(toonId) == 0:
self.notify.warning('requestPetProxy() - toon: %d not in toon list' % toonId)
return
self.notify.debug('requestPetProxy(%s, %s)' % (toonId, av))
toon = self.getToon(toonId)
if toon == None:
self.notify.warning('requestPetProxy() - no toon: %d' % toonId)
return
petId = toon.getPetId()
zoneId = self.zoneId
if petId == av:
if not toonId in self.pets:
def handleGetPetProxy(success, pet, petId = petId, zoneId = zoneId, toonId = toonId):
if success:
petProxy = DistributedPetProxyAI.DistributedPetProxyAI(self.air)
petProxy.setOwnerId(pet.getOwnerId())
petProxy.setPetName(pet.getPetName())
petProxy.setTraitSeed(pet.getTraitSeed())
petProxy.setSafeZone(pet.getSafeZone())
petProxy.setForgetfulness(pet.getForgetfulness())
petProxy.setBoredomThreshold(pet.getBoredomThreshold())
petProxy.setRestlessnessThreshold(pet.getRestlessnessThreshold())
petProxy.setPlayfulnessThreshold(pet.getPlayfulnessThreshold())
petProxy.setLonelinessThreshold(pet.getLonelinessThreshold())
petProxy.setSadnessThreshold(pet.getSadnessThreshold())
petProxy.setFatigueThreshold(pet.getFatigueThreshold())
petProxy.setHungerThreshold(pet.getHungerThreshold())
petProxy.setConfusionThreshold(pet.getConfusionThreshold())
petProxy.setExcitementThreshold(pet.getExcitementThreshold())
petProxy.setAngerThreshold(pet.getAngerThreshold())
petProxy.setSurpriseThreshold(pet.getSurpriseThreshold())
petProxy.setAffectionThreshold(pet.getAffectionThreshold())
petProxy.setHead(pet.getHead())
petProxy.setEars(pet.getEars())
petProxy.setNose(pet.getNose())
petProxy.setTail(pet.getTail())
petProxy.setBodyTexture(pet.getBodyTexture())
petProxy.setColor(pet.getColor())
petProxy.setColorScale(pet.getColorScale())
petProxy.setEyeColor(pet.getEyeColor())
petProxy.setGender(pet.getGender())
petProxy.setLastSeenTimestamp(pet.getLastSeenTimestamp())
petProxy.setBoredom(pet.getBoredom())
petProxy.setRestlessness(pet.getRestlessness())
petProxy.setPlayfulness(pet.getPlayfulness())
petProxy.setLoneliness(pet.getLoneliness())
petProxy.setSadness(pet.getSadness())
petProxy.setAffection(pet.getAffection())
petProxy.setHunger(pet.getHunger())
petProxy.setConfusion(pet.getConfusion())
petProxy.setExcitement(pet.getExcitement())
petProxy.setFatigue(pet.getFatigue())
petProxy.setAnger(pet.getAnger())
petProxy.setSurprise(pet.getSurprise())
petProxy.setTrickAptitudes(pet.getTrickAptitudes())
pet.requestDelete()
def deleted(task):
petProxy.doNotDeallocateChannel = True
petProxy.generateWithRequiredAndId(petId, self.air.districtId, self.zoneId)
petProxy.broadcastDominantMood()
self.pets[toonId] = petProxy
return task.done
self.acceptOnce(self.air.getAvatarExitEvent(petId),
lambda: taskMgr.doMethodLater(0,
deleted, self.uniqueName('petdel-%d' % petId)))
else:
self.notify.warning('error generating petProxy: %s' % petId)
self.getPetProxyObject(petId, handleGetPetProxy)
def suitCanJoin(self):
return len(self.suits) < self.maxSuits and self.isJoinable()
def toonCanJoin(self):
return len(self.toons) < 4 and self.isJoinable()
def __requestMovie(self, timeout = 0):
if self.adjustFsm.getCurrentState().getName() == 'Adjusting':
self.notify.debug('__requestMovie() - in Adjusting')
self.movieRequested = 1
else:
movieDelay = 0
if len(self.activeToons) == 0:
self.notify.warning('only pending toons left in battle %s, toons = %s' % (self.doId, self.toons))
elif len(self.activeSuits) == 0:
self.notify.warning('only pending suits left in battle %s, suits = %s' % (self.doId, self.suits))
elif len(self.activeToons) > 1 and not timeout:
movieDelay = 1
self.fsm.request('MakeMovie')
if movieDelay:
taskMgr.doMethodLater(0.8, self.__makeMovie, self.uniqueName('make-movie'))
self.taskNames.append(self.uniqueName('make-movie'))
else:
self.__makeMovie()
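    # Fills in default attacks for toons that passed or never chose, runs the
    # battle calculator for the round, then broadcasts the movie and plays it.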
def __makeMovie(self, task = None):
self.notify.debug('makeMovie()')
if self._DOAI_requestedDelete:
self.notify.warning('battle %s requested delete, then __makeMovie was called!' % self.doId)
if hasattr(self, 'levelDoId'):
self.notify.warning('battle %s in level %s' % (self.doId, self.levelDoId))
return
self.__removeTaskName(self.uniqueName('make-movie'))
if self.movieHasBeenMade == 1:
self.notify.debug('__makeMovie() - movie has already been made')
return
self.movieRequested = 0
self.movieHasBeenMade = 1
self.movieHasPlayed = 0
self.rewardHasPlayed = 0
for t in self.activeToons:
if t not in self.toonAttacks:
self.toonAttacks[t] = getToonAttack(t)
attack = self.toonAttacks[t]
if attack[TOON_TRACK_COL] == PASS or attack[TOON_TRACK_COL] == UN_ATTACK:
self.toonAttacks[t] = getToonAttack(t)
if self.toonAttacks[t][TOON_TRACK_COL] != NO_ATTACK:
self.addHelpfulToon(t)
self.battleCalc.calculateRound()
for t in self.activeToons:
self.sendEarnedExperience(t)
toon = self.getToon(t)
if toon != None:
toon.hpOwnedByBattle = 1
if toon.immortalMode:
toon.toonUp(toon.maxHp)
self.d_setMovie()
self.b_setState('PlayMovie')
return Task.done
def sendEarnedExperience(self, toonId):
toon = self.getToon(toonId)
if toon != None:
expList = self.battleCalc.toonSkillPtsGained.get(toonId, None)
if expList == None:
toon.d_setEarnedExperience([])
else:
roundList = []
for exp in expList:
roundList.append(int(exp + 0.5))
toon.d_setEarnedExperience(roundList)
def enterOff(self):
return
def exitOff(self):
return
def enterFaceOff(self):
return
def exitFaceOff(self):
return
def enterWaitForJoin(self):
self.notify.debug('enterWaitForJoin()')
if len(self.activeSuits) > 0:
self.b_setState('WaitForInput')
else:
self.notify.debug('enterWaitForJoin() - no active suits')
self.runableFsm.request('Runable')
self.resetResponses()
self.__requestAdjust()
def exitWaitForJoin(self):
pass
def enterWaitForInput(self):
self.notify.debug('enterWaitForInput()')
self.joinableFsm.request('Joinable')
self.runableFsm.request('Runable')
self.resetResponses()
self.__requestAdjust()
if not self.tutorialFlag:
self.timer.startCallback(SERVER_INPUT_TIMEOUT, self.__serverTimedOut)
self.npcAttacks = {}
for toonId in self.toons:
if bboard.get('autoRestock-%s' % toonId, False):
toon = self.air.doId2do.get(toonId)
if toon is not None:
toon.doRestock(0)
def exitWaitForInput(self):
self.npcAttacks = {}
self.timer.stop()
def __serverTimedOut(self):
self.notify.debug('wait for input timed out on server')
self.ignoreResponses = 1
self.__requestMovie(timeout=1)
def enterMakeMovie(self):
self.notify.debug('enterMakeMovie()')
self.runableFsm.request('Unrunable')
self.resetResponses()
def exitMakeMovie(self):
pass
def enterPlayMovie(self):
self.notify.debug('enterPlayMovie()')
self.joinableFsm.request('Joinable')
self.runableFsm.request('Unrunable')
self.resetResponses()
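        # Upper bound on client movie length: one attack slot per active toon and
        # queued NPC attack plus one per active suit, plus a server buffer.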
movieTime = TOON_ATTACK_TIME * (len(self.activeToons) + self.numNPCAttacks) + SUIT_ATTACK_TIME * len(self.activeSuits) + SERVER_BUFFER_TIME
self.numNPCAttacks = 0
self.notify.debug('estimated upper bound of movie time: %f' % movieTime)
self.timer.startCallback(movieTime, self.__serverMovieDone)
def __serverMovieDone(self):
self.notify.debug('movie timed out on server')
self.ignoreResponses = 1
self.__movieDone()
def serverRewardDone(self):
self.notify.debug('reward timed out on server')
self.ignoreResponses = 1
self.handleRewardDone()
def handleRewardDone(self):
self.b_setState('Resume')
def exitPlayMovie(self):
self.timer.stop()
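    # Applies the round's results once all clients report the movie finished (or
    # the server timer expires): consumes gags, applies damage and healing,
    # resolves traps and lures, and removes defeated suits.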
def __movieDone(self):
self.notify.debug('__movieDone() - movie is finished')
if self.movieHasPlayed == 1:
self.notify.debug('__movieDone() - movie had already finished')
return
self.movieHasBeenMade = 0
self.movieHasPlayed = 1
self.ignoreResponses = 1
needUpdate = 0
toonHpDict = {}
for toon in self.activeToons:
toonHpDict[toon] = [0, 0, 0]
actualToon = self.getToon(toon)
self.notify.debug('BEFORE ROUND: toon: %d hp: %d' % (toon, actualToon.hp))
deadSuits = []
trapDict = {}
suitsLuredOntoTraps = []
npcTrapAttacks = []
for activeToon in self.activeToons + self.exitedToons:
if activeToon in self.toonAttacks:
attack = self.toonAttacks[activeToon]
track = attack[TOON_TRACK_COL]
npc_level = None
if track == NPCSOS:
track, npc_level, npc_hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL])
if track == None:
track = NPCSOS
elif track == TRAP:
npcTrapAttacks.append(attack)
toon = self.getToon(attack[TOON_ID_COL])
av = attack[TOON_TGT_COL]
if toon != None and av in toon.NPCFriendsDict:
toon.NPCFriendsDict[av] -= 1
if toon.NPCFriendsDict[av] <= 0:
del toon.NPCFriendsDict[av]
toon.d_setNPCFriendsDict(toon.NPCFriendsDict)
continue
if track != NO_ATTACK:
toonId = attack[TOON_ID_COL]
level = attack[TOON_LVL_COL]
if npc_level != None:
level = npc_level
if attack[TOON_TRACK_COL] == NPCSOS:
toon = self.getToon(toonId)
av = attack[TOON_TGT_COL]
if toon != None and av in toon.NPCFriendsDict:
toon.NPCFriendsDict[av] -= 1
if toon.NPCFriendsDict[av] <= 0:
del toon.NPCFriendsDict[av]
toon.d_setNPCFriendsDict(toon.NPCFriendsDict)
elif track == PETSOS:
pass
elif track == FIRE:
pass
elif track != SOS:
toon = self.getToon(toonId)
if toon != None:
check = toon.inventory.useItem(track, level)
if check == -1:
self.air.writeServerEvent('suspicious', toonId, 'Toon generating movie for non-existant gag track %s level %s' % (track, level))
self.notify.warning('generating movie for non-existant gag track %s level %s! avId: %s' % (track, level, toonId))
toon.d_setInventory(toon.inventory.makeNetString())
hps = attack[TOON_HP_COL]
if track == SOS:
self.notify.debug('toon: %d called for help' % toonId)
elif track == NPCSOS:
self.notify.debug('toon: %d called for help' % toonId)
elif track == PETSOS:
self.notify.debug('toon: %d called for pet' % toonId)
for i in range(len(self.activeToons)):
toon = self.getToon(self.activeToons[i])
if toon != None:
if i < len(hps):
hp = hps[i]
if hp > 0:
toonHpDict[toon.doId][0] += hp
self.notify.debug('pet heal: toon: %d healed for hp: %d' % (toon.doId, hp))
else:
self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps))
elif track == NPC_RESTOCK_GAGS:
for at in self.activeToons:
toon = self.getToon(at)
if toon != None:
toon.inventory.NPCMaxOutInv(npc_level)
toon.d_setInventory(toon.inventory.makeNetString())
elif track == HEAL:
if levelAffectsGroup(HEAL, level):
for i in range(len(self.activeToons)):
at = self.activeToons[i]
if at != toonId or attack[TOON_TRACK_COL] == NPCSOS:
toon = self.getToon(at)
if toon != None:
if i < len(hps):
hp = hps[i]
else:
self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps))
hp = 0
toonHpDict[toon.doId][0] += hp
self.notify.debug('HEAL: toon: %d healed for hp: %d' % (toon.doId, hp))
else:
targetId = attack[TOON_TGT_COL]
toon = self.getToon(targetId)
if toon != None and targetId in self.activeToons:
targetIndex = self.activeToons.index(targetId)
if targetIndex < len(hps):
hp = hps[targetIndex]
else:
self.notify.warning('Invalid targetIndex %s in hps %s.' % (targetIndex, hps))
hp = 0
toonHpDict[toon.doId][0] += hp
elif attackAffectsGroup(track, level, attack[TOON_TRACK_COL]):
for suit in self.activeSuits:
targetIndex = self.activeSuits.index(suit)
if targetIndex < 0 or targetIndex >= len(hps):
self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track,
level,
targetIndex,
len(hps),
hps))
else:
hp = hps[targetIndex]
if hp > 0 and track == LURE:
if suit.battleTrap == UBER_GAG_LEVEL_INDEX:
pass
suit.battleTrap = NO_TRAP
needUpdate = 1
if suit.doId in trapDict:
del trapDict[suit.doId]
if suitsLuredOntoTraps.count(suit) == 0:
suitsLuredOntoTraps.append(suit)
if track == TRAP:
targetId = suit.doId
if targetId in trapDict:
trapDict[targetId].append(attack)
else:
trapDict[targetId] = [attack]
needUpdate = 1
died = attack[SUIT_DIED_COL] & 1 << targetIndex
if died != 0:
if deadSuits.count(suit) == 0:
deadSuits.append(suit)
else:
targetId = attack[TOON_TGT_COL]
target = self.findSuit(targetId)
if target != None:
targetIndex = self.activeSuits.index(target)
if targetIndex < 0 or targetIndex >= len(hps):
self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track,
level,
targetIndex,
len(hps),
hps))
else:
hp = hps[targetIndex]
if track == TRAP:
if targetId in trapDict:
trapDict[targetId].append(attack)
else:
trapDict[targetId] = [attack]
if hp > 0 and track == LURE:
oldBattleTrap = target.battleTrap
if oldBattleTrap == UBER_GAG_LEVEL_INDEX:
pass
target.battleTrap = NO_TRAP
needUpdate = 1
if target.doId in trapDict:
del trapDict[target.doId]
if suitsLuredOntoTraps.count(target) == 0:
suitsLuredOntoTraps.append(target)
if oldBattleTrap == UBER_GAG_LEVEL_INDEX:
for otherSuit in self.activeSuits:
if not otherSuit == target:
otherSuit.battleTrap = NO_TRAP
if otherSuit.doId in trapDict:
del trapDict[otherSuit.doId]
died = attack[SUIT_DIED_COL] & 1 << targetIndex
if died != 0:
if deadSuits.count(target) == 0:
deadSuits.append(target)
self.exitedToons = []
for suitKey in trapDict.keys():
attackList = trapDict[suitKey]
attack = attackList[0]
target = self.findSuit(attack[TOON_TGT_COL])
if attack[TOON_LVL_COL] == UBER_GAG_LEVEL_INDEX:
targetId = suitKey
target = self.findSuit(targetId)
if len(attackList) == 1:
if suitsLuredOntoTraps.count(target) == 0:
self.notify.debug('movieDone() - trap set')
target.battleTrap = attack[TOON_LVL_COL]
needUpdate = 1
else:
target.battleTrap = NO_TRAP
else:
self.notify.debug('movieDone() - traps collided')
if target != None:
target.battleTrap = NO_TRAP
if self.battleCalc.trainTrapTriggered:
self.notify.debug('Train trap triggered, clearing all traps')
for otherSuit in self.activeSuits:
self.notify.debug('suit =%d, oldBattleTrap=%d' % (otherSuit.doId, otherSuit.battleTrap))
otherSuit.battleTrap = NO_TRAP
currLuredSuits = self.battleCalc.getLuredSuits()
if len(self.luredSuits) == len(currLuredSuits):
for suit in self.luredSuits:
if currLuredSuits.count(suit.doId) == 0:
needUpdate = 1
break
else:
needUpdate = 1
self.luredSuits = []
for i in currLuredSuits:
suit = self.air.doId2do[i]
self.luredSuits.append(suit)
self.notify.debug('movieDone() - suit: %d is lured' % i)
for attack in npcTrapAttacks:
track, level, hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL])
for suit in self.activeSuits:
if self.luredSuits.count(suit) == 0 and suit.battleTrap == NO_TRAP:
suit.battleTrap = level
needUpdate = 1
for suit in deadSuits:
self.notify.debug('removing dead suit: %d' % suit.doId)
if suit.isDeleted():
self.notify.debug('whoops, suit %d is deleted.' % suit.doId)
else:
self.notify.debug('suit had revives? %d' % suit.getMaxSkeleRevives())
encounter = {'type': suit.dna.name,
'level': suit.getActualLevel(),
'track': suit.dna.dept,
'isSkelecog': suit.getSkelecog(),
'isForeman': suit.isForeman(),
'isVP': 0,
'isCFO': 0,
'isSupervisor': suit.isSupervisor(),
'isVirtual': suit.isVirtual(),
'hasRevives': suit.getMaxSkeleRevives(),
'activeToons': self.activeToons[:]}
self.suitsKilled.append(encounter)
self.suitsKilledThisBattle.append(encounter)
self.air.suitInvasionManager.handleSuitDefeated()
self.__removeSuit(suit)
needUpdate = 1
suit.resume()
lastActiveSuitDied = 0
if len(self.activeSuits) == 0 and len(self.pendingSuits) == 0:
lastActiveSuitDied = 1
for i in range(4):
attack = self.suitAttacks[i][SUIT_ATK_COL]
if attack != NO_ATTACK:
suitId = self.suitAttacks[i][SUIT_ID_COL]
suit = self.findSuit(suitId)
if suit == None:
self.notify.warning('movieDone() - suit: %d is gone!' % suitId)
continue
if not (hasattr(suit, 'dna') and suit.dna):
toonId = self.air.getAvatarIdFromSender()
self.notify.warning('_movieDone avoiding crash, sender=%s but suit has no dna' % toonId)
self.air.writeServerEvent('suspicious', toonId, '_movieDone avoiding crash, suit has no dna')
continue
adict = getSuitAttack(suit.getStyleName(), suit.getLevel(), attack)
hps = self.suitAttacks[i][SUIT_HP_COL]
if adict['group'] == ATK_TGT_GROUP:
for activeToon in self.activeToons:
toon = self.getToon(activeToon)
if toon != None:
targetIndex = self.activeToons.index(activeToon)
toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex
if targetIndex >= len(hps):
self.notify.warning('DAMAGE: toon %s is no longer in battle!' % activeToon)
else:
hp = hps[targetIndex]
if hp > 0:
self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (activeToon, hp))
if toonDied != 0:
toonHpDict[toon.doId][2] = 1
toonHpDict[toon.doId][1] += hp
elif adict['group'] == ATK_TGT_SINGLE:
targetIndex = self.suitAttacks[i][SUIT_TGT_COL]
if targetIndex >= len(self.activeToons):
self.notify.warning('movieDone() - toon: %d gone!' % targetIndex)
break
toonId = self.activeToons[targetIndex]
toon = self.getToon(toonId)
toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex
if targetIndex >= len(hps):
self.notify.warning('DAMAGE: toon %s is no longer in battle!' % toonId)
else:
hp = hps[targetIndex]
if hp > 0:
self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (toonId, hp))
if toonDied != 0:
toonHpDict[toon.doId][2] = 1
toonHpDict[toon.doId][1] += hp
deadToons = []
for activeToon in self.activeToons:
hp = toonHpDict[activeToon]
toon = self.getToon(activeToon)
if toon != None:
self.notify.debug('AFTER ROUND: currtoonHP: %d toonMAX: %d hheal: %d damage: %d' % (toon.hp,
toon.maxHp,
hp[0],
hp[1]))
toon.hpOwnedByBattle = 0
hpDelta = hp[0] - hp[1]
if hpDelta >= 0:
toon.toonUp(hpDelta, quietly=1)
else:
toon.takeDamage(-hpDelta, quietly=1)
if toon.hp <= 0:
self.notify.debug('movieDone() - toon: %d was killed' % activeToon)
toon.inventory.zeroInv(1)
deadToons.append(activeToon)
self.notify.debug('AFTER ROUND: toon: %d setHp: %d' % (toon.doId, toon.hp))
if toon.unlimitedGags:
toon.doRestock(noUber=0, noPaid=0)
for deadToon in deadToons:
self.__removeToon(deadToon)
needUpdate = 1
self.clearAttacks()
self.d_setMovie()
self.d_setChosenToonAttacks()
self.localMovieDone(needUpdate, deadToons, deadSuits, lastActiveSuitDied)
def enterResume(self):
for suit in self.suits:
self.notify.info('battle done, resuming suit: %d' % suit.doId)
if suit.isDeleted():
self.notify.info('whoops, suit %d is deleted.' % suit.doId)
else:
suit.resume()
self.suits = []
self.joiningSuits = []
self.pendingSuits = []
self.adjustingSuits = []
self.activeSuits = []
self.luredSuits = []
for toonId in self.toons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.b_setBattleId(0)
messageToonReleased = 'Battle releasing toon %s' % toon.doId
messenger.send(messageToonReleased, [toon.doId])
for exitEvent in self.avatarExitEvents:
self.ignore(exitEvent)
eventMsg = {}
for encounter in self.suitsKilledThisBattle:
cog = encounter['type']
level = encounter['level']
msgName = '%s%s' % (cog, level)
if encounter['isSkelecog']:
msgName += '+'
if msgName in eventMsg:
eventMsg[msgName] += 1
else:
eventMsg[msgName] = 1
msgText = ''
for msgName, count in eventMsg.items():
if msgText != '':
msgText += ','
msgText += '%s%s' % (count, msgName)
self.air.writeServerEvent('battleCogsDefeated', self.doId, '%s|%s' % (msgText, self.getTaskZoneId()))
def exitResume(self):
pass
def isJoinable(self):
return self.joinableFsm.getCurrentState().getName() == 'Joinable'
def enterJoinable(self):
self.notify.debug('enterJoinable()')
def exitJoinable(self):
pass
def enterUnjoinable(self):
self.notify.debug('enterUnjoinable()')
def exitUnjoinable(self):
pass
def isRunable(self):
return self.runableFsm.getCurrentState().getName() == 'Runable'
def enterRunable(self):
self.notify.debug('enterRunable()')
def exitRunable(self):
pass
def enterUnrunable(self):
self.notify.debug('enterUnrunable()')
def exitUnrunable(self):
pass
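    # Estimates how long clients need to walk suits/toons from their pending spots to the
    # active battle positions, so the server knows how long to wait before declaring the
    # adjust finished.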
def __estimateAdjustTime(self):
self.needAdjust = 0
adjustTime = 0
if len(self.pendingSuits) > 0 or self.suitGone == 1:
self.suitGone = 0
pos0 = self.suitPendingPoints[0][0]
pos1 = self.suitPoints[0][0][0]
adjustTime = self.calcSuitMoveTime(pos0, pos1)
if len(self.pendingToons) > 0 or self.toonGone == 1:
self.toonGone = 0
if adjustTime == 0:
pos0 = self.toonPendingPoints[0][0]
pos1 = self.toonPoints[0][0][0]
adjustTime = self.calcToonMoveTime(pos0, pos1)
return adjustTime
def enterAdjusting(self):
self.notify.debug('enterAdjusting()')
self.timer.stop()
self.__resetAdjustingResponses()
self.adjustingTimer.startCallback(self.__estimateAdjustTime() + SERVER_BUFFER_TIME, self.__serverAdjustingDone)
def __serverAdjustingDone(self):
if self.needAdjust == 1:
self.adjustFsm.request('NotAdjusting')
self.__requestAdjust()
else:
self.notify.debug('adjusting timed out on the server')
self.ignoreAdjustingResponses = 1
self.__adjustDone()
def exitAdjusting(self):
currStateName = self.fsm.getCurrentState().getName()
if currStateName == 'WaitForInput':
self.timer.restart()
elif currStateName == 'WaitForJoin':
self.b_setState('WaitForInput')
self.adjustingTimer.stop()
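    # If any active suit is already standing on an uber-level (railroad) trap, suits that
    # just moved into the active row are given the same trap so the train hits all of them.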
def __addTrainTrapForNewSuits(self):
hasTrainTrap = False
trapInfo = None
for otherSuit in self.activeSuits:
if otherSuit.battleTrap == UBER_GAG_LEVEL_INDEX:
hasTrainTrap = True
if hasTrainTrap:
for curSuit in self.activeSuits:
if not curSuit.battleTrap == UBER_GAG_LEVEL_INDEX:
oldBattleTrap = curSuit.battleTrap
curSuit.battleTrap = UBER_GAG_LEVEL_INDEX
self.battleCalc.addTrainTrapForJoiningSuit(curSuit.doId)
self.notify.debug('setting traintrack trap for joining suit %d oldTrap=%s' % (curSuit.doId, oldBattleTrap))
def __adjustDone(self):
for s in self.adjustingSuits:
self.pendingSuits.remove(s)
self.activeSuits.append(s)
self.adjustingSuits = []
for toon in self.adjustingToons:
if self.pendingToons.count(toon) == 1:
self.pendingToons.remove(toon)
else:
self.notify.warning('adjustDone() - toon: %d not pending!' % toon.doId)
if self.activeToons.count(toon) == 0:
self.activeToons.append(toon)
self.ignoreResponses = 0
self.sendEarnedExperience(toon)
else:
self.notify.warning('adjustDone() - toon: %d already active!' % toon.doId)
self.adjustingToons = []
self.__addTrainTrapForNewSuits()
self.d_setMembers()
self.adjustFsm.request('NotAdjusting')
if self.needAdjust == 1:
self.notify.debug('__adjustDone() - need to adjust again')
self.__requestAdjust()
def enterNotAdjusting(self):
self.notify.debug('enterNotAdjusting()')
if self.movieRequested == 1:
if len(self.activeToons) > 0 and self.__allActiveToonsResponded():
self.__requestMovie()
def exitNotAdjusting(self):
pass
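    # Activates the pet object on this district and invokes the callback with the live pet
    # once its generate event fires.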
def getPetProxyObject(self, petId, callback):
doneEvent = 'generate-%d' % petId
def handlePetProxyRead(pet):
callback(1, pet)
self.air.sendActivate(petId, self.air.districtId, 0)
self.acceptOnce(doneEvent, handlePetProxyRead)
def _getNextSerialNum(self):
num = self.serialNum
self.serialNum += 1
return num
def setFireCount(self, amount):
self.fireCount = amount
def getFireCount(self):
return self.fireCount
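# Developer magic word: force-finishes the current battle movie for whoever invoked it.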
@magicWord(category=CATEGORY_PROGRAMMER)
def skipMovie():
invoker = spellbook.getInvoker()
battleId = invoker.getBattleId()
if not battleId:
return 'You are not currently in a battle!'
    battle = simbase.air.doId2do.get(battleId)
    if not battle:
        return 'Could not find battle %s!' % battleId
    battle._DistributedBattleBaseAI__movieDone()
return 'Battle movie skipped.'
| import random
from otp.ai.AIBase import *
from direct.distributed.ClockDelta import *
from toontown.battle.BattleBase import *
from toontown.battle.BattleCalculatorAI import *
from toontown.toonbase.ToontownBattleGlobals import *
from toontown.battle.SuitBattleGlobals import *
from pandac.PandaModules import *
from toontown.battle import BattleExperienceAI
from direct.distributed import DistributedObjectAI
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from toontown.ai import DatabaseObject
from toontown.toon import DistributedToonAI
from toontown.toon import InventoryBase
from toontown.toonbase import ToontownGlobals
from toontown.toon import NPCToons
from otp.ai.MagicWordGlobal import *
from toontown.pets import DistributedPetProxyAI
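# Server-side controller for a single battle. A ClassicFSM drives each round through
# FaceOff/WaitForJoin -> WaitForInput -> MakeMovie -> PlayMovie -> Reward -> Resume, while
# smaller FSMs track whether the battle is joinable, runnable, or adjusting positions.
# Typical flow (sketch): suits join via suitRequestJoin(), toons via toonRequestJoin(),
# each client answers with requestAttack(), and once every active toon has responded the
# round is resolved by BattleCalculatorAI and played back to clients as a movie.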
class DistributedBattleBaseAI(DistributedObjectAI.DistributedObjectAI, BattleBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleBaseAI')
def __init__(self, air, zoneId, finishCallback = None, maxSuits = 4, bossBattle = 0, tutorialFlag = 0, interactivePropTrackBonus = -1):
DistributedObjectAI.DistributedObjectAI.__init__(self, air)
self.serialNum = 0
self.zoneId = zoneId
self.maxSuits = maxSuits
self.setBossBattle(bossBattle)
self.tutorialFlag = tutorialFlag
self.interactivePropTrackBonus = interactivePropTrackBonus
self.finishCallback = finishCallback
self.avatarExitEvents = []
self.responses = {}
self.adjustingResponses = {}
self.joinResponses = {}
self.adjustingSuits = []
self.adjustingToons = []
self.numSuitsEver = 0
BattleBase.__init__(self)
self.streetBattle = 1
self.pos = Point3(0, 0, 0)
self.initialSuitPos = Point3(0, 0, 0)
self.toonExp = {}
self.toonOrigQuests = {}
self.toonItems = {}
self.toonOrigMerits = {}
self.toonMerits = {}
self.toonParts = {}
self.battleCalc = BattleCalculatorAI(self, tutorialFlag)
if self.air.suitInvasionManager.getInvading():
mult = getInvasionMultiplier()
self.battleCalc.setSkillCreditMultiplier(mult)
if self.air.holidayManager.isMoreXpHolidayRunning():
mult = getMoreXpHolidayMultiplier()
self.battleCalc.setSkillCreditMultiplier(mult)
self.fsm = None
self.clearAttacks()
self.ignoreFaceOffDone = 0
self.needAdjust = 0
self.movieHasBeenMade = 0
self.movieHasPlayed = 0
self.rewardHasPlayed = 0
self.movieRequested = 0
self.ignoreResponses = 0
self.ignoreAdjustingResponses = 0
self.taskNames = []
self.exitedToons = []
self.suitsKilled = []
self.suitsKilledThisBattle = []
self.suitsKilledPerFloor = []
self.suitsEncountered = []
self.newToons = []
self.newSuits = []
self.numNPCAttacks = 0
self.npcAttacks = {}
self.pets = {}
self.fireCount = 0
self.fsm = ClassicFSM.ClassicFSM('DistributedBattleAI', [State.State('FaceOff', self.enterFaceOff, self.exitFaceOff, ['WaitForInput', 'Resume']),
State.State('WaitForJoin', self.enterWaitForJoin, self.exitWaitForJoin, ['WaitForInput', 'Resume']),
State.State('WaitForInput', self.enterWaitForInput, self.exitWaitForInput, ['MakeMovie', 'Resume']),
State.State('MakeMovie', self.enterMakeMovie, self.exitMakeMovie, ['PlayMovie', 'Resume']),
State.State('PlayMovie', self.enterPlayMovie, self.exitPlayMovie, ['WaitForJoin', 'Reward', 'Resume']),
State.State('Reward', self.enterReward, self.exitReward, ['Resume']),
State.State('Resume', self.enterResume, self.exitResume, []),
State.State('Off', self.enterOff, self.exitOff, ['FaceOff', 'WaitForJoin'])], 'Off', 'Off')
self.joinableFsm = ClassicFSM.ClassicFSM('Joinable', [State.State('Joinable', self.enterJoinable, self.exitJoinable, ['Unjoinable']), State.State('Unjoinable', self.enterUnjoinable, self.exitUnjoinable, ['Joinable'])], 'Unjoinable', 'Unjoinable')
self.joinableFsm.enterInitialState()
self.runableFsm = ClassicFSM.ClassicFSM('Runable', [State.State('Runable', self.enterRunable, self.exitRunable, ['Unrunable']), State.State('Unrunable', self.enterUnrunable, self.exitUnrunable, ['Runable'])], 'Unrunable', 'Unrunable')
self.runableFsm.enterInitialState()
self.adjustFsm = ClassicFSM.ClassicFSM('Adjust', [State.State('Adjusting', self.enterAdjusting, self.exitAdjusting, ['NotAdjusting', 'Adjusting']), State.State('NotAdjusting', self.enterNotAdjusting, self.exitNotAdjusting, ['Adjusting'])], 'NotAdjusting', 'NotAdjusting')
self.adjustFsm.enterInitialState()
self.fsm.enterInitialState()
self.startTime = globalClock.getRealTime()
self.adjustingTimer = Timer()
def clearAttacks(self):
self.toonAttacks = {}
self.suitAttacks = getDefaultSuitAttacks()
def requestDelete(self):
if hasattr(self, 'fsm'):
self.fsm.request('Off')
self.__removeTaskName(self.uniqueName('make-movie'))
DistributedObjectAI.DistributedObjectAI.requestDelete(self)
def delete(self):
self.notify.debug('deleting battle')
self.fsm.request('Off')
self.ignoreAll()
self.__removeAllTasks()
del self.fsm
del self.joinableFsm
del self.runableFsm
del self.adjustFsm
self.__cleanupJoinResponses()
self.timer.stop()
del self.timer
self.adjustingTimer.stop()
del self.adjustingTimer
self.battleCalc.cleanup()
del self.battleCalc
for suit in self.suits:
del suit.battleTrap
del self.finishCallback
for petProxy in self.pets.values():
petProxy.requestDelete()
DistributedObjectAI.DistributedObjectAI.delete(self)
def pause(self):
self.timer.stop()
self.adjustingTimer.stop()
def unpause(self):
self.timer.resume()
self.adjustingTimer.resume()
def abortBattle(self):
self.notify.debug('%s.abortBattle() called.' % self.doId)
toonsCopy = self.toons[:]
for toonId in toonsCopy:
self.__removeToon(toonId)
if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie':
self.exitedToons.append(toonId)
self.d_setMembers()
self.b_setState('Resume')
self.__removeAllTasks()
self.timer.stop()
self.adjustingTimer.stop()
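    # NOTE: __removeSuit is redefined identically further down in this class; the later
    # definition is the one Python keeps.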
def __removeSuit(self, suit):
self.notify.debug('__removeSuit(%d)' % suit.doId)
self.suits.remove(suit)
self.activeSuits.remove(suit)
if self.luredSuits.count(suit) == 1:
self.luredSuits.remove(suit)
self.suitGone = 1
del suit.battleTrap
def findSuit(self, id):
for s in self.suits:
if s.doId == id:
return s
return None
def __removeTaskName(self, name):
if self.taskNames.count(name):
self.taskNames.remove(name)
self.notify.debug('removeTaskName() - %s' % name)
taskMgr.remove(name)
def __removeAllTasks(self):
for n in self.taskNames:
self.notify.debug('removeAllTasks() - %s' % n)
taskMgr.remove(n)
self.taskNames = []
def __removeToonTasks(self, toonId):
name = self.taskName('running-toon-%d' % toonId)
self.__removeTaskName(name)
name = self.taskName('to-pending-av-%d' % toonId)
self.__removeTaskName(name)
def getLevelDoId(self):
return 0
def getBattleCellId(self):
return 0
def getPosition(self):
self.notify.debug('getPosition() - %s' % self.pos)
return [self.pos[0], self.pos[1], self.pos[2]]
def getInitialSuitPos(self):
p = []
p.append(self.initialSuitPos[0])
p.append(self.initialSuitPos[1])
p.append(self.initialSuitPos[2])
return p
def setBossBattle(self, bossBattle):
self.bossBattle = bossBattle
def getBossBattle(self):
return self.bossBattle
def b_setState(self, state):
self.notify.debug('network:setState(%s)' % state)
stime = globalClock.getRealTime() + SERVER_BUFFER_TIME
self.sendUpdate('setState', [state, globalClockDelta.localToNetworkTime(stime)])
self.setState(state)
def setState(self, state):
self.fsm.request(state)
def getState(self):
return [self.fsm.getCurrentState().getName(), globalClockDelta.getRealNetworkTime()]
def d_setMembers(self):
self.notify.debug('network:setMembers()')
self.sendUpdate('setMembers', self.getMembers())
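    # Membership is serialized compactly: each joining/pending/active/lured entry is the
    # string index of that suit or toon in the full lists, and suit traps use '9' for
    # "no trap" (or a trap conflict).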
def getMembers(self):
suits = []
for s in self.suits:
suits.append(s.doId)
joiningSuits = ''
for s in self.joiningSuits:
joiningSuits += str(suits.index(s.doId))
pendingSuits = ''
for s in self.pendingSuits:
pendingSuits += str(suits.index(s.doId))
activeSuits = ''
for s in self.activeSuits:
activeSuits += str(suits.index(s.doId))
luredSuits = ''
for s in self.luredSuits:
luredSuits += str(suits.index(s.doId))
suitTraps = ''
for s in self.suits:
if s.battleTrap == NO_TRAP:
suitTraps += '9'
elif s.battleTrap == BattleCalculatorAI.TRAP_CONFLICT:
suitTraps += '9'
else:
suitTraps += str(s.battleTrap)
toons = []
for t in self.toons:
toons.append(t)
joiningToons = ''
for t in self.joiningToons:
joiningToons += str(toons.index(t))
pendingToons = ''
for t in self.pendingToons:
pendingToons += str(toons.index(t))
activeToons = ''
for t in self.activeToons:
activeToons += str(toons.index(t))
runningToons = ''
for t in self.runningToons:
runningToons += str(toons.index(t))
self.notify.debug('getMembers() - suits: %s joiningSuits: %s pendingSuits: %s activeSuits: %s luredSuits: %s suitTraps: %s toons: %s joiningToons: %s pendingToons: %s activeToons: %s runningToons: %s' % (suits,
joiningSuits,
pendingSuits,
activeSuits,
luredSuits,
suitTraps,
toons,
joiningToons,
pendingToons,
activeToons,
runningToons))
return [suits,
joiningSuits,
pendingSuits,
activeSuits,
luredSuits,
suitTraps,
toons,
joiningToons,
pendingToons,
activeToons,
runningToons,
globalClockDelta.getRealNetworkTime()]
def d_adjust(self):
self.notify.debug('network:adjust()')
self.sendUpdate('adjust', [globalClockDelta.getRealNetworkTime()])
def getInteractivePropTrackBonus(self):
return self.interactivePropTrackBonus
def getZoneId(self):
return self.zoneId
def getTaskZoneId(self):
return self.zoneId
def d_setMovie(self):
self.notify.debug('network:setMovie()')
self.sendUpdate('setMovie', self.getMovie())
self.__updateEncounteredCogs()
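    # Movie payload layout: [movieHasBeenMade, active toon ids, active suit ids], then
    # exactly four toon-attack entries (missing slots padded with empty attacks), then one
    # entry per slot in suitAttacks.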
def getMovie(self):
suitIds = []
for s in self.activeSuits:
suitIds.append(s.doId)
p = [self.movieHasBeenMade]
p.append(self.activeToons)
p.append(suitIds)
for t in self.activeToons:
if t in self.toonAttacks:
ta = self.toonAttacks[t]
index = -1
id = ta[TOON_ID_COL]
if id != -1:
index = self.activeToons.index(id)
track = ta[TOON_TRACK_COL]
if (track == NO_ATTACK or attackAffectsGroup(track, ta[TOON_LVL_COL])) and track != NPCSOS and track != PETSOS:
target = -1
if track == HEAL:
if ta[TOON_LVL_COL] == 1:
ta[TOON_HPBONUS_COL] = random.randint(0, 10000)
elif track == SOS or track == NPCSOS or track == PETSOS:
target = ta[TOON_TGT_COL]
elif track == HEAL:
if self.activeToons.count(ta[TOON_TGT_COL]) != 0:
target = self.activeToons.index(ta[TOON_TGT_COL])
else:
target = -1
elif suitIds.count(ta[TOON_TGT_COL]) != 0:
target = suitIds.index(ta[TOON_TGT_COL])
else:
target = -1
p = p + [index,
track,
ta[TOON_LVL_COL],
target]
p = p + ta[4:]
else:
index = self.activeToons.index(t)
attack = getToonAttack(index)
p = p + attack
for i in range(4 - len(self.activeToons)):
p = p + getToonAttack(-1)
for sa in self.suitAttacks:
index = -1
id = sa[SUIT_ID_COL]
if id != -1:
index = suitIds.index(id)
if sa[SUIT_ATK_COL] == -1:
targetIndex = -1
else:
targetIndex = sa[SUIT_TGT_COL]
if targetIndex == -1:
self.notify.debug('suit attack: %d must be group' % sa[SUIT_ATK_COL])
else:
toonId = self.activeToons[targetIndex]
p = p + [index, sa[SUIT_ATK_COL], targetIndex]
sa[SUIT_TAUNT_COL] = 0
if sa[SUIT_ATK_COL] != -1:
suit = self.findSuit(id)
sa[SUIT_TAUNT_COL] = getAttackTauntIndexFromIndex(suit, sa[SUIT_ATK_COL])
p = p + sa[3:]
return p
def d_setChosenToonAttacks(self):
self.notify.debug('network:setChosenToonAttacks()')
self.sendUpdate('setChosenToonAttacks', self.getChosenToonAttacks())
def getChosenToonAttacks(self):
ids = []
tracks = []
levels = []
targets = []
for t in self.activeToons:
if t in self.toonAttacks:
ta = self.toonAttacks[t]
else:
ta = getToonAttack(t)
ids.append(t)
tracks.append(ta[TOON_TRACK_COL])
levels.append(ta[TOON_LVL_COL])
targets.append(ta[TOON_TGT_COL])
return [ids,
tracks,
levels,
targets]
def d_setBattleExperience(self):
self.notify.debug('network:setBattleExperience()')
self.sendUpdate('setBattleExperience', self.getBattleExperience())
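    # Delegates to BattleExperienceAI to pack per-toon experience, merits, items and quest
    # progress earned this battle into the network field.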
def getBattleExperience(self):
returnValue = BattleExperienceAI.getBattleExperience(4, self.activeToons, self.toonExp, self.battleCalc.toonSkillPtsGained, self.toonOrigQuests, self.toonItems, self.toonOrigMerits, self.toonMerits, self.toonParts, self.suitsKilled, self.helpfulToons)
return returnValue
def getToonUberStatus(self):
fieldList = []
uberIndex = LAST_REGULAR_GAG_LEVEL + 1
        for toonId in self.activeToons:
            toon = self.getToon(toonId)
            toonList = []
            for trackIndex in range(MAX_TRACK_INDEX):
                # original referenced an undefined 'track' and used the toon id as a toon object
                toonList.append(toon.inventory.numItem(trackIndex, uberIndex) if toon else 0)
            fieldList.append(encodeUber(toonList))
return fieldList
def addSuit(self, suit):
self.notify.debug('addSuit(%d)' % suit.doId)
self.newSuits.append(suit)
self.suits.append(suit)
suit.battleTrap = NO_TRAP
self.numSuitsEver += 1
def __joinSuit(self, suit):
self.joiningSuits.append(suit)
toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME
taskName = self.taskName('to-pending-av-%d' % suit.doId)
self.__addJoinResponse(suit.doId, taskName)
self.taskNames.append(taskName)
taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(suit.doId, taskName))
def __serverJoinDone(self, avId, taskName):
self.notify.debug('join for av: %d timed out on server' % avId)
self.__removeTaskName(taskName)
self.__makeAvPending(avId)
return Task.done
def __makeAvPending(self, avId):
self.notify.debug('__makeAvPending(%d)' % avId)
self.__removeJoinResponse(avId)
self.__removeTaskName(self.taskName('to-pending-av-%d' % avId))
if self.toons.count(avId) > 0:
self.joiningToons.remove(avId)
self.pendingToons.append(avId)
else:
suit = self.findSuit(avId)
if suit != None:
if not suit.isEmpty():
if not self.joiningSuits.count(suit) == 1:
self.notify.warning('__makeAvPending(%d) in zone: %d' % (avId, self.zoneId))
self.notify.warning('toons: %s' % self.toons)
self.notify.warning('joining toons: %s' % self.joiningToons)
self.notify.warning('pending toons: %s' % self.pendingToons)
self.notify.warning('suits: %s' % self.suits)
self.notify.warning('joining suits: %s' % self.joiningSuits)
self.notify.warning('pending suits: %s' % self.pendingSuits)
self.joiningSuits.remove(suit)
self.pendingSuits.append(suit)
else:
self.notify.warning('makeAvPending() %d not in toons or suits' % avId)
return
self.d_setMembers()
self.needAdjust = 1
self.__requestAdjust()
def suitRequestJoin(self, suit):
self.notify.debug('suitRequestJoin(%d)' % suit.getDoId())
if self.suitCanJoin():
self.addSuit(suit)
self.__joinSuit(suit)
self.d_setMembers()
suit.prepareToJoinBattle()
return 1
else:
self.notify.warning('suitRequestJoin() - not joinable - joinable state: %s max suits: %d' % (self.joinableFsm.getCurrentState().getName(), self.maxSuits))
return 0
def addToon(self, avId):
self.notify.debug('addToon(%d)' % avId)
toon = self.getToon(avId)
if toon == None:
return 0
toon.stopToonUp()
event = simbase.air.getAvatarExitEvent(avId)
self.avatarExitEvents.append(event)
self.accept(event, self.__handleUnexpectedExit, extraArgs=[avId])
event = 'inSafezone-%s' % avId
self.avatarExitEvents.append(event)
self.accept(event, self.__handleSuddenExit, extraArgs=[avId, 0])
self.newToons.append(avId)
self.toons.append(avId)
toon = simbase.air.doId2do.get(avId)
if toon:
if hasattr(self, 'doId'):
toon.b_setBattleId(self.doId)
else:
toon.b_setBattleId(-1)
messageToonAdded = 'Battle adding toon %s' % avId
messenger.send(messageToonAdded, [avId])
if self.fsm != None and self.fsm.getCurrentState().getName() == 'PlayMovie':
self.responses[avId] = 1
else:
self.responses[avId] = 0
self.adjustingResponses[avId] = 0
if avId not in self.toonExp:
p = []
for t in Tracks:
p.append(toon.experience.getExp(t))
self.toonExp[avId] = p
if avId not in self.toonOrigMerits:
self.toonOrigMerits[avId] = toon.cogMerits[:]
if avId not in self.toonMerits:
self.toonMerits[avId] = [0,
0,
0,
0,
0]
if avId not in self.toonOrigQuests:
flattenedQuests = []
for quest in toon.quests:
flattenedQuests.extend(quest)
self.toonOrigQuests[avId] = flattenedQuests
if avId not in self.toonItems:
self.toonItems[avId] = ([], [])
return 1
def __joinToon(self, avId, pos):
self.joiningToons.append(avId)
toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME
taskName = self.taskName('to-pending-av-%d' % avId)
self.__addJoinResponse(avId, taskName, toon=1)
taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(avId, taskName))
self.taskNames.append(taskName)
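    # Records which cog types each active toon has seen so the cog page can be credited
    # when the toon leaves the battle.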
def __updateEncounteredCogs(self):
for toon in self.activeToons:
if toon in self.newToons:
for suit in self.activeSuits:
if hasattr(suit, 'dna'):
self.suitsEncountered.append({'type': suit.dna.name,
'activeToons': self.activeToons[:]})
else:
self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
return
self.newToons.remove(toon)
for suit in self.activeSuits:
if suit in self.newSuits:
if hasattr(suit, 'dna'):
self.suitsEncountered.append({'type': suit.dna.name,
'activeToons': self.activeToons[:]})
else:
self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons))
return
self.newSuits.remove(suit)
def __makeToonRun(self, toonId, updateAttacks):
self.activeToons.remove(toonId)
self.toonGone = 1
self.runningToons.append(toonId)
taskName = self.taskName('running-toon-%d' % toonId)
taskMgr.doMethodLater(TOON_RUN_T, self.__serverRunDone, taskName, extraArgs=(toonId, updateAttacks, taskName))
self.taskNames.append(taskName)
def __serverRunDone(self, toonId, updateAttacks, taskName):
self.notify.debug('run for toon: %d timed out on server' % toonId)
self.__removeTaskName(taskName)
self.__removeToon(toonId)
self.d_setMembers()
if len(self.toons) == 0:
self.notify.debug('last toon is gone - battle is finished')
self.b_setState('Resume')
else:
if updateAttacks == 1:
self.d_setChosenToonAttacks()
self.needAdjust = 1
self.__requestAdjust()
return Task.done
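    # Suits/toons are only shuffled into position while the battle is waiting for joins or
    # input and no adjust is already running; callers set self.needAdjust before requesting.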
def __requestAdjust(self):
if not self.fsm:
return
cstate = self.fsm.getCurrentState().getName()
if cstate == 'WaitForInput' or cstate == 'WaitForJoin':
if self.adjustFsm.getCurrentState().getName() == 'NotAdjusting':
if self.needAdjust == 1:
self.d_adjust()
self.adjustingSuits = []
for s in self.pendingSuits:
self.adjustingSuits.append(s)
self.adjustingToons = []
for t in self.pendingToons:
self.adjustingToons.append(t)
self.adjustFsm.request('Adjusting')
else:
self.notify.debug('requestAdjust() - dont need to')
else:
self.notify.debug('requestAdjust() - already adjusting')
else:
self.notify.debug('requestAdjust() - in state: %s' % cstate)
def __handleUnexpectedExit(self, avId):
#TODO: fixme
#disconnectCode = self.air.getAvatarDisconnectReason(avId)
disconnectCode = "placeHolder dc code, need self.air.getAvatarDisconnectReason(avId)"
self.notify.warning('toon: %d exited unexpectedly, reason %s' % (avId, disconnectCode))
#userAborted = disconnectCode == ToontownGlobals.DisconnectCloseWindow
#TODO: fixme
userAborted = False
self.__handleSuddenExit(avId, userAborted)
def __handleSuddenExit(self, avId, userAborted):
self.__removeToon(avId, userAborted=userAborted)
if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie':
self.exitedToons.append(avId)
self.d_setMembers()
if len(self.toons) == 0:
self.notify.debug('last toon is gone - battle is finished')
self.__removeAllTasks()
self.timer.stop()
self.adjustingTimer.stop()
self.b_setState('Resume')
else:
self.needAdjust = 1
self.__requestAdjust()
def __removeSuit(self, suit):
self.notify.debug('__removeSuit(%d)' % suit.doId)
self.suits.remove(suit)
self.activeSuits.remove(suit)
if self.luredSuits.count(suit) == 1:
self.luredSuits.remove(suit)
self.suitGone = 1
del suit.battleTrap
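    # Removes a toon from every tracking list, hands its HP back to the avatar, releases its
    # pet proxy, and credits cog page progress unless the toon aborted.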
def __removeToon(self, toonId, userAborted = 0):
self.notify.debug('__removeToon(%d)' % toonId)
if self.toons.count(toonId) == 0:
return
self.battleCalc.toonLeftBattle(toonId)
self.__removeToonTasks(toonId)
self.toons.remove(toonId)
if self.joiningToons.count(toonId) == 1:
self.joiningToons.remove(toonId)
if self.pendingToons.count(toonId) == 1:
self.pendingToons.remove(toonId)
if self.activeToons.count(toonId) == 1:
activeToonIdx = self.activeToons.index(toonId)
self.notify.debug('removing activeToons[%d], updating suitAttacks SUIT_HP_COL to match' % activeToonIdx)
for i in range(len(self.suitAttacks)):
if activeToonIdx < len(self.suitAttacks[i][SUIT_HP_COL]):
del self.suitAttacks[i][SUIT_HP_COL][activeToonIdx]
else:
self.notify.warning("suitAttacks %d doesn't have an HP column for active toon index %d" % (i, activeToonIdx))
self.activeToons.remove(toonId)
if self.runningToons.count(toonId) == 1:
self.runningToons.remove(toonId)
if self.adjustingToons.count(toonId) == 1:
self.notify.warning('removeToon() - toon: %d was adjusting!' % toonId)
self.adjustingToons.remove(toonId)
self.toonGone = 1
if toonId in self.pets:
self.pets[toonId].requestDelete()
del self.pets[toonId]
self.__removeResponse(toonId)
self.__removeAdjustingResponse(toonId)
self.__removeJoinResponses(toonId)
event = simbase.air.getAvatarExitEvent(toonId)
self.avatarExitEvents.remove(event)
self.ignore(event)
event = 'inSafezone-%s' % toonId
self.avatarExitEvents.remove(event)
self.ignore(event)
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.b_setBattleId(0)
messageToonReleased = 'Battle releasing toon %s' % toon.doId
messenger.send(messageToonReleased, [toon.doId])
if not userAborted:
toon = self.getToon(toonId)
if toon != None:
toon.hpOwnedByBattle = 0
toon.d_setHp(toon.hp)
toon.d_setInventory(toon.inventory.makeNetString())
self.air.cogPageManager.toonEncounteredCogs(toon, self.suitsEncountered, self.getTaskZoneId())
elif len(self.suits) > 0 and not self.streetBattle:
self.notify.info('toon %d aborted non-street battle; clearing inventory and hp.' % toonId)
toon = DistributedToonAI.DistributedToonAI(self.air)
toon.doId = toonId
empty = InventoryBase.InventoryBase(toon)
toon.b_setInventory(empty.makeNetString())
toon.b_setHp(0)
db = DatabaseObject.DatabaseObject(self.air, toonId)
db.storeObject(toon, ['setInventory', 'setHp'])
self.notify.info('killing mem leak from temporary DistributedToonAI %d' % toonId)
toon.deleteDummy()
def getToon(self, toonId):
if toonId in self.air.doId2do:
return self.air.doId2do[toonId]
else:
self.notify.warning('getToon() - toon: %d not in repository!' % toonId)
return
def toonRequestRun(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('ignoring response from toon: %d' % toonId)
return
self.notify.debug('toonRequestRun(%d)' % toonId)
if not self.isRunable():
self.notify.warning('toonRequestRun() - not runable')
return
updateAttacks = 0
if self.activeToons.count(toonId) == 0:
self.notify.warning('toon tried to run, but not found in activeToons: %d' % toonId)
return
for toon in self.activeToons:
if toon in self.toonAttacks:
ta = self.toonAttacks[toon]
track = ta[TOON_TRACK_COL]
level = ta[TOON_LVL_COL]
                if ta[TOON_TGT_COL] == toonId or (track == HEAL and attackAffectsGroup(track, level) and len(self.activeToons) <= 2):
healerId = ta[TOON_ID_COL]
self.notify.debug('resetting toon: %ds attack' % healerId)
self.toonAttacks[toon] = getToonAttack(toon, track=UN_ATTACK)
self.responses[healerId] = 0
updateAttacks = 1
self.__makeToonRun(toonId, updateAttacks)
self.d_setMembers()
self.needAdjust = 1
self.__requestAdjust()
def toonRequestJoin(self, x, y, z):
toonId = self.air.getAvatarIdFromSender()
self.notify.debug('toonRequestJoin(%d)' % toonId)
self.signupToon(toonId, x, y, z)
def toonDied(self):
toonId = self.air.getAvatarIdFromSender()
self.notify.debug('toonDied(%d)' % toonId)
if toonId in self.toons:
toon = self.getToon(toonId)
if toon:
toon.hp = -1
toon.inventory.zeroInv(1)
self.__handleSuddenExit(toonId, 0)
def signupToon(self, toonId, x, y, z):
if self.toons.count(toonId):
return
if self.toonCanJoin():
if self.addToon(toonId):
self.__joinToon(toonId, Point3(x, y, z))
self.d_setMembers()
else:
self.notify.warning('toonRequestJoin() - not joinable')
self.d_denyLocalToonJoin(toonId)
def d_denyLocalToonJoin(self, toonId):
self.notify.debug('network: denyLocalToonJoin(%d)' % toonId)
self.sendUpdateToAvatarId(toonId, 'denyLocalToonJoin', [])
def resetResponses(self):
self.responses = {}
for t in self.toons:
self.responses[t] = 0
self.ignoreResponses = 0
def allToonsResponded(self):
for t in self.toons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allPendingActiveToonsResponded(self):
for t in self.pendingToons + self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __allActiveToonsResponded(self):
for t in self.activeToons:
if self.responses[t] == 0:
return 0
self.ignoreResponses = 1
return 1
def __removeResponse(self, toonId):
del self.responses[toonId]
if self.ignoreResponses == 0 and len(self.toons) > 0:
currStateName = self.fsm.getCurrentState().getName()
if currStateName == 'WaitForInput':
if self.__allActiveToonsResponded():
self.notify.debug('removeResponse() - dont wait for movie')
self.__requestMovie()
elif currStateName == 'PlayMovie':
if self.__allPendingActiveToonsResponded():
self.notify.debug('removeResponse() - surprise movie done')
self.__movieDone()
elif currStateName == 'Reward' or currStateName == 'BuildingReward':
if self.__allActiveToonsResponded():
self.notify.debug('removeResponse() - surprise reward done')
self.handleRewardDone()
def __resetAdjustingResponses(self):
self.adjustingResponses = {}
for t in self.toons:
self.adjustingResponses[t] = 0
self.ignoreAdjustingResponses = 0
def __allAdjustingToonsResponded(self):
for t in self.toons:
if self.adjustingResponses[t] == 0:
return 0
self.ignoreAdjustingResponses = 1
return 1
def __removeAdjustingResponse(self, toonId):
if toonId in self.adjustingResponses:
del self.adjustingResponses[toonId]
if self.ignoreAdjustingResponses == 0 and len(self.toons) > 0:
if self.__allAdjustingToonsResponded():
self.__adjustDone()
def __addJoinResponse(self, avId, taskName, toon = 0):
if toon == 1:
for jr in self.joinResponses.values():
jr[avId] = 0
self.joinResponses[avId] = {}
for t in self.toons:
self.joinResponses[avId][t] = 0
self.joinResponses[avId]['taskName'] = taskName
def __removeJoinResponses(self, avId):
self.__removeJoinResponse(avId)
removedOne = 0
for j in self.joinResponses.values():
if avId in j:
del j[avId]
removedOne = 1
if removedOne == 1:
for t in self.joiningToons:
if self.__allToonsRespondedJoin(t):
self.__makeAvPending(t)
def __removeJoinResponse(self, avId):
if avId in self.joinResponses:
taskMgr.remove(self.joinResponses[avId]['taskName'])
del self.joinResponses[avId]
def __allToonsRespondedJoin(self, avId):
jr = self.joinResponses[avId]
for t in self.toons:
if jr[t] == 0:
return 0
return 1
def __cleanupJoinResponses(self):
for jr in self.joinResponses.values():
taskMgr.remove(jr['taskName'])
del jr
def adjustDone(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreAdjustingResponses == 1:
self.notify.debug('adjustDone() - ignoring toon: %d' % toonId)
return
elif self.adjustFsm.getCurrentState().getName() != 'Adjusting':
self.notify.warning('adjustDone() - in state %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('adjustDone() - toon: %d not in toon list' % toonId)
return
self.adjustingResponses[toonId] += 1
self.notify.debug('toon: %d done adjusting' % toonId)
if self.__allAdjustingToonsResponded():
self.__adjustDone()
def timeout(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('timeout() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'WaitForInput':
self.notify.warning('timeout() - in state: %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('timeout() - toon: %d not in toon list' % toonId)
return
self.toonAttacks[toonId] = getToonAttack(toonId)
self.d_setChosenToonAttacks()
self.responses[toonId] += 1
self.notify.debug('toon: %d timed out' % toonId)
if self.__allActiveToonsResponded():
self.__requestMovie(timeout=1)
def movieDone(self):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('movieDone() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'PlayMovie':
self.notify.warning('movieDone() - in state %s' % self.fsm.getCurrentState().getName())
return
elif self.toons.count(toonId) == 0:
self.notify.warning('movieDone() - toon: %d not in toon list' % toonId)
return
self.responses[toonId] += 1
self.notify.debug('toon: %d done with movie' % toonId)
if self.__allPendingActiveToonsResponded():
self.__movieDone()
else:
self.timer.stop()
self.timer.startCallback(TIMEOUT_PER_USER, self.__serverMovieDone)
def rewardDone(self):
toonId = self.air.getAvatarIdFromSender()
stateName = self.fsm.getCurrentState().getName()
if self.ignoreResponses == 1:
self.notify.debug('rewardDone() - ignoring toon: %d' % toonId)
return
elif stateName not in ('Reward', 'BuildingReward', 'FactoryReward', 'MintReward', 'StageReward', 'CountryClubReward'):
self.notify.warning('rewardDone() - in state %s' % stateName)
return
elif self.toons.count(toonId) == 0:
self.notify.warning('rewardDone() - toon: %d not in toon list' % toonId)
return
self.responses[toonId] += 1
self.notify.debug('toon: %d done with reward' % toonId)
if self.__allActiveToonsResponded():
self.handleRewardDone()
else:
self.timer.stop()
self.timer.startCallback(TIMEOUT_PER_USER, self.serverRewardDone)
def assignRewards(self):
if self.rewardHasPlayed == 1:
self.notify.debug('handleRewardDone() - reward has already played')
return
self.rewardHasPlayed = 1
BattleExperienceAI.assignRewards(self.activeToons, self.battleCalc.toonSkillPtsGained, self.suitsKilled, self.getTaskZoneId(), self.helpfulToons)
def joinDone(self, avId):
toonId = self.air.getAvatarIdFromSender()
if self.toons.count(toonId) == 0:
self.notify.warning('joinDone() - toon: %d not in toon list' % toonId)
return
if avId not in self.joinResponses:
self.notify.debug('joinDone() - no entry for: %d - ignoring: %d' % (avId, toonId))
return
jr = self.joinResponses[avId]
if toonId in jr:
jr[toonId] += 1
self.notify.debug('client with localToon: %d done joining av: %d' % (toonId, avId))
if self.__allToonsRespondedJoin(avId):
self.__makeAvPending(avId)
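    # Client RPC: validates and records the attack a toon picked this round (gag, SOS,
    # NPC SOS, pet trick, fire, pass or un-attack) and requests the movie once every active
    # toon has answered.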
def requestAttack(self, track, level, av):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('requestAttack() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'WaitForInput':
self.notify.warning('requestAttack() - in state: %s' % self.fsm.getCurrentState().getName())
return
elif self.activeToons.count(toonId) == 0:
self.notify.warning('requestAttack() - toon: %d not in toon list' % toonId)
return
self.notify.debug('requestAttack(%d, %d, %d, %d)' % (toonId,
track,
level,
av))
toon = self.getToon(toonId)
if toon == None:
self.notify.warning('requestAttack() - no toon: %d' % toonId)
return
validResponse = 1
if track == SOS:
self.notify.debug('toon: %d calls for help' % toonId)
self.air.writeServerEvent('friendSOS', toonId, '%s' % av)
self.toonAttacks[toonId] = getToonAttack(toonId, track=SOS, target=av)
elif track == NPCSOS:
self.notify.debug('toon: %d calls for help' % toonId)
self.air.writeServerEvent('NPCSOS', toonId, '%s' % av)
toon = self.getToon(toonId)
if toon == None:
return
if av in toon.NPCFriendsDict:
npcCollision = 0
if av in self.npcAttacks:
callingToon = self.npcAttacks[av]
if self.activeToons.count(callingToon) == 1:
self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
npcCollision = 1
if npcCollision == 0:
self.toonAttacks[toonId] = getToonAttack(toonId, track=NPCSOS, level=5, target=av)
self.numNPCAttacks += 1
self.npcAttacks[av] = toonId
elif track == PETSOS:
self.notify.debug('toon: %d calls for pet: %d' % (toonId, av))
self.air.writeServerEvent('PETSOS', toonId, '%s' % av)
toon = self.getToon(toonId)
if toon == None:
return
if not self.validate(toonId, level in toon.petTrickPhrases, 'requestAttack: invalid pet trickId: %s' % level):
return
self.toonAttacks[toonId] = getToonAttack(toonId, track=PETSOS, level=level, target=av)
elif track == UN_ATTACK:
self.notify.debug('toon: %d changed its mind' % toonId)
self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK)
if toonId in self.responses:
self.responses[toonId] = 0
validResponse = 0
elif track == PASS:
self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
elif track == FIRE:
if simbase.air.doId2do[toonId].getPinkSlips() < self.getFireCount() + 1:
#Not allowed to fire, force them to pass >:D
self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS)
else:
#Allowed to fire
self.setFireCount(self.fireCount + 1)
self.toonAttacks[toonId] = getToonAttack(toonId, track=FIRE, target=av)
else:
if not self.validate(toonId, track >= 0 and track <= MAX_TRACK_INDEX, 'requestAttack: invalid track %s' % track):
return
if not self.validate(toonId, level >= 0 and level <= MAX_LEVEL_INDEX, 'requestAttack: invalid level %s' % level):
return
if toon.inventory.numItem(track, level) == 0:
self.notify.warning('requestAttack() - toon has no item track: %d level: %d' % (track, level))
self.toonAttacks[toonId] = getToonAttack(toonId)
return
if track == HEAL:
if self.runningToons.count(av) == 1 or attackAffectsGroup(track, level) and len(self.activeToons) < 2:
self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK)
validResponse = 0
else:
self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av)
else:
self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av)
if av == -1 and not attackAffectsGroup(track, level):
validResponse = 0
self.d_setChosenToonAttacks()
if validResponse == 1:
self.responses[toonId] += 1
self.notify.debug('toon: %d chose an attack' % toonId)
if self.__allActiveToonsResponded():
self.__requestMovie()
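    # Client RPC: builds a DistributedPetProxyAI mirroring the real pet's mood and
    # appearance fields so the pet can perform tricks in this battle; the proxy is generated
    # only after the original pet object has left the district.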
def requestPetProxy(self, av):
toonId = self.air.getAvatarIdFromSender()
if self.ignoreResponses == 1:
self.notify.debug('requestPetProxy() - ignoring toon: %d' % toonId)
return
elif self.fsm.getCurrentState().getName() != 'WaitForInput':
self.notify.warning('requestPetProxy() - in state: %s' % self.fsm.getCurrentState().getName())
return
elif self.activeToons.count(toonId) == 0:
self.notify.warning('requestPetProxy() - toon: %d not in toon list' % toonId)
return
self.notify.debug('requestPetProxy(%s, %s)' % (toonId, av))
toon = self.getToon(toonId)
if toon == None:
self.notify.warning('requestPetProxy() - no toon: %d' % toonId)
return
petId = toon.getPetId()
zoneId = self.zoneId
if petId == av:
            if toonId not in self.pets:
def handleGetPetProxy(success, pet, petId = petId, zoneId = zoneId, toonId = toonId):
if success:
petProxy = DistributedPetProxyAI.DistributedPetProxyAI(self.air)
petProxy.setOwnerId(pet.getOwnerId())
petProxy.setPetName(pet.getPetName())
petProxy.setTraitSeed(pet.getTraitSeed())
petProxy.setSafeZone(pet.getSafeZone())
petProxy.setForgetfulness(pet.getForgetfulness())
petProxy.setBoredomThreshold(pet.getBoredomThreshold())
petProxy.setRestlessnessThreshold(pet.getRestlessnessThreshold())
petProxy.setPlayfulnessThreshold(pet.getPlayfulnessThreshold())
petProxy.setLonelinessThreshold(pet.getLonelinessThreshold())
petProxy.setSadnessThreshold(pet.getSadnessThreshold())
petProxy.setFatigueThreshold(pet.getFatigueThreshold())
petProxy.setHungerThreshold(pet.getHungerThreshold())
petProxy.setConfusionThreshold(pet.getConfusionThreshold())
petProxy.setExcitementThreshold(pet.getExcitementThreshold())
petProxy.setAngerThreshold(pet.getAngerThreshold())
petProxy.setSurpriseThreshold(pet.getSurpriseThreshold())
petProxy.setAffectionThreshold(pet.getAffectionThreshold())
petProxy.setHead(pet.getHead())
petProxy.setEars(pet.getEars())
petProxy.setNose(pet.getNose())
petProxy.setTail(pet.getTail())
petProxy.setBodyTexture(pet.getBodyTexture())
petProxy.setColor(pet.getColor())
petProxy.setColorScale(pet.getColorScale())
petProxy.setEyeColor(pet.getEyeColor())
petProxy.setGender(pet.getGender())
petProxy.setLastSeenTimestamp(pet.getLastSeenTimestamp())
petProxy.setBoredom(pet.getBoredom())
petProxy.setRestlessness(pet.getRestlessness())
petProxy.setPlayfulness(pet.getPlayfulness())
petProxy.setLoneliness(pet.getLoneliness())
petProxy.setSadness(pet.getSadness())
petProxy.setAffection(pet.getAffection())
petProxy.setHunger(pet.getHunger())
petProxy.setConfusion(pet.getConfusion())
petProxy.setExcitement(pet.getExcitement())
petProxy.setFatigue(pet.getFatigue())
petProxy.setAnger(pet.getAnger())
petProxy.setSurprise(pet.getSurprise())
petProxy.setTrickAptitudes(pet.getTrickAptitudes())
pet.requestDelete()
def deleted(task):
petProxy.doNotDeallocateChannel = True
petProxy.generateWithRequiredAndId(petId, self.air.districtId, self.zoneId)
petProxy.broadcastDominantMood()
self.pets[toonId] = petProxy
return task.done
self.acceptOnce(self.air.getAvatarExitEvent(petId),
lambda: taskMgr.doMethodLater(0,
deleted, self.uniqueName('petdel-%d' % petId)))
else:
self.notify.warning('error generating petProxy: %s' % petId)
self.getPetProxyObject(petId, handleGetPetProxy)
def suitCanJoin(self):
return len(self.suits) < self.maxSuits and self.isJoinable()
def toonCanJoin(self):
return len(self.toons) < 4 and self.isJoinable()
def __requestMovie(self, timeout = 0):
if self.adjustFsm.getCurrentState().getName() == 'Adjusting':
self.notify.debug('__requestMovie() - in Adjusting')
self.movieRequested = 1
else:
movieDelay = 0
if len(self.activeToons) == 0:
self.notify.warning('only pending toons left in battle %s, toons = %s' % (self.doId, self.toons))
elif len(self.activeSuits) == 0:
self.notify.warning('only pending suits left in battle %s, suits = %s' % (self.doId, self.suits))
elif len(self.activeToons) > 1 and not timeout:
movieDelay = 1
self.fsm.request('MakeMovie')
if movieDelay:
taskMgr.doMethodLater(0.8, self.__makeMovie, self.uniqueName('make-movie'))
self.taskNames.append(self.uniqueName('make-movie'))
else:
self.__makeMovie()
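    # Resolves the round: fills in empty attacks for toons that passed or never chose, runs
    # BattleCalculatorAI.calculateRound(), sends earned experience, then broadcasts the
    # movie and switches to PlayMovie.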
def __makeMovie(self, task = None):
self.notify.debug('makeMovie()')
if self._DOAI_requestedDelete:
self.notify.warning('battle %s requested delete, then __makeMovie was called!' % self.doId)
if hasattr(self, 'levelDoId'):
self.notify.warning('battle %s in level %s' % (self.doId, self.levelDoId))
return
self.__removeTaskName(self.uniqueName('make-movie'))
if self.movieHasBeenMade == 1:
self.notify.debug('__makeMovie() - movie has already been made')
return
self.movieRequested = 0
self.movieHasBeenMade = 1
self.movieHasPlayed = 0
self.rewardHasPlayed = 0
for t in self.activeToons:
if t not in self.toonAttacks:
self.toonAttacks[t] = getToonAttack(t)
attack = self.toonAttacks[t]
if attack[TOON_TRACK_COL] == PASS or attack[TOON_TRACK_COL] == UN_ATTACK:
self.toonAttacks[t] = getToonAttack(t)
if self.toonAttacks[t][TOON_TRACK_COL] != NO_ATTACK:
self.addHelpfulToon(t)
self.battleCalc.calculateRound()
for t in self.activeToons:
self.sendEarnedExperience(t)
toon = self.getToon(t)
if toon != None:
toon.hpOwnedByBattle = 1
if toon.immortalMode:
toon.toonUp(toon.maxHp)
self.d_setMovie()
self.b_setState('PlayMovie')
return Task.done
def sendEarnedExperience(self, toonId):
toon = self.getToon(toonId)
if toon != None:
expList = self.battleCalc.toonSkillPtsGained.get(toonId, None)
if expList == None:
toon.d_setEarnedExperience([])
else:
roundList = []
for exp in expList:
roundList.append(int(exp + 0.5))
toon.d_setEarnedExperience(roundList)
def enterOff(self):
return
def exitOff(self):
return
def enterFaceOff(self):
return
def exitFaceOff(self):
return
def enterWaitForJoin(self):
self.notify.debug('enterWaitForJoin()')
if len(self.activeSuits) > 0:
self.b_setState('WaitForInput')
else:
self.notify.debug('enterWaitForJoin() - no active suits')
self.runableFsm.request('Runable')
self.resetResponses()
self.__requestAdjust()
def exitWaitForJoin(self):
pass
def enterWaitForInput(self):
self.notify.debug('enterWaitForInput()')
self.joinableFsm.request('Joinable')
self.runableFsm.request('Runable')
self.resetResponses()
self.__requestAdjust()
if not self.tutorialFlag:
self.timer.startCallback(SERVER_INPUT_TIMEOUT, self.__serverTimedOut)
self.npcAttacks = {}
for toonId in self.toons:
if bboard.get('autoRestock-%s' % toonId, False):
toon = self.air.doId2do.get(toonId)
if toon is not None:
toon.doRestock(0)
def exitWaitForInput(self):
self.npcAttacks = {}
self.timer.stop()
def __serverTimedOut(self):
self.notify.debug('wait for input timed out on server')
self.ignoreResponses = 1
self.__requestMovie(timeout=1)
def enterMakeMovie(self):
self.notify.debug('enterMakeMovie()')
self.runableFsm.request('Unrunable')
self.resetResponses()
def exitMakeMovie(self):
pass
def enterPlayMovie(self):
self.notify.debug('enterPlayMovie()')
self.joinableFsm.request('Joinable')
self.runableFsm.request('Unrunable')
self.resetResponses()
movieTime = TOON_ATTACK_TIME * (len(self.activeToons) + self.numNPCAttacks) + SUIT_ATTACK_TIME * len(self.activeSuits) + SERVER_BUFFER_TIME
self.numNPCAttacks = 0
self.notify.debug('estimated upper bound of movie time: %f' % movieTime)
self.timer.startCallback(movieTime, self.__serverMovieDone)
def __serverMovieDone(self):
self.notify.debug('movie timed out on server')
self.ignoreResponses = 1
self.__movieDone()
def serverRewardDone(self):
self.notify.debug('reward timed out on server')
self.ignoreResponses = 1
self.handleRewardDone()
def handleRewardDone(self):
self.b_setState('Resume')
def exitPlayMovie(self):
self.timer.stop()
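    # Post-movie bookkeeping: applies the calculator's heals and damage to real toon HP,
    # resolves traps and lures, removes defeated suits and toons, then hands control back
    # to the movie/adjust machinery.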
def __movieDone(self):
self.notify.debug('__movieDone() - movie is finished')
if self.movieHasPlayed == 1:
self.notify.debug('__movieDone() - movie had already finished')
return
self.movieHasBeenMade = 0
self.movieHasPlayed = 1
self.ignoreResponses = 1
needUpdate = 0
toonHpDict = {}
for toon in self.activeToons:
toonHpDict[toon] = [0, 0, 0]
actualToon = self.getToon(toon)
self.notify.debug('BEFORE ROUND: toon: %d hp: %d' % (toon, actualToon.hp))
deadSuits = []
trapDict = {}
suitsLuredOntoTraps = []
npcTrapAttacks = []
for activeToon in self.activeToons + self.exitedToons:
if activeToon in self.toonAttacks:
attack = self.toonAttacks[activeToon]
track = attack[TOON_TRACK_COL]
npc_level = None
if track == NPCSOS:
track, npc_level, npc_hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL])
if track == None:
track = NPCSOS
elif track == TRAP:
npcTrapAttacks.append(attack)
toon = self.getToon(attack[TOON_ID_COL])
av = attack[TOON_TGT_COL]
if toon != None and av in toon.NPCFriendsDict:
toon.NPCFriendsDict[av] -= 1
if toon.NPCFriendsDict[av] <= 0:
del toon.NPCFriendsDict[av]
toon.d_setNPCFriendsDict(toon.NPCFriendsDict)
continue
if track != NO_ATTACK:
toonId = attack[TOON_ID_COL]
level = attack[TOON_LVL_COL]
if npc_level != None:
level = npc_level
if attack[TOON_TRACK_COL] == NPCSOS:
toon = self.getToon(toonId)
av = attack[TOON_TGT_COL]
if toon != None and av in toon.NPCFriendsDict:
toon.NPCFriendsDict[av] -= 1
if toon.NPCFriendsDict[av] <= 0:
del toon.NPCFriendsDict[av]
toon.d_setNPCFriendsDict(toon.NPCFriendsDict)
elif track == PETSOS:
pass
elif track == FIRE:
pass
elif track != SOS:
toon = self.getToon(toonId)
if toon != None:
check = toon.inventory.useItem(track, level)
if check == -1:
self.air.writeServerEvent('suspicious', toonId, 'Toon generating movie for non-existant gag track %s level %s' % (track, level))
self.notify.warning('generating movie for non-existant gag track %s level %s! avId: %s' % (track, level, toonId))
toon.d_setInventory(toon.inventory.makeNetString())
hps = attack[TOON_HP_COL]
if track == SOS:
self.notify.debug('toon: %d called for help' % toonId)
elif track == NPCSOS:
self.notify.debug('toon: %d called for help' % toonId)
elif track == PETSOS:
self.notify.debug('toon: %d called for pet' % toonId)
for i in range(len(self.activeToons)):
toon = self.getToon(self.activeToons[i])
if toon != None:
if i < len(hps):
hp = hps[i]
if hp > 0:
toonHpDict[toon.doId][0] += hp
self.notify.debug('pet heal: toon: %d healed for hp: %d' % (toon.doId, hp))
else:
self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps))
elif track == NPC_RESTOCK_GAGS:
for at in self.activeToons:
toon = self.getToon(at)
if toon != None:
toon.inventory.NPCMaxOutInv(npc_level)
toon.d_setInventory(toon.inventory.makeNetString())
elif track == HEAL:
if levelAffectsGroup(HEAL, level):
for i in range(len(self.activeToons)):
at = self.activeToons[i]
if at != toonId or attack[TOON_TRACK_COL] == NPCSOS:
toon = self.getToon(at)
if toon != None:
if i < len(hps):
hp = hps[i]
else:
self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps))
hp = 0
toonHpDict[toon.doId][0] += hp
self.notify.debug('HEAL: toon: %d healed for hp: %d' % (toon.doId, hp))
else:
targetId = attack[TOON_TGT_COL]
toon = self.getToon(targetId)
if toon != None and targetId in self.activeToons:
targetIndex = self.activeToons.index(targetId)
if targetIndex < len(hps):
hp = hps[targetIndex]
else:
self.notify.warning('Invalid targetIndex %s in hps %s.' % (targetIndex, hps))
hp = 0
toonHpDict[toon.doId][0] += hp
elif attackAffectsGroup(track, level, attack[TOON_TRACK_COL]):
for suit in self.activeSuits:
targetIndex = self.activeSuits.index(suit)
if targetIndex < 0 or targetIndex >= len(hps):
self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track,
level,
targetIndex,
len(hps),
hps))
else:
hp = hps[targetIndex]
if hp > 0 and track == LURE:
if suit.battleTrap == UBER_GAG_LEVEL_INDEX:
pass
suit.battleTrap = NO_TRAP
needUpdate = 1
if suit.doId in trapDict:
del trapDict[suit.doId]
if suitsLuredOntoTraps.count(suit) == 0:
suitsLuredOntoTraps.append(suit)
if track == TRAP:
targetId = suit.doId
if targetId in trapDict:
trapDict[targetId].append(attack)
else:
trapDict[targetId] = [attack]
needUpdate = 1
died = attack[SUIT_DIED_COL] & 1 << targetIndex
if died != 0:
if deadSuits.count(suit) == 0:
deadSuits.append(suit)
else:
targetId = attack[TOON_TGT_COL]
target = self.findSuit(targetId)
if target != None:
targetIndex = self.activeSuits.index(target)
if targetIndex < 0 or targetIndex >= len(hps):
self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track,
level,
targetIndex,
len(hps),
hps))
else:
hp = hps[targetIndex]
if track == TRAP:
if targetId in trapDict:
trapDict[targetId].append(attack)
else:
trapDict[targetId] = [attack]
if hp > 0 and track == LURE:
oldBattleTrap = target.battleTrap
if oldBattleTrap == UBER_GAG_LEVEL_INDEX:
pass
target.battleTrap = NO_TRAP
needUpdate = 1
if target.doId in trapDict:
del trapDict[target.doId]
if suitsLuredOntoTraps.count(target) == 0:
suitsLuredOntoTraps.append(target)
if oldBattleTrap == UBER_GAG_LEVEL_INDEX:
for otherSuit in self.activeSuits:
if not otherSuit == target:
otherSuit.battleTrap = NO_TRAP
if otherSuit.doId in trapDict:
del trapDict[otherSuit.doId]
died = attack[SUIT_DIED_COL] & 1 << targetIndex
if died != 0:
if deadSuits.count(target) == 0:
deadSuits.append(target)
self.exitedToons = []
for suitKey in trapDict.keys():
attackList = trapDict[suitKey]
attack = attackList[0]
target = self.findSuit(attack[TOON_TGT_COL])
if attack[TOON_LVL_COL] == UBER_GAG_LEVEL_INDEX:
targetId = suitKey
target = self.findSuit(targetId)
if len(attackList) == 1:
if suitsLuredOntoTraps.count(target) == 0:
self.notify.debug('movieDone() - trap set')
target.battleTrap = attack[TOON_LVL_COL]
needUpdate = 1
else:
target.battleTrap = NO_TRAP
else:
self.notify.debug('movieDone() - traps collided')
if target != None:
target.battleTrap = NO_TRAP
if self.battleCalc.trainTrapTriggered:
self.notify.debug('Train trap triggered, clearing all traps')
for otherSuit in self.activeSuits:
self.notify.debug('suit =%d, oldBattleTrap=%d' % (otherSuit.doId, otherSuit.battleTrap))
otherSuit.battleTrap = NO_TRAP
currLuredSuits = self.battleCalc.getLuredSuits()
if len(self.luredSuits) == len(currLuredSuits):
for suit in self.luredSuits:
if currLuredSuits.count(suit.doId) == 0:
needUpdate = 1
break
else:
needUpdate = 1
self.luredSuits = []
for i in currLuredSuits:
suit = self.air.doId2do[i]
self.luredSuits.append(suit)
self.notify.debug('movieDone() - suit: %d is lured' % i)
for attack in npcTrapAttacks:
track, level, hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL])
for suit in self.activeSuits:
if self.luredSuits.count(suit) == 0 and suit.battleTrap == NO_TRAP:
suit.battleTrap = level
needUpdate = 1
for suit in deadSuits:
self.notify.debug('removing dead suit: %d' % suit.doId)
if suit.isDeleted():
self.notify.debug('whoops, suit %d is deleted.' % suit.doId)
else:
self.notify.debug('suit had revives? %d' % suit.getMaxSkeleRevives())
encounter = {'type': suit.dna.name,
'level': suit.getActualLevel(),
'track': suit.dna.dept,
'isSkelecog': suit.getSkelecog(),
'isForeman': suit.isForeman(),
'isVP': 0,
'isCFO': 0,
'isSupervisor': suit.isSupervisor(),
'isVirtual': suit.isVirtual(),
'hasRevives': suit.getMaxSkeleRevives(),
'activeToons': self.activeToons[:]}
self.suitsKilled.append(encounter)
self.suitsKilledThisBattle.append(encounter)
self.air.suitInvasionManager.handleSuitDefeated()
self.__removeSuit(suit)
needUpdate = 1
suit.resume()
lastActiveSuitDied = 0
if len(self.activeSuits) == 0 and len(self.pendingSuits) == 0:
lastActiveSuitDied = 1
for i in range(4):
attack = self.suitAttacks[i][SUIT_ATK_COL]
if attack != NO_ATTACK:
suitId = self.suitAttacks[i][SUIT_ID_COL]
suit = self.findSuit(suitId)
if suit == None:
self.notify.warning('movieDone() - suit: %d is gone!' % suitId)
continue
if not (hasattr(suit, 'dna') and suit.dna):
toonId = self.air.getAvatarIdFromSender()
self.notify.warning('_movieDone avoiding crash, sender=%s but suit has no dna' % toonId)
self.air.writeServerEvent('suspicious', toonId, '_movieDone avoiding crash, suit has no dna')
continue
adict = getSuitAttack(suit.getStyleName(), suit.getLevel(), attack)
hps = self.suitAttacks[i][SUIT_HP_COL]
if adict['group'] == ATK_TGT_GROUP:
for activeToon in self.activeToons:
toon = self.getToon(activeToon)
if toon != None:
targetIndex = self.activeToons.index(activeToon)
toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex
if targetIndex >= len(hps):
self.notify.warning('DAMAGE: toon %s is no longer in battle!' % activeToon)
else:
hp = hps[targetIndex]
if hp > 0:
self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (activeToon, hp))
if toonDied != 0:
toonHpDict[toon.doId][2] = 1
toonHpDict[toon.doId][1] += hp
elif adict['group'] == ATK_TGT_SINGLE:
targetIndex = self.suitAttacks[i][SUIT_TGT_COL]
if targetIndex >= len(self.activeToons):
self.notify.warning('movieDone() - toon: %d gone!' % targetIndex)
break
toonId = self.activeToons[targetIndex]
toon = self.getToon(toonId)
toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex
if targetIndex >= len(hps):
self.notify.warning('DAMAGE: toon %s is no longer in battle!' % toonId)
else:
hp = hps[targetIndex]
if hp > 0:
self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (toonId, hp))
if toonDied != 0:
toonHpDict[toon.doId][2] = 1
toonHpDict[toon.doId][1] += hp
deadToons = []
for activeToon in self.activeToons:
hp = toonHpDict[activeToon]
toon = self.getToon(activeToon)
if toon != None:
self.notify.debug('AFTER ROUND: currtoonHP: %d toonMAX: %d hheal: %d damage: %d' % (toon.hp,
toon.maxHp,
hp[0],
hp[1]))
toon.hpOwnedByBattle = 0
hpDelta = hp[0] - hp[1]
if hpDelta >= 0:
toon.toonUp(hpDelta, quietly=1)
else:
toon.takeDamage(-hpDelta, quietly=1)
if toon.hp <= 0:
self.notify.debug('movieDone() - toon: %d was killed' % activeToon)
toon.inventory.zeroInv(1)
deadToons.append(activeToon)
self.notify.debug('AFTER ROUND: toon: %d setHp: %d' % (toon.doId, toon.hp))
if toon.unlimitedGags:
toon.doRestock(noUber=0, noPaid=0)
for deadToon in deadToons:
self.__removeToon(deadToon)
needUpdate = 1
self.clearAttacks()
self.d_setMovie()
self.d_setChosenToonAttacks()
self.localMovieDone(needUpdate, deadToons, deadSuits, lastActiveSuitDied)
def enterResume(self):
for suit in self.suits:
self.notify.info('battle done, resuming suit: %d' % suit.doId)
if suit.isDeleted():
self.notify.info('whoops, suit %d is deleted.' % suit.doId)
else:
suit.resume()
self.suits = []
self.joiningSuits = []
self.pendingSuits = []
self.adjustingSuits = []
self.activeSuits = []
self.luredSuits = []
for toonId in self.toons:
toon = simbase.air.doId2do.get(toonId)
if toon:
toon.b_setBattleId(0)
messageToonReleased = 'Battle releasing toon %s' % toon.doId
messenger.send(messageToonReleased, [toon.doId])
for exitEvent in self.avatarExitEvents:
self.ignore(exitEvent)
eventMsg = {}
for encounter in self.suitsKilledThisBattle:
cog = encounter['type']
level = encounter['level']
msgName = '%s%s' % (cog, level)
if encounter['isSkelecog']:
msgName += '+'
if msgName in eventMsg:
eventMsg[msgName] += 1
else:
eventMsg[msgName] = 1
msgText = ''
for msgName, count in eventMsg.items():
if msgText != '':
msgText += ','
msgText += '%s%s' % (count, msgName)
self.air.writeServerEvent('battleCogsDefeated', self.doId, '%s|%s' % (msgText, self.getTaskZoneId()))
def exitResume(self):
pass
def isJoinable(self):
return self.joinableFsm.getCurrentState().getName() == 'Joinable'
def enterJoinable(self):
self.notify.debug('enterJoinable()')
def exitJoinable(self):
pass
def enterUnjoinable(self):
self.notify.debug('enterUnjoinable()')
def exitUnjoinable(self):
pass
def isRunable(self):
return self.runableFsm.getCurrentState().getName() == 'Runable'
def enterRunable(self):
self.notify.debug('enterRunable()')
def exitRunable(self):
pass
def enterUnrunable(self):
self.notify.debug('enterUnrunable()')
def exitUnrunable(self):
pass
def __estimateAdjustTime(self):
self.needAdjust = 0
adjustTime = 0
if len(self.pendingSuits) > 0 or self.suitGone == 1:
self.suitGone = 0
pos0 = self.suitPendingPoints[0][0]
pos1 = self.suitPoints[0][0][0]
adjustTime = self.calcSuitMoveTime(pos0, pos1)
if len(self.pendingToons) > 0 or self.toonGone == 1:
self.toonGone = 0
if adjustTime == 0:
pos0 = self.toonPendingPoints[0][0]
pos1 = self.toonPoints[0][0][0]
adjustTime = self.calcToonMoveTime(pos0, pos1)
return adjustTime
def enterAdjusting(self):
self.notify.debug('enterAdjusting()')
self.timer.stop()
self.__resetAdjustingResponses()
self.adjustingTimer.startCallback(self.__estimateAdjustTime() + SERVER_BUFFER_TIME, self.__serverAdjustingDone)
def __serverAdjustingDone(self):
if self.needAdjust == 1:
self.adjustFsm.request('NotAdjusting')
self.__requestAdjust()
else:
self.notify.debug('adjusting timed out on the server')
self.ignoreAdjustingResponses = 1
self.__adjustDone()
def exitAdjusting(self):
currStateName = self.fsm.getCurrentState().getName()
if currStateName == 'WaitForInput':
self.timer.restart()
elif currStateName == 'WaitForJoin':
self.b_setState('WaitForInput')
self.adjustingTimer.stop()
def __addTrainTrapForNewSuits(self):
hasTrainTrap = False
trapInfo = None
for otherSuit in self.activeSuits:
if otherSuit.battleTrap == UBER_GAG_LEVEL_INDEX:
hasTrainTrap = True
if hasTrainTrap:
for curSuit in self.activeSuits:
if not curSuit.battleTrap == UBER_GAG_LEVEL_INDEX:
oldBattleTrap = curSuit.battleTrap
curSuit.battleTrap = UBER_GAG_LEVEL_INDEX
self.battleCalc.addTrainTrapForJoiningSuit(curSuit.doId)
self.notify.debug('setting traintrack trap for joining suit %d oldTrap=%s' % (curSuit.doId, oldBattleTrap))
def __adjustDone(self):
for s in self.adjustingSuits:
self.pendingSuits.remove(s)
self.activeSuits.append(s)
self.adjustingSuits = []
for toon in self.adjustingToons:
if self.pendingToons.count(toon) == 1:
self.pendingToons.remove(toon)
else:
self.notify.warning('adjustDone() - toon: %d not pending!' % toon.doId)
if self.activeToons.count(toon) == 0:
self.activeToons.append(toon)
self.ignoreResponses = 0
self.sendEarnedExperience(toon)
else:
self.notify.warning('adjustDone() - toon: %d already active!' % toon.doId)
self.adjustingToons = []
self.__addTrainTrapForNewSuits()
self.d_setMembers()
self.adjustFsm.request('NotAdjusting')
if self.needAdjust == 1:
self.notify.debug('__adjustDone() - need to adjust again')
self.__requestAdjust()
def enterNotAdjusting(self):
self.notify.debug('enterNotAdjusting()')
if self.movieRequested == 1:
if len(self.activeToons) > 0 and self.__allActiveToonsResponded():
self.__requestMovie()
def exitNotAdjusting(self):
pass
def getPetProxyObject(self, petId, callback):
doneEvent = 'generate-%d' % petId
def handlePetProxyRead(pet):
callback(1, pet)
self.air.sendActivate(petId, self.air.districtId, 0)
self.acceptOnce(doneEvent, handlePetProxyRead)
def _getNextSerialNum(self):
num = self.serialNum
self.serialNum += 1
return num
def setFireCount(self, amount):
self.fireCount = amount
def getFireCount(self):
return self.fireCount
@magicWord(category=CATEGORY_PROGRAMMER)
def skipMovie():
invoker = spellbook.getInvoker()
battleId = invoker.getBattleId()
if not battleId:
return 'You are not currently in a battle!'
battle = simbase.air.doId2do.get(battleId)
battle._DistributedBattleBaseAI__movieDone()
return 'Battle movie skipped.'
| en | 0.592317 | #TODO: fixme #disconnectCode = self.air.getAvatarDisconnectReason(avId) #userAborted = disconnectCode == ToontownGlobals.DisconnectCloseWindow #TODO: fixme #Not allowed to fire, force them to pass >:D #Allowed to fire | 1.766072 | 2 |
tracking_test.py | HsunGong/Augmented-Advertisement | 5 | 9374 | # Copyright (c) Group Three-Forest SJTU. All Rights Reserved.
from tracking.tracking import *
# a = tracking_video_rectangle("video/","1.mp4",[[273,352],[266,616],[412,620],[416,369]])
a = tracking_video_rectangle_tovideo("video/","1.mp4", "1.png", [[273,352],[266,616],[412,620],[416,369]], result = 'result__.avi', method_num = 5, edge = 4, middle_halt = 250)
| # Copyright (c) Group Three-Forest SJTU. All Rights Reserved.
from tracking.tracking import *
# a = tracking_video_rectangle("video/","1.mp4",[[273,352],[266,616],[412,620],[416,369]])
a = tracking_video_rectangle_tovideo("video/","1.mp4", "1.png", [[273,352],[266,616],[412,620],[416,369]], result = 'result__.avi', method_num = 5, edge = 4, middle_halt = 250)
| en | 0.701191 | # Copyright (c) Group Three-Forest SJTU. All Rights Reserved. # a = tracking_video_rectangle("video/","1.mp4",[[273,352],[266,616],[412,620],[416,369]]) | 1.590424 | 2 |
gym_flock/envs/old/flocking_position.py | katetolstaya/gym-flock | 19 | 9375 | import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
from scipy.spatial.distance import pdist, squareform
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family' : 'sans-serif',
'weight' : 'bold',
'size' : 14}
class FlockingEnv(gym.Env):
def __init__(self):
config_file = path.join(path.dirname(__file__), "params_flock.cfg")
config = configparser.ConfigParser()
config.read(config_file)
config = config['flock']
self.fig = None
self.line1 = None
self.filter_len = int(config['filter_length'])
self.nx_system = 4
self.n_nodes = int(config['network_size'])
self.comm_radius = float(config['comm_radius'])
self.dt = float(config['system_dt'])
self.v_max = float(config['max_vel_init'])
self.v_bias = self.v_max # 0.5 * self.v_max
self.r_max = float(config['max_rad_init'])
self.std_dev = float(config['std_dev']) * self.dt
self.pooling = []
if config.getboolean('sum_pooling'):
self.pooling.append(np.nansum)
if config.getboolean('min_pooling'):
self.pooling.append(np.nanmin)
if config.getboolean('max_pooling'):
self.pooling.append(np.nanmax)
self.n_pools = len(self.pooling)
# number of features and outputs
self.n_features = int(config['N_features'])
self.nx = int(self.n_features / self.n_pools / self.filter_len)
self.nu = int(config['N_outputs']) # outputs
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x = np.zeros((self.n_nodes, self.nx_system))
self.u = np.zeros((self.n_nodes, self.nu))
self.mean_vel = np.zeros((self.n_nodes, self.nu))
# TODO
self.max_accel = 40
self.max_z = 200
# self.b = np.ones((self.n_nodes,1))
# self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(self.n_nodes, 2), dtype=np.float32 )
# self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(
# self.n_nodes, self.nx * self.filter_len * self.n_pools) , dtype=np.float32)
self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2,) , dtype=np.float32 )
self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(self.n_features, ), dtype=np.float32)
self.seed()
def render(self, mode='human'):
if self.fig is None:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma
ax.plot([0], [0], 'kx')
plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max)
plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
x = self.x
x_ = np.zeros((self.n_nodes, self.nx_system))
#u = np.vstack((np.zeros((self.n_leaders, 2)), u))
# x position
x_[:, 0] = x[:, 0] + x[:, 2] * self.dt
# y position
x_[:, 1] = x[:, 1] + x[:, 3] * self.dt
# x velocity
x_[:, 2] = x[:, 2] + 0.1 * u[:, 0] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# y velocity
x_[:, 3] = x[:, 3] + 0.1 * u[:, 1] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# TODO - check the 0.1
self.x = x_
self.x_agg = self.aggregate(self.x, self.x_agg)
self.u = u
return self._get_obs(), -self.instant_cost(), False, {}
def instant_cost(self): # sum of differences in velocities
return np.sum(np.var(self.x[:, 2:4], axis=0)) #+ np.sum(np.square(self.u)) * 0.00001
#return np.sum(np.square(self.x[:,2:4] - self.mean_vel))
def _get_obs(self):
reshaped = self.x_agg.reshape((self.n_nodes, self.n_features))
clipped = np.clip(reshaped, a_min=-self.max_z, a_max=self.max_z)
return clipped #[self.n_leaders:, :]
def reset(self):
x = np.zeros((self.n_nodes, self.nx_system))
degree = 0
min_dist = 0
while degree < 2 or min_dist < 0.1: # < 0.25: # 0.25: #0.5: #min_dist < 0.25:
# randomly initialize the state of all agents
length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_nodes,)))
angle = np.pi * np.random.uniform(0, 2, size=(self.n_nodes,))
x[:, 0] = length * np.cos(angle)
x[:, 1] = length * np.sin(angle)
bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[1]
# compute distances between agents
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
# no self loops
a_net = a_net + 2 * self.comm_radius * np.eye(self.n_nodes)
# compute minimum distance between agents and degree of network
min_dist = np.min(np.min(a_net))
a_net = a_net < self.comm_radius
degree = np.min(np.sum(a_net.astype(int), axis=1))
self.mean_vel = np.mean(x[:,2:4],axis=0)
self.x = x
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x_agg = self.aggregate(self.x, self.x_agg)
return self._get_obs()
# def render(self, mode='human'):
# pass
def close(self):
pass
def aggregate(self, xt, x_agg):
"""
        Perform aggregation operation for all possible pooling operations using helper functions get_pool and get_comms
Args:
x_agg (): Last time step's aggregated info
xt (): Current state of all agents
Returns:
Aggregated state values
"""
x_features = self.get_x_features(xt)
a_net = self.get_connectivity(xt)
for k in range(0, self.n_pools):
comm_data = self.get_comms(np.dstack((x_features, self.get_features(x_agg[:, :, k]))), a_net)
x_agg[:, :, k] = self.get_pool(comm_data, self.pooling[k])
return x_agg
def get_connectivity(self, x):
"""
Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist
Args:
x (): current states of all agents
Returns: adjacency matrix of network
"""
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
a_net = (a_net < self.comm_radius).astype(float)
np.fill_diagonal(a_net, 0)
return a_net
def get_x_features(self, xt): # TODO
"""
Compute the non-linear features necessary for implementing Turner 2003
Args:
xt (): current state of all agents
Returns: matrix of features for each agent
"""
diff = xt.reshape((self.n_nodes, 1, self.nx_system)) - xt.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(diff[:, :, 0], diff[:, :, 0]) + np.multiply(diff[:, :, 1], diff[:, :, 1]) + np.eye(
self.n_nodes)
return np.dstack((diff[:, :, 2], np.divide(diff[:, :, 0], np.multiply(r2, r2)), np.divide(diff[:, :, 0], r2),
diff[:, :, 3], np.divide(diff[:, :, 1], np.multiply(r2, r2)), np.divide(diff[:, :, 1], r2)))
def get_features(self, agg):
"""
        Matrix of previously aggregated features, tiled so each agent's aggregate can be shared with its neighbors
Args:
agg (): the aggregated matrix from the last time step
Returns: matrix of aggregated features from all nodes at current time
"""
return np.tile(agg[:, :-self.nx].reshape((self.n_nodes, 1, -1)), (1, self.n_nodes, 1)) # TODO check indexing
def get_comms(self, mat, a_net):
"""
Enforces that agents who are not connected in the network cannot observe each others' states
Args:
mat (): matrix of state information for the whole graph
a_net (): adjacency matrix for flock network (weighted networks unsupported for now)
Returns:
mat (): sparse matrix with NaN values where agents can't communicate
"""
a_net[a_net == 0] = np.nan
return mat * a_net.reshape(self.n_nodes, self.n_nodes, 1)
def get_pool(self, mat, func):
"""
Perform pooling operations on the matrix of state information. The replacement of values with NaNs for agents who
can't communicate must already be enforced.
Args:
mat (): matrix of state information
func (): pooling function (np.nansum(), np.nanmin() or np.nanmax()). Must ignore NaNs.
Returns:
information pooled from neighbors for each agent
"""
return func(mat, axis=1).reshape((self.n_nodes, self.n_features)) # TODO check this axis = 1
def controller(self):
"""
The controller for flocking from Turner 2003.
Args:
x (): the current state
Returns: the optimal action
"""
x = self.x
s_diff = x.reshape((self.n_nodes, 1, self.nx_system)) - x.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(s_diff[:, :, 0], s_diff[:, :, 0]) + np.multiply(s_diff[:, :, 1], s_diff[:, :, 1]) + np.eye(
self.n_nodes)
p = np.dstack((s_diff, self.potential_grad(s_diff[:, :, 0], r2), self.potential_grad(s_diff[:, :, 1], r2)))
p_sum = np.nansum(p, axis=1).reshape((self.n_nodes, self.nx_system + 2))
return np.hstack(((- p_sum[:, 4] - p_sum[:, 2]).reshape((-1, 1)), (- p_sum[:, 3] - p_sum[:, 5]).reshape(-1, 1)))
def potential_grad(self, pos_diff, r2):
"""
Computes the gradient of the potential function for flocking proposed in Turner 2003.
Args:
pos_diff (): difference in a component of position among all agents
r2 (): distance squared between agents
Returns: corresponding component of the gradient of the potential
"""
grad = -2.0 * np.divide(pos_diff, np.multiply(r2, r2)) + 2 * np.divide(pos_diff, r2)
grad[r2 > self.comm_radius] = 0
return grad
| import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
from scipy.spatial.distance import pdist, squareform
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family' : 'sans-serif',
'weight' : 'bold',
'size' : 14}
class FlockingEnv(gym.Env):
def __init__(self):
config_file = path.join(path.dirname(__file__), "params_flock.cfg")
config = configparser.ConfigParser()
config.read(config_file)
config = config['flock']
self.fig = None
self.line1 = None
self.filter_len = int(config['filter_length'])
self.nx_system = 4
self.n_nodes = int(config['network_size'])
self.comm_radius = float(config['comm_radius'])
self.dt = float(config['system_dt'])
self.v_max = float(config['max_vel_init'])
self.v_bias = self.v_max # 0.5 * self.v_max
self.r_max = float(config['max_rad_init'])
self.std_dev = float(config['std_dev']) * self.dt
self.pooling = []
if config.getboolean('sum_pooling'):
self.pooling.append(np.nansum)
if config.getboolean('min_pooling'):
self.pooling.append(np.nanmin)
if config.getboolean('max_pooling'):
self.pooling.append(np.nanmax)
self.n_pools = len(self.pooling)
# number of features and outputs
self.n_features = int(config['N_features'])
self.nx = int(self.n_features / self.n_pools / self.filter_len)
self.nu = int(config['N_outputs']) # outputs
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x = np.zeros((self.n_nodes, self.nx_system))
self.u = np.zeros((self.n_nodes, self.nu))
self.mean_vel = np.zeros((self.n_nodes, self.nu))
# TODO
self.max_accel = 40
self.max_z = 200
# self.b = np.ones((self.n_nodes,1))
# self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(self.n_nodes, 2), dtype=np.float32 )
# self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(
# self.n_nodes, self.nx * self.filter_len * self.n_pools) , dtype=np.float32)
self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2,) , dtype=np.float32 )
self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(self.n_features, ), dtype=np.float32)
self.seed()
def render(self, mode='human'):
if self.fig is None:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma
ax.plot([0], [0], 'kx')
plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max)
plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
x = self.x
x_ = np.zeros((self.n_nodes, self.nx_system))
#u = np.vstack((np.zeros((self.n_leaders, 2)), u))
# x position
x_[:, 0] = x[:, 0] + x[:, 2] * self.dt
# y position
x_[:, 1] = x[:, 1] + x[:, 3] * self.dt
# x velocity
x_[:, 2] = x[:, 2] + 0.1 * u[:, 0] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# y velocity
x_[:, 3] = x[:, 3] + 0.1 * u[:, 1] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# TODO - check the 0.1
self.x = x_
self.x_agg = self.aggregate(self.x, self.x_agg)
self.u = u
return self._get_obs(), -self.instant_cost(), False, {}
def instant_cost(self): # sum of differences in velocities
return np.sum(np.var(self.x[:, 2:4], axis=0)) #+ np.sum(np.square(self.u)) * 0.00001
#return np.sum(np.square(self.x[:,2:4] - self.mean_vel))
def _get_obs(self):
reshaped = self.x_agg.reshape((self.n_nodes, self.n_features))
clipped = np.clip(reshaped, a_min=-self.max_z, a_max=self.max_z)
return clipped #[self.n_leaders:, :]
def reset(self):
x = np.zeros((self.n_nodes, self.nx_system))
degree = 0
min_dist = 0
while degree < 2 or min_dist < 0.1: # < 0.25: # 0.25: #0.5: #min_dist < 0.25:
# randomly initialize the state of all agents
length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_nodes,)))
angle = np.pi * np.random.uniform(0, 2, size=(self.n_nodes,))
x[:, 0] = length * np.cos(angle)
x[:, 1] = length * np.sin(angle)
bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[1]
# compute distances between agents
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
# no self loops
a_net = a_net + 2 * self.comm_radius * np.eye(self.n_nodes)
# compute minimum distance between agents and degree of network
min_dist = np.min(np.min(a_net))
a_net = a_net < self.comm_radius
degree = np.min(np.sum(a_net.astype(int), axis=1))
self.mean_vel = np.mean(x[:,2:4],axis=0)
self.x = x
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x_agg = self.aggregate(self.x, self.x_agg)
return self._get_obs()
# def render(self, mode='human'):
# pass
def close(self):
pass
def aggregate(self, xt, x_agg):
"""
        Perform aggregation operation for all possible pooling operations using helper functions get_pool and get_comms
Args:
x_agg (): Last time step's aggregated info
xt (): Current state of all agents
Returns:
Aggregated state values
"""
x_features = self.get_x_features(xt)
a_net = self.get_connectivity(xt)
for k in range(0, self.n_pools):
comm_data = self.get_comms(np.dstack((x_features, self.get_features(x_agg[:, :, k]))), a_net)
x_agg[:, :, k] = self.get_pool(comm_data, self.pooling[k])
return x_agg
def get_connectivity(self, x):
"""
Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist
Args:
x (): current states of all agents
Returns: adjacency matrix of network
"""
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
a_net = (a_net < self.comm_radius).astype(float)
np.fill_diagonal(a_net, 0)
return a_net
def get_x_features(self, xt): # TODO
"""
Compute the non-linear features necessary for implementing Turner 2003
Args:
xt (): current state of all agents
Returns: matrix of features for each agent
"""
diff = xt.reshape((self.n_nodes, 1, self.nx_system)) - xt.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(diff[:, :, 0], diff[:, :, 0]) + np.multiply(diff[:, :, 1], diff[:, :, 1]) + np.eye(
self.n_nodes)
return np.dstack((diff[:, :, 2], np.divide(diff[:, :, 0], np.multiply(r2, r2)), np.divide(diff[:, :, 0], r2),
diff[:, :, 3], np.divide(diff[:, :, 1], np.multiply(r2, r2)), np.divide(diff[:, :, 1], r2)))
def get_features(self, agg):
"""
        Matrix of previously aggregated features, tiled so each agent's aggregate can be shared with its neighbors
Args:
agg (): the aggregated matrix from the last time step
Returns: matrix of aggregated features from all nodes at current time
"""
return np.tile(agg[:, :-self.nx].reshape((self.n_nodes, 1, -1)), (1, self.n_nodes, 1)) # TODO check indexing
def get_comms(self, mat, a_net):
"""
Enforces that agents who are not connected in the network cannot observe each others' states
Args:
mat (): matrix of state information for the whole graph
a_net (): adjacency matrix for flock network (weighted networks unsupported for now)
Returns:
mat (): sparse matrix with NaN values where agents can't communicate
"""
a_net[a_net == 0] = np.nan
return mat * a_net.reshape(self.n_nodes, self.n_nodes, 1)
def get_pool(self, mat, func):
"""
Perform pooling operations on the matrix of state information. The replacement of values with NaNs for agents who
can't communicate must already be enforced.
Args:
mat (): matrix of state information
func (): pooling function (np.nansum(), np.nanmin() or np.nanmax()). Must ignore NaNs.
Returns:
information pooled from neighbors for each agent
"""
return func(mat, axis=1).reshape((self.n_nodes, self.n_features)) # TODO check this axis = 1
def controller(self):
"""
The controller for flocking from Turner 2003.
Args:
x (): the current state
Returns: the optimal action
"""
x = self.x
s_diff = x.reshape((self.n_nodes, 1, self.nx_system)) - x.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(s_diff[:, :, 0], s_diff[:, :, 0]) + np.multiply(s_diff[:, :, 1], s_diff[:, :, 1]) + np.eye(
self.n_nodes)
p = np.dstack((s_diff, self.potential_grad(s_diff[:, :, 0], r2), self.potential_grad(s_diff[:, :, 1], r2)))
p_sum = np.nansum(p, axis=1).reshape((self.n_nodes, self.nx_system + 2))
return np.hstack(((- p_sum[:, 4] - p_sum[:, 2]).reshape((-1, 1)), (- p_sum[:, 3] - p_sum[:, 5]).reshape(-1, 1)))
def potential_grad(self, pos_diff, r2):
"""
Computes the gradient of the potential function for flocking proposed in Turner 2003.
Args:
pos_diff (): difference in a component of position among all agents
r2 (): distance squared between agents
Returns: corresponding component of the gradient of the potential
"""
grad = -2.0 * np.divide(pos_diff, np.multiply(r2, r2)) + 2 * np.divide(pos_diff, r2)
grad[r2 > self.comm_radius] = 0
return grad
| en | 0.763549 | # 0.5 * self.v_max # number of features and outputs # outputs # TODO # self.b = np.ones((self.n_nodes,1)) # self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(self.n_nodes, 2), dtype=np.float32 ) # self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=( # self.n_nodes, self.nx * self.filter_len * self.n_pools) , dtype=np.float32) # Returns a tuple of line objects, thus the comma #u = np.vstack((np.zeros((self.n_leaders, 2)), u)) # x position # y position # x velocity # y velocity # TODO - check the 0.1 # sum of differences in velocities #+ np.sum(np.square(self.u)) * 0.00001 #return np.sum(np.square(self.x[:,2:4] - self.mean_vel)) #[self.n_leaders:, :] # < 0.25: # 0.25: #0.5: #min_dist < 0.25: # randomly initialize the state of all agents # compute distances between agents # x,y location determines connectivity # no self loops # compute minimum distance between agents and degree of network # def render(self, mode='human'): # pass Perform aggegration operation for all possible pooling operations using helper functions get_pool and get_comms Args: x_agg (): Last time step's aggregated info xt (): Current state of all agents Returns: Aggregated state values Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist Args: x (): current states of all agents Returns: adjacency matrix of network # x,y location determines connectivity # TODO Compute the non-linear features necessary for implementing Turner 2003 Args: xt (): current state of all agents Returns: matrix of features for each agent Matrix of Args: agg (): the aggregated matrix from the last time step Returns: matrix of aggregated features from all nodes at current time # TODO check indexing Enforces that agents who are not connected in the network cannot observe each others' states Args: mat (): matrix of state information for the whole graph a_net (): adjacency matrix for flock network (weighted networks unsupported for now) Returns: mat (): sparse matrix with NaN values where agents can't communicate Perform pooling operations on the matrix of state information. The replacement of values with NaNs for agents who can't communicate must already be enforced. Args: mat (): matrix of state information func (): pooling function (np.nansum(), np.nanmin() or np.nanmax()). Must ignore NaNs. Returns: information pooled from neighbors for each agent # TODO check this axis = 1 The controller for flocking from Turner 2003. Args: x (): the current state Returns: the optimal action Computes the gradient of the potential function for flocking proposed in Turner 2003. Args: pos_diff (): difference in a component of position among all agents r2 (): distance squared between agents Returns: corresponding component of the gradient of the potential | 2.221015 | 2 |
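A minimal usage sketch for the FlockingEnv defined above (illustrative only: the import path is an assumption, the environment still needs its params_flock.cfg next to the module, and the built-in controller() is used as the action source because its per-agent output is what step() consumes):

# Illustrative sketch, not part of the original file.
from gym_flock.envs.old.flocking_position import FlockingEnv  # hypothetical import path

env = FlockingEnv()
obs = env.reset()

total_cost = 0.0
for _ in range(100):
    action = env.controller()            # per-agent accelerations, shape (n_nodes, 2)
    obs, reward, done, info = env.step(action)
    total_cost -= reward                 # reward is -instant_cost(), i.e. velocity variance
    if done:
        break

print("accumulated velocity-variance cost:", total_cost)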
conf/constants.py | codingwangfeng/GoodGoodName | 0 | 9376 | # -*-coding:utf-8-*-
# from functools import reduce
from functools import reduce
SANCAI_jixiang = [1, 3, 5, 7, 8, 11, 13, 15, 16, 18, 21, 23, 24, 25, 31, 32, 33, 35, 37, 39, 41, 45, 47, 48, 52, 57, 61,
63,
65, 67, 68, 81] # 吉祥运暗示数(代表健全,幸福,名誉等)
SANCAI_xiaoji = [6, 17, 26, 27, 29, 30, 38, 49, 51, 55, 58, 71, 73, 75] # 次吉祥运暗示数(代表多少有些障碍,但能获得吉运)
SANCAI_xiong = [2, 4, 9, 10, 12, 14, 19, 20, 22, 28, 34, 36, 40, 42, 43, 44, 46, 50, 53, 54, 56, 59, 60, 62, 64, 66, 69,
70,
72, 74, 76, 77, 78, 79, 80] # 凶数运暗示数(代表逆境,沉浮,薄弱,病难,困难,多灾等)
SANCAI_wise = [3, 13, 16, 21, 23, 29, 31, 37, 39, 41, 45, 47] # 首领运暗示数(智慧 )仁勇全备,立上位,能领导众人)
SANCAI_wealth = [15, 16, 24, 29, 32, 33, 41, 52] # 财富运暗示数(多钱财,富贵,白手可获巨财)
SANCAI_artist = [13, 14, 18, 26, 29, 33, 35, 38, 48] # 艺能运暗示数(富有艺术天才,对审美,艺术,演艺,体育有通达之能)
SANCAI_goodwife = [5, 6, 11, 13, 15, 16, 24, 32, 35] # 女德运暗示数(具有妇德,品性温良,助夫爱子)
SANCAI_death = [21, 23, 26, 28, 29, 33, 39] # 女性孤寡运暗示数(难觅夫君,家庭不和,夫妻两虎相斗,离婚,严重者夫妻一方早亡)
SANCAI_alone = [4, 10, 12, 14, 22, 28, 34] # 孤独运暗示数(妻凌夫或夫克妻)
SANCAI_merry = [5, 6, 15, 16, 32, 39, 41] # 双妻运暗示数
SANCAI_stubbon = [7, 17, 18, 25, 27, 28, 37, 47] # 刚情运暗示数(性刚固执,意气用事)
SANCAI_gentle = [5, 6, 11, 15, 16, 24, 31, 32, 35] # 温和运暗示数(性情平和,能得上下信望)
# 可以自己配置觉得好的数字
# 参考好的搭配
refer_good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
SANCAI_merry, SANCAI_gentle]
# 自己设定的好的搭配
good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
SANCAI_merry, SANCAI_gentle]
# 参考坏的搭配
refer_bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone, SANCAI_stubbon]
# 自己设定的坏的搭配
bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone]
good_num_set = set(reduce((lambda x, y: x + y), good_num_list, []))
bad_num_set = set(reduce((lambda x, y: x + y), bad_num_list, []))
print('五格好分值:', good_num_set)
print('五格差分值:', bad_num_set)
# 筛选出有好没坏的三才五格
best_num_set = [x for x in good_num_set if x not in bad_num_set]
print('想要的三才五格数字:', best_num_set)
RESULT_UNKNOWN = '结果未知'
| # -*-coding:utf-8-*-
# from functools import reduce
from functools import reduce
SANCAI_jixiang = [1, 3, 5, 7, 8, 11, 13, 15, 16, 18, 21, 23, 24, 25, 31, 32, 33, 35, 37, 39, 41, 45, 47, 48, 52, 57, 61,
63,
65, 67, 68, 81] # 吉祥运暗示数(代表健全,幸福,名誉等)
SANCAI_xiaoji = [6, 17, 26, 27, 29, 30, 38, 49, 51, 55, 58, 71, 73, 75] # 次吉祥运暗示数(代表多少有些障碍,但能获得吉运)
SANCAI_xiong = [2, 4, 9, 10, 12, 14, 19, 20, 22, 28, 34, 36, 40, 42, 43, 44, 46, 50, 53, 54, 56, 59, 60, 62, 64, 66, 69,
70,
72, 74, 76, 77, 78, 79, 80] # 凶数运暗示数(代表逆境,沉浮,薄弱,病难,困难,多灾等)
SANCAI_wise = [3, 13, 16, 21, 23, 29, 31, 37, 39, 41, 45, 47] # 首领运暗示数(智慧 )仁勇全备,立上位,能领导众人)
SANCAI_wealth = [15, 16, 24, 29, 32, 33, 41, 52] # 财富运暗示数(多钱财,富贵,白手可获巨财)
SANCAI_artist = [13, 14, 18, 26, 29, 33, 35, 38, 48] # 艺能运暗示数(富有艺术天才,对审美,艺术,演艺,体育有通达之能)
SANCAI_goodwife = [5, 6, 11, 13, 15, 16, 24, 32, 35] # 女德运暗示数(具有妇德,品性温良,助夫爱子)
SANCAI_death = [21, 23, 26, 28, 29, 33, 39] # 女性孤寡运暗示数(难觅夫君,家庭不和,夫妻两虎相斗,离婚,严重者夫妻一方早亡)
SANCAI_alone = [4, 10, 12, 14, 22, 28, 34] # 孤独运暗示数(妻凌夫或夫克妻)
SANCAI_merry = [5, 6, 15, 16, 32, 39, 41] # 双妻运暗示数
SANCAI_stubbon = [7, 17, 18, 25, 27, 28, 37, 47] # 刚情运暗示数(性刚固执,意气用事)
SANCAI_gentle = [5, 6, 11, 15, 16, 24, 31, 32, 35] # 温和运暗示数(性情平和,能得上下信望)
# 可以自己配置觉得好的数字
# 参考好的搭配
refer_good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
SANCAI_merry, SANCAI_gentle]
# 自己设定的好的搭配
good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
SANCAI_merry, SANCAI_gentle]
# 参考坏的搭配
refer_bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone, SANCAI_stubbon]
# 自己设定的坏的搭配
bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone]
good_num_set = set(reduce((lambda x, y: x + y), good_num_list, []))
bad_num_set = set(reduce((lambda x, y: x + y), bad_num_list, []))
print('五格好分值:', good_num_set)
print('五格差分值:', bad_num_set)
# 筛选出有好没坏的三才五格
best_num_set = [x for x in good_num_set if x not in bad_num_set]
print('想要的三才五格数字:', best_num_set)
RESULT_UNKNOWN = '结果未知'
| zh | 0.838995 | # -*-coding:utf-8-*- # from functools import reduce # 吉祥运暗示数(代表健全,幸福,名誉等) # 次吉祥运暗示数(代表多少有些障碍,但能获得吉运) # 凶数运暗示数(代表逆境,沉浮,薄弱,病难,困难,多灾等) # 首领运暗示数(智慧 )仁勇全备,立上位,能领导众人) # 财富运暗示数(多钱财,富贵,白手可获巨财) # 艺能运暗示数(富有艺术天才,对审美,艺术,演艺,体育有通达之能) # 女德运暗示数(具有妇德,品性温良,助夫爱子) # 女性孤寡运暗示数(难觅夫君,家庭不和,夫妻两虎相斗,离婚,严重者夫妻一方早亡) # 孤独运暗示数(妻凌夫或夫克妻) # 双妻运暗示数 # 刚情运暗示数(性刚固执,意气用事) # 温和运暗示数(性情平和,能得上下信望) # 可以自己配置觉得好的数字 # 参考好的搭配 # 自己设定的好的搭配 # 参考坏的搭配 # 自己设定的坏的搭配 # 筛选出有好没坏的三才五格 | 2.101429 | 2 |
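A small aside on the constants module above (sketch only, with abbreviated lists): the reduce-based concatenation can also be expressed with itertools.chain, and the "good but never bad" filter is a plain set difference.

# Sketch of the same flattening/filtering idea as conf/constants.py above;
# the lists are abbreviated subsets of the originals.
from itertools import chain

SANCAI_jixiang = [1, 3, 5, 7, 8, 11, 13, 15, 16]   # abbreviated
SANCAI_wealth = [15, 16, 24, 29, 32, 33, 41, 52]
SANCAI_xiong = [2, 4, 9, 10, 12, 14, 19, 20, 22]   # abbreviated

good_num_list = [SANCAI_jixiang, SANCAI_wealth]
bad_num_list = [SANCAI_xiong]

good_num_set = set(chain.from_iterable(good_num_list))
bad_num_set = set(chain.from_iterable(bad_num_list))

# numbers rated good and never rated bad
best_num_set = sorted(good_num_set - bad_num_set)
print(best_num_set)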
main/migrations/0006_labourer_allproj.py | kevinmuturi5/farm-Management-system | 1 | 9377 | <filename>main/migrations/0006_labourer_allproj.py
# Generated by Django 3.1.2 on 2020-10-18 16:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0005_auto_20201018_1902'),
]
operations = [
migrations.AddField(
model_name='labourer',
name='allproj',
field=models.ManyToManyField(blank=True, to='main.Listing'),
),
]
| <filename>main/migrations/0006_labourer_allproj.py
# Generated by Django 3.1.2 on 2020-10-18 16:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0005_auto_20201018_1902'),
]
operations = [
migrations.AddField(
model_name='labourer',
name='allproj',
field=models.ManyToManyField(blank=True, to='main.Listing'),
),
]
| en | 0.834864 | # Generated by Django 3.1.2 on 2020-10-18 16:07 | 1.460993 | 1 |
bzt/modules/blazemeter/blazemeter_reporter.py | beachwood23/taurus | 0 | 9378 | <reponame>beachwood23/taurus
"""
Module for reporting into http://www.blazemeter.com/ service
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import os
import platform
import sys
import time
import traceback
import zipfile
from collections import defaultdict, OrderedDict
from io import BytesIO
from urllib.error import HTTPError
import requests
from bzt import TaurusInternalException, TaurusConfigError, TaurusNetworkError
from bzt.bza import User, Session, Test
from bzt.engine import Reporter, Singletone
from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time
from bzt.modules.aggregator import AggregatorListener, DataPoint, KPISet, ResultsProvider, ConsolidatingAggregator
from bzt.modules.monitoring import Monitoring, MonitoringListener
from bzt.modules.blazemeter.project_finder import ProjectFinder
from bzt.modules.blazemeter.const import NOTE_SIZE_LIMIT
class BlazeMeterUploader(Reporter, AggregatorListener, MonitoringListener, Singletone):
"""
Reporter class
:type _test: bzt.bza.Test
:type _master: bzt.bza.Master
:type _session: bzt.bza.Session
"""
def __init__(self):
super(BlazeMeterUploader, self).__init__()
self.browser_open = 'start'
self.kpi_buffer = []
self.send_interval = 30
self._last_status_check = time.time()
self.send_data = True
self.upload_artifacts = True
self.send_monitoring = True
self.monitoring_buffer = None
self.public_report = False
self.last_dispatch = 0
self.results_url = None
self._user = User()
self._test = None
self._master = None
self._session = None
self.first_ts = sys.maxsize
self.last_ts = 0
self.report_name = None
self._dpoint_serializer = DatapointSerializer(self)
def prepare(self):
"""
Read options for uploading, check that they're sane
"""
super(BlazeMeterUploader, self).prepare()
self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval))
self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring)
monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", 500)
self.monitoring_buffer = MonitoringBuffer(monitoring_buffer_limit, self.log)
self.browser_open = self.settings.get("browser-open", self.browser_open)
self.public_report = self.settings.get("public-report", self.public_report)
self.upload_artifacts = self.parameters.get("upload-artifacts", self.upload_artifacts)
self._dpoint_serializer.multi = self.settings.get("report-times-multiplier", self._dpoint_serializer.multi)
token = self.settings.get("token", "")
if not token:
self.log.warning("No BlazeMeter API key provided, will upload anonymously")
self._user.token = token
# usual fields
self._user.logger_limit = self.settings.get("request-logging-limit", self._user.logger_limit)
self._user.address = self.settings.get("address", self._user.address).rstrip("/")
self._user.data_address = self.settings.get("data-address", self._user.data_address).rstrip("/")
self._user.timeout = dehumanize_time(self.settings.get("timeout", self._user.timeout))
if isinstance(self._user.http_session, requests.Session):
self.log.debug("Installing http client")
self._user.http_session = self.engine.get_http_client()
self._user.http_request = self._user.http_session.request
# direct data feeding case
sess_id = self.parameters.get("session-id")
if sess_id:
self._session = Session(self._user, {'id': sess_id})
self._session['userId'] = self.parameters.get("user-id", None)
self._session['testId'] = self.parameters.get("test-id", None)
self._test = Test(self._user, {'id': self._session['testId']})
exc = TaurusConfigError("Need signature for session")
self._session.data_signature = self.parameters.get("signature", exc)
self._session.kpi_target = self.parameters.get("kpi-target", self._session.kpi_target)
self.send_data = self.parameters.get("send-data", self.send_data)
else:
try:
self._user.ping() # to check connectivity and auth
except HTTPError:
self.log.error("Cannot reach online results storage, maybe the address/token is wrong")
raise
if token:
wsp = self._user.accounts().workspaces()
if not wsp:
raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")
finder = ProjectFinder(self.parameters, self.settings, self._user, wsp, self.log)
self._test = finder.resolve_external_test()
else:
self._test = Test(self._user, {'id': None})
self.report_name = self.parameters.get("report-name", self.settings.get("report-name", self.report_name))
if self.report_name == 'ask' and sys.stdin.isatty():
self.report_name = input("Please enter report-name: ")
if isinstance(self.engine.aggregator, ResultsProvider):
self.engine.aggregator.add_listener(self)
for service in self.engine.services:
if isinstance(service, Monitoring):
service.add_listener(self)
def startup(self):
"""
Initiate online test
"""
super(BlazeMeterUploader, self).startup()
self._user.log = self.log.getChild(self.__class__.__name__)
if not self._session:
url = self._start_online()
self.log.info("Started data feeding: %s", url)
if self.browser_open in ('start', 'both'):
open_browser(url)
if self._user.token and self.public_report:
report_link = self._master.make_report_public()
self.log.info("Public report link: %s", report_link)
def _start_online(self):
"""
Start online test
"""
self.log.info("Initiating data feeding...")
if self._test['id']:
self._session, self._master = self._test.start_external()
else:
self._session, self._master, self.results_url = self._test.start_anonymous_external_test()
self._test['id'] = self._session['testId']
if self._test.token:
self.results_url = self._master.address + '/app/#/masters/%s' % self._master['id']
if self.report_name:
self._session.set({"name": str(self.report_name)})
return self.results_url
def __get_jtls_and_more(self):
"""
Compress all files in artifacts dir to single zipfile
:rtype: (io.BytesIO,dict)
"""
mfile = BytesIO()
listing = {}
logs = set()
for handler in self.engine.log.parent.handlers:
if isinstance(handler, logging.FileHandler):
logs.add(handler.baseFilename)
max_file_size = self.settings.get('artifact-upload-size-limit', 10) * 1024 * 1024 # 10MB
with zipfile.ZipFile(mfile, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zfh:
for root, _, files in os.walk(self.engine.artifacts_dir):
for filename in files:
full_path = os.path.join(root, filename)
if full_path in logs:
logs.remove(full_path)
fsize = os.path.getsize(full_path)
if fsize <= max_file_size:
zfh.write(full_path, os.path.join(os.path.relpath(root, self.engine.artifacts_dir), filename))
listing[full_path] = fsize
else:
msg = "File %s exceeds maximum size quota of %s and won't be included into upload"
self.log.warning(msg, filename, max_file_size)
for filename in logs: # upload logs unconditionally
zfh.write(filename, os.path.basename(filename))
listing[filename] = os.path.getsize(filename)
return mfile, listing
def __upload_artifacts(self):
"""
If token provided, upload artifacts folder contents and bzt.log
"""
if not self._session.token:
return
worker_index = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL')
if worker_index:
suffix = '-%s' % worker_index
else:
suffix = ''
artifacts_zip = "artifacts%s.zip" % suffix
mfile, zip_listing = self.__get_jtls_and_more()
self.log.info("Uploading all artifacts as %s ...", artifacts_zip)
self._session.upload_file(artifacts_zip, mfile.getvalue())
self._session.upload_file(artifacts_zip + '.tail.bz', self.__format_listing(zip_listing))
handlers = self.engine.log.parent.handlers
for handler in handlers:
if isinstance(handler, logging.FileHandler):
fname = handler.baseFilename
self.log.info("Uploading %s", fname)
fhead, ftail = os.path.splitext(os.path.split(fname)[-1])
modified_name = fhead + suffix + ftail
with open(fname, 'rb') as _file:
self._session.upload_file(modified_name, _file.read())
_file.seek(-4096, 2)
tail = _file.read()
tail = tail[tail.index(b("\n")) + 1:]
self._session.upload_file(modified_name + ".tail.bz", tail)
def post_process(self):
"""
Upload results if possible
"""
if not self._session:
self.log.debug("No feeding session obtained, nothing to finalize")
return
self.log.debug("KPI bulk buffer len in post-proc: %s", len(self.kpi_buffer))
try:
self.log.info("Sending remaining KPI data to server...")
if self.send_data:
self.__send_data(self.kpi_buffer, False, True)
self.kpi_buffer = []
if self.send_monitoring:
self.__send_monitoring()
finally:
self._postproc_phase2()
if self.results_url:
if self.browser_open in ('end', 'both'):
open_browser(self.results_url)
self.log.info("Online report link: %s", self.results_url)
def _postproc_phase2(self):
try:
if self.upload_artifacts:
self.__upload_artifacts()
except (IOError, TaurusNetworkError):
self.log.warning("Failed artifact upload: %s", traceback.format_exc())
finally:
self._last_status_check = self.parameters.get('forced-last-check', self._last_status_check)
self.log.debug("Set last check time to: %s", self._last_status_check)
tries = self.send_interval # NOTE: you dirty one...
while not self._last_status_check and tries > 0:
self.log.info("Waiting for ping...")
time.sleep(self.send_interval)
tries -= 1
self._postproc_phase3()
def _postproc_phase3(self):
try:
if self.send_data:
self.end_online()
if self._user.token and self.engine.stopping_reason:
exc_class = self.engine.stopping_reason.__class__.__name__
note = "%s: %s" % (exc_class, str(self.engine.stopping_reason))
self.append_note_to_session(note)
if self._master:
self.append_note_to_master(note)
except KeyboardInterrupt:
raise
except BaseException as exc:
self.log.debug("Failed to finish online: %s", traceback.format_exc())
self.log.warning("Failed to finish online: %s", exc)
def end_online(self):
"""
Finish online test
"""
if not self._session:
self.log.debug("Feeding not started, so not stopping")
else:
self.log.info("Ending data feeding...")
if self._user.token:
self._session.stop()
else:
self._session.stop_anonymous()
def append_note_to_session(self, note):
self._session.fetch()
if 'note' in self._session:
note = self._session['note'] + '\n' + note
note = note.strip()
if note:
self._session.set({'note': note[:NOTE_SIZE_LIMIT]})
def append_note_to_master(self, note):
self._master.fetch()
if 'note' in self._master:
note = self._master['note'] + '\n' + note
note = note.strip()
if note:
self._master.set({'note': note[:NOTE_SIZE_LIMIT]})
def check(self):
"""
Send data if any in buffer
"""
self.log.debug("KPI bulk buffer len: %s", len(self.kpi_buffer))
if self.last_dispatch < (time.time() - self.send_interval):
self.last_dispatch = time.time()
if self.send_data and len(self.kpi_buffer):
self.__send_data(self.kpi_buffer)
self.kpi_buffer = []
if self.send_monitoring:
self.__send_monitoring()
return super(BlazeMeterUploader, self).check()
def __send_data(self, data, do_check=True, is_final=False):
"""
:type data: list[bzt.modules.aggregator.DataPoint]
"""
if not self._session:
return
self.engine.aggregator.converter(data)
serialized = self._dpoint_serializer.get_kpi_body(data, is_final)
self._session.send_kpi_data(serialized, do_check)
def aggregated_second(self, data):
"""
Send online data
:param data: DataPoint
"""
if self.send_data:
self.kpi_buffer.append(data)
def monitoring_data(self, data):
if self.send_monitoring:
self.monitoring_buffer.record_data(data)
def __send_monitoring(self):
engine_id = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '')
if not engine_id:
engine_id = "0"
data = self.monitoring_buffer.get_monitoring_json(self._session)
self._session.send_monitoring_data(engine_id, data)
def __format_listing(self, zip_listing):
lines = []
for fname in sorted(zip_listing.keys()):
bytestr = humanize_bytes(zip_listing[fname])
if fname.startswith(self.engine.artifacts_dir):
fname = fname[len(self.engine.artifacts_dir) + 1:]
lines.append(bytestr + " " + fname)
return "\n".join(lines)
class MonitoringBuffer(object):
def __init__(self, size_limit, parent_log):
self.size_limit = size_limit
self.data = defaultdict(OrderedDict)
self.log = parent_log.getChild(self.__class__.__name__)
# data :: dict(datasource -> dict(interval -> datapoint))
# datapoint :: dict(metric -> value)
def record_data(self, data):
for monitoring_item in data:
item = copy.deepcopy(monitoring_item)
source = item.pop('source')
timestamp = int(item['ts'])
item['interval'] = 1
buff = self.data[source]
if timestamp in buff:
buff[timestamp].update(item)
else:
buff[timestamp] = item
sources = list(self.data)
for source in sources:
if len(self.data[source]) > self.size_limit:
self._downsample(self.data[source])
self.log.debug("Monitoring buffer size '%s': %s", source, len(self.data[source]))
def _downsample(self, buff):
size = 1
while len(buff) > self.size_limit:
self._merge_small_intervals(buff, size)
size += 1
def _merge_small_intervals(self, buff, size):
timestamps = list(buff)
merged_already = set()
for left, right in zip(timestamps, timestamps[1:]):
if left in merged_already:
continue
if buff[left]['interval'] <= size:
self._merge_datapoints(buff[left], buff[right])
buff.pop(right)
merged_already.add(left)
merged_already.add(right)
@staticmethod
def _merge_datapoints(left, right):
sum_size = float(left['interval'] + right['interval'])
for metric in set(right):
if metric in ('ts', 'interval'):
continue
if metric in left:
left[metric] = (left[metric] * left['interval'] + right[metric] * right['interval']) / sum_size
else:
left[metric] = right[metric]
left['interval'] = sum_size
def get_monitoring_json(self, session):
"""
:type session: Session
"""
results = {}
hosts = []
kpis = {}
for source, buff in iteritems(self.data):
for timestamp, item in iteritems(buff):
if source == 'local':
source = platform.node()
if source not in results:
results[source] = {
"name": source,
"intervals": OrderedDict()
}
if source not in hosts:
hosts.append(source)
src = results[source]
tstmp = timestamp * 1000
tstmp_key = '%d' % tstmp
if tstmp_key not in src['intervals']:
src['intervals'][tstmp_key] = {
"start": tstmp,
"duration": item['interval'] * 1000,
"indicators": {}
}
for field, value in iteritems(item):
if field.lower().startswith('conn-all'):
field = 'Connections'
elif field.lower().startswith('cpu'):
field = 'CPU'
elif field.lower().startswith('mem'):
field = 'Memory'
value *= 100
elif field == 'bytes-recv' or field.lower().startswith('net'):
field = 'Network I/O'
elif field == 'engine-loop':
field = 'Busy Taurus'
else:
continue # maybe one day BZA will accept all other metrics...
if field not in kpis:
kpis[field] = field
src['intervals'][tstmp_key]['indicators'][field] = {
"value": value,
"name": field,
"std": 0,
"mean": 0,
"sum": 0,
"min": 0,
"max": 0,
"sumOfSquares": 0,
"n": 1
}
kpis = {"Network I/O": "Network I/O", "Memory": "Memory", "CPU": "CPU", "Connections": "Connections"}
return {
"reportInfo": {
"sessionId": session['id'],
"timestamp": time.time(),
"userId": session['userId'],
"testId": session['testId'],
"type": "MONITOR",
"testName": ""
},
"kpis": kpis,
"hosts": hosts,
"results": results
}
class DatapointSerializer(object):
def __init__(self, owner):
"""
:type owner: BlazeMeterUploader
"""
super(DatapointSerializer, self).__init__()
self.owner = owner
        self.multi = 1000  # multiplier factor for reporting
def get_kpi_body(self, data_buffer, is_final):
# - reporting format:
# {labels: <data>, # see below
# sourceID: <id of BlazeMeterClient object>,
# [is_final: True]} # for last report
#
# - elements of 'data' are described in __get_label()
#
# - elements of 'intervals' are described in __get_interval()
# every interval contains info about response codes that were received on it.
report_items = BetterDict()
if data_buffer:
self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP])
self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP])
# following data is received in the cumulative way
for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]):
report_item = self.__get_label(label, kpi_set)
self.__add_errors(report_item, kpi_set) # 'Errors' tab
report_items[label] = report_item
# fill 'Timeline Report' tab with intervals data
# intervals are received in the additive way
if report_items:
for dpoint in data_buffer:
time_stamp = dpoint[DataPoint.TIMESTAMP]
for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
exc = TaurusInternalException('Cumulative KPISet is non-consistent')
report_item = report_items.get(label, exc)
report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp))
report_items = [report_items[key] for key in sorted(report_items.keys())] # convert dict to list
data = {"labels": report_items, "sourceID": id(self.owner)}
if is_final:
data['final'] = True
return to_json(data)
@staticmethod
def __add_errors(report_item, kpi_set):
errors = kpi_set[KPISet.ERRORS]
for error in errors:
if error["type"] == KPISet.ERRTYPE_ERROR:
report_item['errors'].append({
'm': error['msg'],
"rc": error['rc'],
"count": error['cnt'],
})
elif error["type"] == KPISet.ERRTYPE_SUBSAMPLE:
report_item['failedEmbeddedResources'].append({
"count": error['cnt'],
"rm": error['msg'],
"rc": error['rc'],
"url": list(error['urls'])[0] if error['urls'] else None,
})
else:
report_item['assertions'].append({
'failureMessage': error['msg'],
'name': error['tag'] if error['tag'] else 'All Assertions',
'failures': error['cnt']
})
def __get_label(self, name, cumul):
return {
"n": cumul[KPISet.SAMPLE_COUNT], # total count of samples
"name": name if name else 'ALL', # label
"interval": 1, # not used
"intervals": [], # list of intervals, fill later
"samplesNotCounted": 0, # not used
"assertionsNotCounted": 0, # not used
"failedEmbeddedResources": [], # not used
"failedEmbeddedResourcesSpilloverCount": 0, # not used
"otherErrorsCount": 0, # not used
"errors": [], # list of errors, fill later
"assertions": [], # list of assertions, fill later
"percentileHistogram": [], # not used
"percentileHistogramLatency": [], # not used
"percentileHistogramBytes": [], # not used
"empty": False, # not used
"summary": self.__get_summary(cumul) # summary info
}
def __get_summary(self, cumul):
return {
"first": self.owner.first_ts,
"last": self.owner.last_ts,
"duration": self.owner.last_ts - self.owner.first_ts,
"failed": cumul[KPISet.FAILURES],
"hits": cumul[KPISet.SAMPLE_COUNT],
"avg": int(self.multi * cumul[KPISet.AVG_RESP_TIME]),
"min": int(self.multi * cumul[KPISet.PERCENTILES]["0.0"]) if "0.0" in cumul[KPISet.PERCENTILES] else 0,
"max": int(self.multi * cumul[KPISet.PERCENTILES]["100.0"]) if "100.0" in cumul[KPISet.PERCENTILES] else 0,
"std": int(self.multi * cumul[KPISet.STDEV_RESP_TIME]),
"tp90": int(self.multi * cumul[KPISet.PERCENTILES]["90.0"]) if "90.0" in cumul[KPISet.PERCENTILES] else 0,
"tp95": int(self.multi * cumul[KPISet.PERCENTILES]["95.0"]) if "95.0" in cumul[KPISet.PERCENTILES] else 0,
"tp99": int(self.multi * cumul[KPISet.PERCENTILES]["99.0"]) if "99.0" in cumul[KPISet.PERCENTILES] else 0,
"latencyAvg": int(self.multi * cumul[KPISet.AVG_LATENCY]),
"latencyMax": 0,
"latencyMin": 0,
"latencySTD": 0,
"bytes": cumul[KPISet.BYTE_COUNT],
"bytesMax": 0,
"bytesMin": 0,
"bytesAvg": int(cumul[KPISet.BYTE_COUNT] / float(cumul[KPISet.SAMPLE_COUNT])),
"bytesSTD": 0,
"otherErrorsSpillcount": 0,
}
def __get_interval(self, item, time_stamp):
# rc_list - list of info about response codes:
# {'n': <number of code encounters>,
# 'f': <number of failed request (e.q. important for assertions)>
# 'rc': <string value of response code>}
rc_list = []
for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
fails = [err['cnt'] for err in item[KPISet.ERRORS] if str(err['rc']) == r_code]
rc_list.append({"n": cnt, 'f': fails, "rc": r_code})
return {
"ec": item[KPISet.FAILURES],
"ts": time_stamp,
"na": item[KPISet.CONCURRENCY],
"n": item[KPISet.SAMPLE_COUNT],
"failed": item[KPISet.FAILURES],
"rc": rc_list,
"t": {
"min": int(self.multi * item[KPISet.PERCENTILES]["0.0"]) if "0.0" in item[KPISet.PERCENTILES] else 0,
"max": int(self.multi * item[KPISet.PERCENTILES]["100.0"]) if "100.0" in item[
KPISet.PERCENTILES] else 0,
"sum": self.multi * item[KPISet.AVG_RESP_TIME] * item[KPISet.SAMPLE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": self.multi * item[KPISet.STDEV_RESP_TIME],
"avg": self.multi * item[KPISet.AVG_RESP_TIME]
},
"lt": {
"min": 0,
"max": 0,
"sum": self.multi * item[KPISet.AVG_LATENCY] * item[KPISet.SAMPLE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": 0,
"avg": self.multi * item[KPISet.AVG_LATENCY]
},
"by": {
"min": 0,
"max": 0,
"sum": item[KPISet.BYTE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": 0,
"avg": item[KPISet.BYTE_COUNT] / float(item[KPISet.SAMPLE_COUNT])
},
}
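# ---------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the original module. The
# payload built by DatapointSerializer.get_kpi_body() is a JSON document whose
# field names come straight from __get_label(), __get_summary() and
# __get_interval() above; all concrete values below are placeholders:
#
#   {
#     "sourceID": 140234567,            # id() of the uploader object
#     "final": true,                    # present only on the last report
#     "labels": [
#       {"name": "ALL",
#        "n": 1200,                     # cumulative sample count
#        "summary": {"avg": 245, "tp90": 410, "failed": 3, ...},
#        "intervals": [{"ts": 1499900000, "n": 40, "rc": [...], ...}, ...],
#        "errors": [...], "assertions": [...], "failedEmbeddedResources": [...]}
#     ]
#   }
# ---------------------------------------------------------------------------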
| """
Module for reporting into http://www.blazemeter.com/ service
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import os
import platform
import sys
import time
import traceback
import zipfile
from collections import defaultdict, OrderedDict
from io import BytesIO
from urllib.error import HTTPError
import requests
from bzt import TaurusInternalException, TaurusConfigError, TaurusNetworkError
from bzt.bza import User, Session, Test
from bzt.engine import Reporter, Singletone
from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time
from bzt.modules.aggregator import AggregatorListener, DataPoint, KPISet, ResultsProvider, ConsolidatingAggregator
from bzt.modules.monitoring import Monitoring, MonitoringListener
from bzt.modules.blazemeter.project_finder import ProjectFinder
from bzt.modules.blazemeter.const import NOTE_SIZE_LIMIT
class BlazeMeterUploader(Reporter, AggregatorListener, MonitoringListener, Singletone):
"""
Reporter class
:type _test: bzt.bza.Test
:type _master: bzt.bza.Master
:type _session: bzt.bza.Session
"""
def __init__(self):
super(BlazeMeterUploader, self).__init__()
self.browser_open = 'start'
self.kpi_buffer = []
self.send_interval = 30
self._last_status_check = time.time()
self.send_data = True
self.upload_artifacts = True
self.send_monitoring = True
self.monitoring_buffer = None
self.public_report = False
self.last_dispatch = 0
self.results_url = None
self._user = User()
self._test = None
self._master = None
self._session = None
self.first_ts = sys.maxsize
self.last_ts = 0
self.report_name = None
self._dpoint_serializer = DatapointSerializer(self)
def prepare(self):
"""
Read options for uploading, check that they're sane
"""
super(BlazeMeterUploader, self).prepare()
self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval))
self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring)
monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", 500)
self.monitoring_buffer = MonitoringBuffer(monitoring_buffer_limit, self.log)
self.browser_open = self.settings.get("browser-open", self.browser_open)
self.public_report = self.settings.get("public-report", self.public_report)
self.upload_artifacts = self.parameters.get("upload-artifacts", self.upload_artifacts)
self._dpoint_serializer.multi = self.settings.get("report-times-multiplier", self._dpoint_serializer.multi)
token = self.settings.get("token", "")
if not token:
self.log.warning("No BlazeMeter API key provided, will upload anonymously")
self._user.token = token
# usual fields
self._user.logger_limit = self.settings.get("request-logging-limit", self._user.logger_limit)
self._user.address = self.settings.get("address", self._user.address).rstrip("/")
self._user.data_address = self.settings.get("data-address", self._user.data_address).rstrip("/")
self._user.timeout = dehumanize_time(self.settings.get("timeout", self._user.timeout))
if isinstance(self._user.http_session, requests.Session):
self.log.debug("Installing http client")
self._user.http_session = self.engine.get_http_client()
self._user.http_request = self._user.http_session.request
# direct data feeding case
sess_id = self.parameters.get("session-id")
if sess_id:
self._session = Session(self._user, {'id': sess_id})
self._session['userId'] = self.parameters.get("user-id", None)
self._session['testId'] = self.parameters.get("test-id", None)
self._test = Test(self._user, {'id': self._session['testId']})
exc = TaurusConfigError("Need signature for session")
self._session.data_signature = self.parameters.get("signature", exc)
self._session.kpi_target = self.parameters.get("kpi-target", self._session.kpi_target)
self.send_data = self.parameters.get("send-data", self.send_data)
else:
try:
self._user.ping() # to check connectivity and auth
except HTTPError:
self.log.error("Cannot reach online results storage, maybe the address/token is wrong")
raise
if token:
wsp = self._user.accounts().workspaces()
if not wsp:
raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")
finder = ProjectFinder(self.parameters, self.settings, self._user, wsp, self.log)
self._test = finder.resolve_external_test()
else:
self._test = Test(self._user, {'id': None})
self.report_name = self.parameters.get("report-name", self.settings.get("report-name", self.report_name))
if self.report_name == 'ask' and sys.stdin.isatty():
self.report_name = input("Please enter report-name: ")
if isinstance(self.engine.aggregator, ResultsProvider):
self.engine.aggregator.add_listener(self)
for service in self.engine.services:
if isinstance(service, Monitoring):
service.add_listener(self)
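        # Editor's note -- descriptive summary added for clarity, not original
        # code. The configuration keys consumed above via self.settings and
        # self.parameters include: token, address, data-address, timeout,
        # send-interval, send-monitoring, monitoring-buffer-limit,
        # browser-open, public-report, report-times-multiplier,
        # request-logging-limit, report-name, upload-artifacts and, for the
        # direct data-feeding case, session-id, test-id, user-id, signature,
        # kpi-target and send-data. How these keys are laid out in a Taurus
        # YAML file is described in the Taurus documentation.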
def startup(self):
"""
Initiate online test
"""
super(BlazeMeterUploader, self).startup()
self._user.log = self.log.getChild(self.__class__.__name__)
if not self._session:
url = self._start_online()
self.log.info("Started data feeding: %s", url)
if self.browser_open in ('start', 'both'):
open_browser(url)
if self._user.token and self.public_report:
report_link = self._master.make_report_public()
self.log.info("Public report link: %s", report_link)
def _start_online(self):
"""
Start online test
"""
self.log.info("Initiating data feeding...")
if self._test['id']:
self._session, self._master = self._test.start_external()
else:
self._session, self._master, self.results_url = self._test.start_anonymous_external_test()
self._test['id'] = self._session['testId']
if self._test.token:
self.results_url = self._master.address + '/app/#/masters/%s' % self._master['id']
if self.report_name:
self._session.set({"name": str(self.report_name)})
return self.results_url
def __get_jtls_and_more(self):
"""
Compress all files in artifacts dir to single zipfile
:rtype: (io.BytesIO,dict)
"""
mfile = BytesIO()
listing = {}
logs = set()
for handler in self.engine.log.parent.handlers:
if isinstance(handler, logging.FileHandler):
logs.add(handler.baseFilename)
max_file_size = self.settings.get('artifact-upload-size-limit', 10) * 1024 * 1024 # 10MB
with zipfile.ZipFile(mfile, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zfh:
for root, _, files in os.walk(self.engine.artifacts_dir):
for filename in files:
full_path = os.path.join(root, filename)
if full_path in logs:
logs.remove(full_path)
fsize = os.path.getsize(full_path)
if fsize <= max_file_size:
zfh.write(full_path, os.path.join(os.path.relpath(root, self.engine.artifacts_dir), filename))
listing[full_path] = fsize
else:
msg = "File %s exceeds maximum size quota of %s and won't be included into upload"
self.log.warning(msg, filename, max_file_size)
for filename in logs: # upload logs unconditionally
zfh.write(filename, os.path.basename(filename))
listing[filename] = os.path.getsize(filename)
return mfile, listing
def __upload_artifacts(self):
"""
If token provided, upload artifacts folder contents and bzt.log
"""
if not self._session.token:
return
worker_index = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL')
if worker_index:
suffix = '-%s' % worker_index
else:
suffix = ''
artifacts_zip = "artifacts%s.zip" % suffix
mfile, zip_listing = self.__get_jtls_and_more()
self.log.info("Uploading all artifacts as %s ...", artifacts_zip)
self._session.upload_file(artifacts_zip, mfile.getvalue())
self._session.upload_file(artifacts_zip + '.tail.bz', self.__format_listing(zip_listing))
handlers = self.engine.log.parent.handlers
for handler in handlers:
if isinstance(handler, logging.FileHandler):
fname = handler.baseFilename
self.log.info("Uploading %s", fname)
fhead, ftail = os.path.splitext(os.path.split(fname)[-1])
modified_name = fhead + suffix + ftail
with open(fname, 'rb') as _file:
self._session.upload_file(modified_name, _file.read())
_file.seek(-4096, 2)
tail = _file.read()
tail = tail[tail.index(b("\n")) + 1:]
self._session.upload_file(modified_name + ".tail.bz", tail)
def post_process(self):
"""
Upload results if possible
"""
if not self._session:
self.log.debug("No feeding session obtained, nothing to finalize")
return
self.log.debug("KPI bulk buffer len in post-proc: %s", len(self.kpi_buffer))
try:
self.log.info("Sending remaining KPI data to server...")
if self.send_data:
self.__send_data(self.kpi_buffer, False, True)
self.kpi_buffer = []
if self.send_monitoring:
self.__send_monitoring()
finally:
self._postproc_phase2()
if self.results_url:
if self.browser_open in ('end', 'both'):
open_browser(self.results_url)
self.log.info("Online report link: %s", self.results_url)
def _postproc_phase2(self):
try:
if self.upload_artifacts:
self.__upload_artifacts()
except (IOError, TaurusNetworkError):
self.log.warning("Failed artifact upload: %s", traceback.format_exc())
finally:
self._last_status_check = self.parameters.get('forced-last-check', self._last_status_check)
self.log.debug("Set last check time to: %s", self._last_status_check)
tries = self.send_interval # NOTE: you dirty one...
while not self._last_status_check and tries > 0:
self.log.info("Waiting for ping...")
time.sleep(self.send_interval)
tries -= 1
self._postproc_phase3()
def _postproc_phase3(self):
try:
if self.send_data:
self.end_online()
if self._user.token and self.engine.stopping_reason:
exc_class = self.engine.stopping_reason.__class__.__name__
note = "%s: %s" % (exc_class, str(self.engine.stopping_reason))
self.append_note_to_session(note)
if self._master:
self.append_note_to_master(note)
except KeyboardInterrupt:
raise
except BaseException as exc:
self.log.debug("Failed to finish online: %s", traceback.format_exc())
self.log.warning("Failed to finish online: %s", exc)
def end_online(self):
"""
Finish online test
"""
if not self._session:
self.log.debug("Feeding not started, so not stopping")
else:
self.log.info("Ending data feeding...")
if self._user.token:
self._session.stop()
else:
self._session.stop_anonymous()
def append_note_to_session(self, note):
self._session.fetch()
if 'note' in self._session:
note = self._session['note'] + '\n' + note
note = note.strip()
if note:
self._session.set({'note': note[:NOTE_SIZE_LIMIT]})
def append_note_to_master(self, note):
self._master.fetch()
if 'note' in self._master:
note = self._master['note'] + '\n' + note
note = note.strip()
if note:
self._master.set({'note': note[:NOTE_SIZE_LIMIT]})
def check(self):
"""
Send data if any in buffer
"""
self.log.debug("KPI bulk buffer len: %s", len(self.kpi_buffer))
if self.last_dispatch < (time.time() - self.send_interval):
self.last_dispatch = time.time()
if self.send_data and len(self.kpi_buffer):
self.__send_data(self.kpi_buffer)
self.kpi_buffer = []
if self.send_monitoring:
self.__send_monitoring()
return super(BlazeMeterUploader, self).check()
def __send_data(self, data, do_check=True, is_final=False):
"""
:type data: list[bzt.modules.aggregator.DataPoint]
"""
if not self._session:
return
self.engine.aggregator.converter(data)
serialized = self._dpoint_serializer.get_kpi_body(data, is_final)
self._session.send_kpi_data(serialized, do_check)
def aggregated_second(self, data):
"""
Send online data
:param data: DataPoint
"""
if self.send_data:
self.kpi_buffer.append(data)
def monitoring_data(self, data):
if self.send_monitoring:
self.monitoring_buffer.record_data(data)
def __send_monitoring(self):
engine_id = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '')
if not engine_id:
engine_id = "0"
data = self.monitoring_buffer.get_monitoring_json(self._session)
self._session.send_monitoring_data(engine_id, data)
def __format_listing(self, zip_listing):
lines = []
for fname in sorted(zip_listing.keys()):
bytestr = humanize_bytes(zip_listing[fname])
if fname.startswith(self.engine.artifacts_dir):
fname = fname[len(self.engine.artifacts_dir) + 1:]
lines.append(bytestr + " " + fname)
return "\n".join(lines)
class MonitoringBuffer(object):
def __init__(self, size_limit, parent_log):
self.size_limit = size_limit
self.data = defaultdict(OrderedDict)
self.log = parent_log.getChild(self.__class__.__name__)
# data :: dict(datasource -> dict(interval -> datapoint))
# datapoint :: dict(metric -> value)
def record_data(self, data):
for monitoring_item in data:
item = copy.deepcopy(monitoring_item)
source = item.pop('source')
timestamp = int(item['ts'])
item['interval'] = 1
buff = self.data[source]
if timestamp in buff:
buff[timestamp].update(item)
else:
buff[timestamp] = item
sources = list(self.data)
for source in sources:
if len(self.data[source]) > self.size_limit:
self._downsample(self.data[source])
self.log.debug("Monitoring buffer size '%s': %s", source, len(self.data[source]))
def _downsample(self, buff):
size = 1
while len(buff) > self.size_limit:
self._merge_small_intervals(buff, size)
size += 1
def _merge_small_intervals(self, buff, size):
timestamps = list(buff)
merged_already = set()
for left, right in zip(timestamps, timestamps[1:]):
if left in merged_already:
continue
if buff[left]['interval'] <= size:
self._merge_datapoints(buff[left], buff[right])
buff.pop(right)
merged_already.add(left)
merged_already.add(right)
@staticmethod
def _merge_datapoints(left, right):
sum_size = float(left['interval'] + right['interval'])
for metric in set(right):
if metric in ('ts', 'interval'):
continue
if metric in left:
left[metric] = (left[metric] * left['interval'] + right[metric] * right['interval']) / sum_size
else:
left[metric] = right[metric]
left['interval'] = sum_size
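    # Editor's note -- worked example (hypothetical values), not original code.
    # _merge_datapoints() performs an interval-weighted average, so merging
    #     left  = {'ts': 100, 'interval': 1, 'cpu': 20.0}
    #     right = {'ts': 101, 'interval': 1, 'cpu': 40.0}
    # leaves left as {'ts': 100, 'interval': 2.0, 'cpu': 30.0}, i.e.
    # (20.0 * 1 + 40.0 * 1) / 2.0; 'ts' and 'interval' are bookkeeping fields
    # and are never averaged as metrics.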
def get_monitoring_json(self, session):
"""
:type session: Session
"""
results = {}
hosts = []
kpis = {}
for source, buff in iteritems(self.data):
for timestamp, item in iteritems(buff):
if source == 'local':
source = platform.node()
if source not in results:
results[source] = {
"name": source,
"intervals": OrderedDict()
}
if source not in hosts:
hosts.append(source)
src = results[source]
tstmp = timestamp * 1000
tstmp_key = '%d' % tstmp
if tstmp_key not in src['intervals']:
src['intervals'][tstmp_key] = {
"start": tstmp,
"duration": item['interval'] * 1000,
"indicators": {}
}
for field, value in iteritems(item):
if field.lower().startswith('conn-all'):
field = 'Connections'
elif field.lower().startswith('cpu'):
field = 'CPU'
elif field.lower().startswith('mem'):
field = 'Memory'
value *= 100
elif field == 'bytes-recv' or field.lower().startswith('net'):
field = 'Network I/O'
elif field == 'engine-loop':
field = 'Busy Taurus'
else:
continue # maybe one day BZA will accept all other metrics...
if field not in kpis:
kpis[field] = field
src['intervals'][tstmp_key]['indicators'][field] = {
"value": value,
"name": field,
"std": 0,
"mean": 0,
"sum": 0,
"min": 0,
"max": 0,
"sumOfSquares": 0,
"n": 1
}
kpis = {"Network I/O": "Network I/O", "Memory": "Memory", "CPU": "CPU", "Connections": "Connections"}
return {
"reportInfo": {
"sessionId": session['id'],
"timestamp": time.time(),
"userId": session['userId'],
"testId": session['testId'],
"type": "MONITOR",
"testName": ""
},
"kpis": kpis,
"hosts": hosts,
"results": results
}
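# ---------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the original module. The
# document returned by MonitoringBuffer.get_monitoring_json() has roughly this
# shape (host names and timestamps are placeholders):
#
#   {"reportInfo": {"sessionId": ..., "userId": ..., "testId": ...,
#                   "timestamp": ..., "type": "MONITOR", "testName": ""},
#    "kpis": {"CPU": "CPU", "Memory": "Memory",
#             "Network I/O": "Network I/O", "Connections": "Connections"},
#    "hosts": ["load-generator-1"],
#    "results": {"load-generator-1": {
#        "name": "load-generator-1",
#        "intervals": {"1499900000000": {"start": 1499900000000,
#                                        "duration": 1000,
#                                        "indicators": {"CPU": {...}, ...}}}}}}
#
# Only the metrics renamed in the loop above (CPU, Memory, Network I/O,
# Connections, Busy Taurus) ever reach the "indicators" section.
# ---------------------------------------------------------------------------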
class DatapointSerializer(object):
def __init__(self, owner):
"""
:type owner: BlazeMeterUploader
"""
super(DatapointSerializer, self).__init__()
self.owner = owner
        self.multi = 1000  # multiplier factor for reporting
def get_kpi_body(self, data_buffer, is_final):
# - reporting format:
# {labels: <data>, # see below
# sourceID: <id of BlazeMeterClient object>,
# [is_final: True]} # for last report
#
# - elements of 'data' are described in __get_label()
#
# - elements of 'intervals' are described in __get_interval()
# every interval contains info about response codes that were received on it.
report_items = BetterDict()
if data_buffer:
self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP])
self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP])
# following data is received in the cumulative way
for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]):
report_item = self.__get_label(label, kpi_set)
self.__add_errors(report_item, kpi_set) # 'Errors' tab
report_items[label] = report_item
# fill 'Timeline Report' tab with intervals data
# intervals are received in the additive way
if report_items:
for dpoint in data_buffer:
time_stamp = dpoint[DataPoint.TIMESTAMP]
for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
exc = TaurusInternalException('Cumulative KPISet is non-consistent')
report_item = report_items.get(label, exc)
report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp))
report_items = [report_items[key] for key in sorted(report_items.keys())] # convert dict to list
data = {"labels": report_items, "sourceID": id(self.owner)}
if is_final:
data['final'] = True
return to_json(data)
@staticmethod
def __add_errors(report_item, kpi_set):
errors = kpi_set[KPISet.ERRORS]
for error in errors:
if error["type"] == KPISet.ERRTYPE_ERROR:
report_item['errors'].append({
'm': error['msg'],
"rc": error['rc'],
"count": error['cnt'],
})
elif error["type"] == KPISet.ERRTYPE_SUBSAMPLE:
report_item['failedEmbeddedResources'].append({
"count": error['cnt'],
"rm": error['msg'],
"rc": error['rc'],
"url": list(error['urls'])[0] if error['urls'] else None,
})
else:
report_item['assertions'].append({
'failureMessage': error['msg'],
'name': error['tag'] if error['tag'] else 'All Assertions',
'failures': error['cnt']
})
def __get_label(self, name, cumul):
return {
"n": cumul[KPISet.SAMPLE_COUNT], # total count of samples
"name": name if name else 'ALL', # label
"interval": 1, # not used
"intervals": [], # list of intervals, fill later
"samplesNotCounted": 0, # not used
"assertionsNotCounted": 0, # not used
"failedEmbeddedResources": [], # not used
"failedEmbeddedResourcesSpilloverCount": 0, # not used
"otherErrorsCount": 0, # not used
"errors": [], # list of errors, fill later
"assertions": [], # list of assertions, fill later
"percentileHistogram": [], # not used
"percentileHistogramLatency": [], # not used
"percentileHistogramBytes": [], # not used
"empty": False, # not used
"summary": self.__get_summary(cumul) # summary info
}
def __get_summary(self, cumul):
return {
"first": self.owner.first_ts,
"last": self.owner.last_ts,
"duration": self.owner.last_ts - self.owner.first_ts,
"failed": cumul[KPISet.FAILURES],
"hits": cumul[KPISet.SAMPLE_COUNT],
"avg": int(self.multi * cumul[KPISet.AVG_RESP_TIME]),
"min": int(self.multi * cumul[KPISet.PERCENTILES]["0.0"]) if "0.0" in cumul[KPISet.PERCENTILES] else 0,
"max": int(self.multi * cumul[KPISet.PERCENTILES]["100.0"]) if "100.0" in cumul[KPISet.PERCENTILES] else 0,
"std": int(self.multi * cumul[KPISet.STDEV_RESP_TIME]),
"tp90": int(self.multi * cumul[KPISet.PERCENTILES]["90.0"]) if "90.0" in cumul[KPISet.PERCENTILES] else 0,
"tp95": int(self.multi * cumul[KPISet.PERCENTILES]["95.0"]) if "95.0" in cumul[KPISet.PERCENTILES] else 0,
"tp99": int(self.multi * cumul[KPISet.PERCENTILES]["99.0"]) if "99.0" in cumul[KPISet.PERCENTILES] else 0,
"latencyAvg": int(self.multi * cumul[KPISet.AVG_LATENCY]),
"latencyMax": 0,
"latencyMin": 0,
"latencySTD": 0,
"bytes": cumul[KPISet.BYTE_COUNT],
"bytesMax": 0,
"bytesMin": 0,
"bytesAvg": int(cumul[KPISet.BYTE_COUNT] / float(cumul[KPISet.SAMPLE_COUNT])),
"bytesSTD": 0,
"otherErrorsSpillcount": 0,
}
def __get_interval(self, item, time_stamp):
# rc_list - list of info about response codes:
# {'n': <number of code encounters>,
        # 'f': <number of failed requests (e.g. important for assertions)>
# 'rc': <string value of response code>}
rc_list = []
for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
fails = [err['cnt'] for err in item[KPISet.ERRORS] if str(err['rc']) == r_code]
rc_list.append({"n": cnt, 'f': fails, "rc": r_code})
return {
"ec": item[KPISet.FAILURES],
"ts": time_stamp,
"na": item[KPISet.CONCURRENCY],
"n": item[KPISet.SAMPLE_COUNT],
"failed": item[KPISet.FAILURES],
"rc": rc_list,
"t": {
"min": int(self.multi * item[KPISet.PERCENTILES]["0.0"]) if "0.0" in item[KPISet.PERCENTILES] else 0,
"max": int(self.multi * item[KPISet.PERCENTILES]["100.0"]) if "100.0" in item[
KPISet.PERCENTILES] else 0,
"sum": self.multi * item[KPISet.AVG_RESP_TIME] * item[KPISet.SAMPLE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": self.multi * item[KPISet.STDEV_RESP_TIME],
"avg": self.multi * item[KPISet.AVG_RESP_TIME]
},
"lt": {
"min": 0,
"max": 0,
"sum": self.multi * item[KPISet.AVG_LATENCY] * item[KPISet.SAMPLE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": 0,
"avg": self.multi * item[KPISet.AVG_LATENCY]
},
"by": {
"min": 0,
"max": 0,
"sum": item[KPISet.BYTE_COUNT],
"n": item[KPISet.SAMPLE_COUNT],
"std": 0,
"avg": item[KPISet.BYTE_COUNT] / float(item[KPISet.SAMPLE_COUNT])
},
} | en | 0.78294 | Module for reporting into http://www.blazemeter.com/ service Copyright 2015 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Reporter class :type _test: bzt.bza.Test :type _master: bzt.bza.Master :type _session: bzt.bza.Session Read options for uploading, check that they're sane # usual fields # direct data feeding case # to check connectivity and auth Initiate online test Start online test #/masters/%s' % self._master['id'] Compress all files in artifacts dir to single zipfile :rtype: (io.BytesIO,dict) # 10MB # upload logs unconditionally If token provided, upload artifacts folder contents and bzt.log Upload results if possible # NOTE: you dirty one... Finish online test Send data if any in buffer :type data: list[bzt.modules.aggregator.DataPoint] Send online data :param data: DataPoint # data :: dict(datasource -> dict(interval -> datapoint)) # datapoint :: dict(metric -> value) :type session: Session # maybe one day BZA will accept all other metrics... :type owner: BlazeMeterUploader # miltiplier factor for reporting # - reporting format: # {labels: <data>, # see below # sourceID: <id of BlazeMeterClient object>, # [is_final: True]} # for last report # # - elements of 'data' are described in __get_label() # # - elements of 'intervals' are described in __get_interval() # every interval contains info about response codes that were received on it. # following data is received in the cumulative way # 'Errors' tab # fill 'Timeline Report' tab with intervals data # intervals are received in the additive way # convert dict to list # total count of samples # label # not used # list of intervals, fill later # not used # not used # not used # not used # not used # list of errors, fill later # list of assertions, fill later # not used # not used # not used # not used # summary info # rc_list - list of info about response codes: # {'n': <number of code encounters>, # 'f': <number of failed request (e.q. important for assertions)> # 'rc': <string value of response code>} | 1.566295 | 2 |
nitorch/nn/losses/_spatial.py | wyli/nitorch | 1 | 9379 | """
Losses that assume an underlying spatial organization
(gradients, curvature, etc.)
"""
import torch
import torch.nn as tnn
from nitorch.core.pyutils import make_list, prod
from nitorch.core.utils import slice_tensor
from nitorch.spatial import diff1d
from ._base import Loss
class LocalFeatures(tnn.Module):
"""Base class for feature extractors.
Is it really useful?
"""
def __init__(self, bound='dct2', voxel_size=1, *args, **kwargs):
"""
Parameters
----------
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
voxel_size : float or list[float], default=1
Voxel size
"""
super().__init__(*args, **kwargs)
self.bound = bound
self.voxel_size = voxel_size
class Diff(LocalFeatures):
"""Finite differences."""
def __init__(self, order=1, side='c', dim=None, *args, **kwargs):
"""
Parameters
----------
order : int, default=1
Finite differences order
side : {'c', 'f', 'b'} or list[{'c', 'f', 'b'}], default='c'
            Type of finite differences to extract about each voxel:
* 'c' : central -> `g[i] = (x[i+1] - x[i-1])/2`
* 'f' : forward -> `g[i] = (x[i+1] - x[i])`
* 'b' : backward -> `g[i] = (x[i] - x[i-1])`
dim : int or list[int], optional
Dimensions along which to compute the finite differences.
By default, all except the first two (batch and channel).
bound : BoundType or list[BoundType], default='dct2'
Boundary conditions, used to compute derivatives at the edges.
voxel_size : float or list[float], default=1
Voxel size
reduction : {'mean', 'sum'} or callable, default='mean'
Type of reduction to apply.
"""
super().__init__(*args, **kwargs)
self.order = order
self.side = side
self.dim = dim
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor with shape (batch, channel, *spatial)
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
g : tensor
Finite differences with shape
(batch, channel, *spatial, len(dim), len(side))
If `dim` or `side` are scalars, not lists, their respective
dimension is dropped in the output tensor.
E.g., if `side='c'`, the output shape is
(batch, channel, *spatial, len(dim))
"""
order = overload.get('order', self.order)
        # do not wrap in make_list yet, so that a scalar `side` is detected below
        side = overload.get('side', self.side)
drop_side_dim = not isinstance(side, (tuple, list))
side = make_list(side)
dim = overload.get('dim', self.dim)
dim = list(range(2, x.dim())) if dim is None else dim
drop_dim_dim = not isinstance(dim, (tuple, list))
dim = make_list(dim)
nb_dim = len(dim)
voxel_size = overload.get('voxel_size', self.voxel_size)
voxel_size = make_list(voxel_size, nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
diffs = []
for d, vx, bnd in zip(dim, voxel_size, bound):
sides = []
for s in side:
grad = diff1d(x, order=order, dim=d, voxel_size=vx,
side=s, bound=bnd)
sides.append(grad)
sides = torch.stack(sides, dim=-1)
diffs.append(sides)
diffs = torch.stack(diffs, dim=-2)
if drop_dim_dim:
diffs = slice_tensor(diffs, 0, dim=-2)
if drop_side_dim:
diffs = slice_tensor(diffs, 0, dim=-1)
return diffs
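# Editor's sketch -- hypothetical helper, not part of the original module; the
# tensor shape and constructor arguments below are assumptions for illustration.
def _example_diff_usage():  # pragma: no cover
    """Show the output layout of Diff for a list of sides."""
    x = torch.randn(2, 1, 16, 16, 16)                  # (batch, channel, *spatial)
    diff = Diff(order=1, side=['f', 'b'], bound='dct2', voxel_size=1)
    g = diff(x)
    # One finite difference per spatial dimension and per side:
    # g.shape == (2, 1, 16, 16, 16, 3, 2)
    return g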
class MembraneLoss(Loss):
"""Compute the membrane energy (squared gradients) of a tensor.
The membrane energy of a field is the integral of its squared
gradient magnitude (l2 norm). This class extends this concept to
other norms of the gradient (l1, l{1,2}).
In the l2 case, if we name "f" the unit of the field and "m" the
spatial unit of a voxel, the output loss has unit `(f/m)**2`.
If `factor` is used to weight each voxel by its volume (as should
be done in a proper integration) the unit becomes
`(f/m)**2 * m**d = f**2 * m**(d-2)`.
In the l1 case, it is `f/m` in the absence of weighting and
`f * m**(d-1)` with volume weighting.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
*args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, direction, side)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
# Compute spatial gradients
#
# TODO: when penalty == 'l2', for some boundary conditions, there's no
# need to compute both forward and backward gradients as they are
# the same (but shifted). For now, to avoid having to detect which
# cases can be accelerated, I always compute both (more general).
loss = Diff(side=['f', 'b'], bound=bound, voxel_size=voxel_size)(x)
loss = loss.square()
# Apply l1
if l1 not in (None, False):
if l1 is True:
loss = loss.sqrt()
else:
l1 = make_list(l1)
loss = loss.sum(dim=l1).sqrt() # TODO: use self.reduction instead of sum?
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
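# Editor's sketch -- hypothetical helper, not part of the original module; the
# field shape and voxel size below are arbitrary assumptions.
def _example_membrane_loss():  # pragma: no cover
    """Penalize the squared gradients of a random scalar field."""
    field = torch.randn(1, 1, 32, 32, 32)              # (batch, channel, *spatial)
    penalty = MembraneLoss(voxel_size=[1., 1., 2.])    # anisotropic voxels
    return penalty(field)                              # scalar under the default reduction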
class BendingLoss(Loss):
"""Compute the bending energy (squared gradients) of a tensor.
The bending energy of a field is the integral of its squared
second-order derivatives magnitude (l2 norm).
This class extends this concept to other norms of the gradient
(l1, l{1,2}).
In the l2 case, if we name "f" the unit of the field and "m" the
spatial unit of a voxel, the output loss has unit `(f/m**2)**2`.
If `factor` is used to weight each voxel by its volume (as should
be done in a proper integration) the unit becomes
`(f/m**2)**2 * m**d = f**2 * m**(d-4)`.
In the l1 case, it is `f/m**2` in the absence of weighting and
`f * m**(d-2)` with volume weighting.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
*args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, direction)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
# Compute spatial gradients
loss = Diff(order=2, side='c', bound=bound, voxel_size=voxel_size)(x)
loss = loss.square()
# Apply l1
if l1 not in (None, False):
if l1 is True:
loss = loss.sqrt()
else:
l1 = make_list(l1)
loss = loss.sum(dim=l1).sqrt()
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
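# Editor's sketch -- hypothetical helper, not part of the original module; the
# regularization weights are placeholders chosen for illustration only.
def _example_membrane_plus_bending():  # pragma: no cover
    """Combine first- and second-order smoothness penalties on one field."""
    field = torch.randn(1, 1, 32, 32, 32)              # (batch, channel, *spatial)
    return 0.1 * MembraneLoss()(field) + 0.01 * BendingLoss()(field)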
class LameShearLoss(Loss):
"""Strain-part of the (Linear)-Elastic energy (penalty on shears).
    μ = second Lame constant = shear modulus
    The shear energy of a deformation field is the integral of the squared
    magnitude (l2 norm) of the symmetric part of its Jacobian.
This class extends this concept to other norms of the gradient
(l1, l{1,2}).
In the l2 case, E = sum_{i != j} (dv[i]/dx[j]) ** 2.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
exclude_zooms=False, *args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, side)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
            Here, `channel` maps to elements of the Jacobian matrix, while
            `side` maps to the combination of sides (forward/backward)
used when extracting finite differences. Therefore, the
number of channels is dim*(dim+1)//2 and the number of sides
is 4.
exclude_zooms : bool, default=False
Do not include diagonal elements of the Jacobian in the
penalty (i.e., penalize only shears)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
self.exclude_zooms = exclude_zooms
def forward(self, x, **overload):
"""
Parameters
----------
x : (batch, ndim, *spatial) tensor
Input displacement tensor (in channel first order)
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
exclude_zooms = overload.get('exclude_zooms', self.exclude_zooms)
# Compute spatial gradients
loss_diag = [] # diagonal elements of the Jacobian
        loss_offdiag = []  # off-diagonal elements of the (symmetric) Jacobian
for i in range(nb_dim):
# symmetric part
x_i = x[:, i:i+1, ...]
subloss_diag = []
subloss_offdiag = []
for j in range(nb_dim):
for side_i in ('f', 'b'):
diff = Diff(dim=[j+2], side=side_i, bound=bound,
voxel_size=voxel_size)
diff_ij = diff(x_i)
if i == j:
# diagonal elements
if not exclude_zooms:
subloss_diag.append(diff_ij)
else:
# off diagonal elements
x_j = x[:, j:j+1, ...]
for side_j in ('f', 'b'):
diff = Diff(dim=[i+2], side=side_j, bound=bound,
voxel_size=voxel_size)
diff_ji = diff(x_j)
subloss_offdiag.append((diff_ij + diff_ji)/2)
if not exclude_zooms:
loss_diag.append(torch.stack(subloss_diag, dim=-1))
loss_offdiag.append(torch.stack(subloss_offdiag, dim=-1))
if not exclude_zooms:
loss_diag = torch.cat(loss_diag, dim=1)
loss_offdiag = torch.cat(loss_offdiag, dim=1)
if l1 not in (None, False):
# Apply l1 reduction
if l1 is True:
if not exclude_zooms:
loss_diag = loss_diag.abs()
loss_offdiag = loss_offdiag.abs()
else:
l1 = make_list(l1)
if not exclude_zooms:
loss_diag = loss_diag.square().sum(dim=l1, keepdim=True).sqrt()
loss_offdiag = loss_offdiag.square().sum(dim=l1, keepdim=True).sqrt()
else:
# Apply l2 reduction
if not exclude_zooms:
loss_diag = loss_diag.square()
loss_offdiag = loss_offdiag.square()
# Mean reduction across sides
if not exclude_zooms:
loss_diag = loss_diag.mean(dim=-1)
loss_offdiag = loss_offdiag.mean(dim=-1)
# Weighted reduction across elements
if not exclude_zooms:
if loss_diag.shape[1] == 1:
# element dimension already reduced -> we need a small hack
loss = (loss_diag.square() + 2*loss_offdiag.square()) / (nb_dim**2)
loss = loss.sum(dim=1, keepdim=True).sqrt()
else:
# simple weighted average
loss = (loss_diag.sum(dim=1, keepdim=True) +
loss_offdiag.sum(dim=1, keepdim=True)*2) / (nb_dim**2)
else:
loss = loss_offdiag.sum(dim=1, keepdim=True)*2 / (nb_dim**2)
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
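# Editor's sketch -- hypothetical helper, not part of the original module; the
# displacement shape is an assumption (one channel per spatial dimension, as
# expected by LameShearLoss.forward).
def _example_shear_loss():  # pragma: no cover
    """Penalize only the shear part of a 3D displacement field."""
    disp = torch.randn(1, 3, 16, 16, 16)               # (batch, ndim, *spatial)
    shear = LameShearLoss(exclude_zooms=True)
    return shear(disp)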
class LameZoomLoss(Loss):
"""Compression-part of the (Linear)-Elastic energy (penalty on volume change).
    λ = first Lame constant
    The compression energy of a deformation field is the integral of the squared
    magnitude (l2 norm) of the trace of its Jacobian.
    This class extends this concept to other norms of the gradient
    (l1, l{1,2}).
    In the l2 case, E = sum_i (dv[i]/dx[i]) ** 2.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
*args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, direction, side)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
# Compute spatial gradients
loss = []
for i in range(nb_dim):
x_i = x[:, i:i+1, ...]
            # offset by 2 to skip the batch and channel dimensions, as in LameShearLoss
            diff = Diff(dim=[i + 2], side=['f', 'b'], bound=bound,
voxel_size=voxel_size)
loss.append(diff(x_i))
loss = torch.cat(loss, dim=1)
loss = loss.square()
# Apply l1
if l1 not in (None, False):
if l1 is True:
loss = loss.sqrt()
else:
l1 = make_list(l1)
loss = loss.sum(dim=l1, keepdim=True).sqrt()
# Mean reduction across sides
loss = loss.mean(dim=-1)
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
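# Editor's sketch -- hypothetical helper, not part of the original module; the
# Lame-like weights (mu, lam) are placeholders, not recommended values.
def _example_linear_elastic_energy():  # pragma: no cover
    """Mix the shear and zoom terms into a linear-elastic style penalty."""
    mu, lam = 0.05, 0.2
    disp = torch.randn(1, 3, 16, 16, 16)               # (batch, ndim, *spatial)
    return mu * LameShearLoss()(disp) + lam * LameZoomLoss()(disp)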
| """
Losses that assume an underlying spatial organization
(gradients, curvature, etc.)
"""
import torch
import torch.nn as tnn
from nitorch.core.pyutils import make_list, prod
from nitorch.core.utils import slice_tensor
from nitorch.spatial import diff1d
from ._base import Loss
class LocalFeatures(tnn.Module):
"""Base class for feature extractors.
Is it really useful?
"""
def __init__(self, bound='dct2', voxel_size=1, *args, **kwargs):
"""
Parameters
----------
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
voxel_size : float or list[float], default=1
Voxel size
"""
super().__init__(*args, **kwargs)
self.bound = bound
self.voxel_size = voxel_size
class Diff(LocalFeatures):
"""Finite differences."""
def __init__(self, order=1, side='c', dim=None, *args, **kwargs):
"""
Parameters
----------
order : int, default=1
Finite differences order
side : {'c', 'f', 'b'} or list[{'c', 'f', 'b'}], default='c'
            Type of finite differences to extract about each voxel:
* 'c' : central -> `g[i] = (x[i+1] - x[i-1])/2`
* 'f' : forward -> `g[i] = (x[i+1] - x[i])`
* 'b' : backward -> `g[i] = (x[i] - x[i-1])`
dim : int or list[int], optional
Dimensions along which to compute the finite differences.
By default, all except the first two (batch and channel).
bound : BoundType or list[BoundType], default='dct2'
Boundary conditions, used to compute derivatives at the edges.
voxel_size : float or list[float], default=1
Voxel size
reduction : {'mean', 'sum'} or callable, default='mean'
Type of reduction to apply.
"""
super().__init__(*args, **kwargs)
self.order = order
self.side = side
self.dim = dim
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor with shape (batch, channel, *spatial)
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
g : tensor
Finite differences with shape
(batch, channel, *spatial, len(dim), len(side))
If `dim` or `side` are scalars, not lists, their respective
dimension is dropped in the output tensor.
E.g., if `side='c'`, the output shape is
(batch, channel, *spatial, len(dim))
"""
order = overload.get('order', self.order)
        # do not wrap in make_list yet, so that a scalar `side` is detected below
        side = overload.get('side', self.side)
drop_side_dim = not isinstance(side, (tuple, list))
side = make_list(side)
dim = overload.get('dim', self.dim)
dim = list(range(2, x.dim())) if dim is None else dim
drop_dim_dim = not isinstance(dim, (tuple, list))
dim = make_list(dim)
nb_dim = len(dim)
voxel_size = overload.get('voxel_size', self.voxel_size)
voxel_size = make_list(voxel_size, nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
diffs = []
for d, vx, bnd in zip(dim, voxel_size, bound):
sides = []
for s in side:
grad = diff1d(x, order=order, dim=d, voxel_size=vx,
side=s, bound=bnd)
sides.append(grad)
sides = torch.stack(sides, dim=-1)
diffs.append(sides)
diffs = torch.stack(diffs, dim=-2)
if drop_dim_dim:
diffs = slice_tensor(diffs, 0, dim=-2)
if drop_side_dim:
diffs = slice_tensor(diffs, 0, dim=-1)
return diffs
class MembraneLoss(Loss):
"""Compute the membrane energy (squared gradients) of a tensor.
The membrane energy of a field is the integral of its squared
gradient magnitude (l2 norm). This class extends this concept to
other norms of the gradient (l1, l{1,2}).
In the l2 case, if we name "f" the unit of the field and "m" the
spatial unit of a voxel, the output loss has unit `(f/m)**2`.
If `factor` is used to weight each voxel by its volume (as should
be done in a proper integration) the unit becomes
`(f/m)**2 * m**d = f**2 * m**(d-2)`.
In the l1 case, it is `f/m` in the absence of weighting and
`f * m**(d-1)` with volume weighting.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
*args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, direction, side)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
# Compute spatial gradients
#
# TODO: when penalty == 'l2', for some boundary conditions, there's no
# need to compute both forward and backward gradients as they are
# the same (but shifted). For now, to avoid having to detect which
# cases can be accelerated, I always compute both (more general).
loss = Diff(side=['f', 'b'], bound=bound, voxel_size=voxel_size)(x)
loss = loss.square()
# Apply l1
if l1 not in (None, False):
if l1 is True:
loss = loss.sqrt()
else:
l1 = make_list(l1)
loss = loss.sum(dim=l1).sqrt() # TODO: use self.reduction instead of sum?
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
class BendingLoss(Loss):
"""Compute the bending energy (squared gradients) of a tensor.
The bending energy of a field is the integral of its squared
second-order derivatives magnitude (l2 norm).
This class extends this concept to other norms of the gradient
(l1, l{1,2}).
In the l2 case, if we name "f" the unit of the field and "m" the
spatial unit of a voxel, the output loss has unit `(f/m**2)**2`.
If `factor` is used to weight each voxel by its volume (as should
be done in a proper integration) the unit becomes
`(f/m**2)**2 * m**d = f**2 * m**(d-4)`.
In the l1 case, it is `f/m**2` in the absence of weighting and
`f * m**(d-2)` with volume weighting.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
*args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, direction)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
# Compute spatial gradients
loss = Diff(order=2, side='c', bound=bound, voxel_size=voxel_size)(x)
loss = loss.square()
# Apply l1
if l1 not in (None, False):
if l1 is True:
loss = loss.sqrt()
else:
l1 = make_list(l1)
loss = loss.sum(dim=l1).sqrt()
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
class LameShearLoss(Loss):
"""Strain-part of the (Linear)-Elastic energy (penalty on shears).
    μ = second Lame constant = shear modulus
    The shear energy of a deformation field is the integral of the squared
    magnitude (l2 norm) of the symmetric part of its Jacobian.
This class extends this concept to other norms of the gradient
(l1, l{1,2}).
In the l2 case, E = sum_{i != j} (dv[i]/dx[j]) ** 2.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
exclude_zooms=False, *args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, side)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
            Here, `channel` maps to elements of the Jacobian matrix, while
            `side` maps to the combination of sides (forward/backward)
used when extracting finite differences. Therefore, the
number of channels is dim*(dim+1)//2 and the number of sides
is 4.
exclude_zooms : bool, default=False
Do not include diagonal elements of the Jacobian in the
penalty (i.e., penalize only shears)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
self.exclude_zooms = exclude_zooms
def forward(self, x, **overload):
"""
Parameters
----------
x : (batch, ndim, *spatial) tensor
Input displacement tensor (in channel first order)
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
exclude_zooms = overload.get('exclude_zooms', self.exclude_zooms)
# Compute spatial gradients
loss_diag = [] # diagonal elements of the Jacobian
        loss_offdiag = []  # off-diagonal elements of the (symmetric) Jacobian
for i in range(nb_dim):
# symmetric part
x_i = x[:, i:i+1, ...]
subloss_diag = []
subloss_offdiag = []
for j in range(nb_dim):
for side_i in ('f', 'b'):
diff = Diff(dim=[j+2], side=side_i, bound=bound,
voxel_size=voxel_size)
diff_ij = diff(x_i)
if i == j:
# diagonal elements
if not exclude_zooms:
subloss_diag.append(diff_ij)
else:
# off diagonal elements
x_j = x[:, j:j+1, ...]
for side_j in ('f', 'b'):
diff = Diff(dim=[i+2], side=side_j, bound=bound,
voxel_size=voxel_size)
diff_ji = diff(x_j)
subloss_offdiag.append((diff_ij + diff_ji)/2)
if not exclude_zooms:
loss_diag.append(torch.stack(subloss_diag, dim=-1))
loss_offdiag.append(torch.stack(subloss_offdiag, dim=-1))
if not exclude_zooms:
loss_diag = torch.cat(loss_diag, dim=1)
loss_offdiag = torch.cat(loss_offdiag, dim=1)
if l1 not in (None, False):
# Apply l1 reduction
if l1 is True:
if not exclude_zooms:
loss_diag = loss_diag.abs()
loss_offdiag = loss_offdiag.abs()
else:
l1 = make_list(l1)
if not exclude_zooms:
loss_diag = loss_diag.square().sum(dim=l1, keepdim=True).sqrt()
loss_offdiag = loss_offdiag.square().sum(dim=l1, keepdim=True).sqrt()
else:
# Apply l2 reduction
if not exclude_zooms:
loss_diag = loss_diag.square()
loss_offdiag = loss_offdiag.square()
# Mean reduction across sides
if not exclude_zooms:
loss_diag = loss_diag.mean(dim=-1)
loss_offdiag = loss_offdiag.mean(dim=-1)
# Weighted reduction across elements
if not exclude_zooms:
if loss_diag.shape[1] == 1:
# element dimension already reduced -> we need a small hack
loss = (loss_diag.square() + 2*loss_offdiag.square()) / (nb_dim**2)
loss = loss.sum(dim=1, keepdim=True).sqrt()
else:
# simple weighted average
loss = (loss_diag.sum(dim=1, keepdim=True) +
loss_offdiag.sum(dim=1, keepdim=True)*2) / (nb_dim**2)
else:
loss = loss_offdiag.sum(dim=1, keepdim=True)*2 / (nb_dim**2)
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
class LameZoomLoss(Loss):
"""Compression-part of the (Linear)-Elastic energy (penalty on volume change).
    λ = first Lame constant
    The compression energy of a deformation field is the integral of the squared
    magnitude (l2 norm) of the trace of its Jacobian.
    This class extends this concept to other norms of the gradient
    (l1, l{1,2}).
    In the l2 case, E = sum_i (dv[i]/dx[i]) ** 2.
"""
def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None,
*args, **kwargs):
"""
Parameters
----------
voxel_size : float or list[float], default=1
Voxel size. Useful for anisotropic tensors (where the
sampling rate is higher in some directions than others).
factor : float or list[float], default=1
Scale the loss by a per-dimension factor. Useful when
working with resized tensor to compensate for different
number of voxels.
bound : BoundType, default='dct2'
Boundary conditions, used to compute derivatives at the edges.
l1 : bool or int or list[int], default=None
Dimensions along which to apply a square root reduction
('l1 norm'), after taking the square. Dimensions are
those of the gradient map with shape
(batch, channel, *spatial, direction, side)
* False: nowhere == (squared) l2 norm
* True: everywhere == l1 norm
* Otherwise: l_{1,2} norm (group sparsity)
"""
super().__init__(*args, **kwargs)
self.voxel_size = voxel_size
self.factor = factor
self.bound = bound
self.l1 = l1
def forward(self, x, **overload):
"""
Parameters
----------
x : tensor
Input tensor
overload : dict
All parameters defined at build time can be overridden
at call time.
Returns
-------
loss : scalar or tensor
The output shape depends on the type of reduction used.
If 'mean' or 'sum', this function returns a scalar.
"""
nb_dim = x.dim() - 2
voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim)
factor = make_list(overload.get('factor', self.factor), nb_dim)
bound = make_list(overload.get('bound', self.bound), nb_dim)
l1 = overload.get('l1', self.l1)
# Compute spatial gradients
loss = []
for i in range(nb_dim):
x_i = x[:, i:i+1, ...]
            # offset by 2 to skip the batch and channel dimensions, as in LameShearLoss
            diff = Diff(dim=[i + 2], side=['f', 'b'], bound=bound,
voxel_size=voxel_size)
loss.append(diff(x_i))
loss = torch.cat(loss, dim=1)
loss = loss.square()
# Apply l1
if l1 not in (None, False):
if l1 is True:
loss = loss.sqrt()
else:
l1 = make_list(l1)
loss = loss.sum(dim=l1, keepdim=True).sqrt()
# Mean reduction across sides
loss = loss.mean(dim=-1)
# Reduce
loss = super().forward(loss)
# Scale
factor = prod(factor)
if factor != 1:
loss = loss * factor
return loss
| en | 0.746772 | Losses that assume an underlying spatial organization (gradients, curvature, etc.) Base class for feature extractors. Is it really useful? Parameters ---------- bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. voxel_size : float or list[float], default=1 Voxel size Finite differences. Parameters ---------- order : int, default=1 Finite differences order side : {'c', 'f', 'b'} or list[{'c', 'f', 'b'}], default='c' Type of finite-differencesto extract about each voxel: * 'c' : central -> `g[i] = (x[i+1] - x[i-1])/2` * 'f' : forward -> `g[i] = (x[i+1] - x[i])` * 'b' : backward -> `g[i] = (x[i] - x[i-1])` dim : int or list[int], optional Dimensions along which to compute the finite differences. By default, all except the first two (batch and channel). bound : BoundType or list[BoundType], default='dct2' Boundary conditions, used to compute derivatives at the edges. voxel_size : float or list[float], default=1 Voxel size reduction : {'mean', 'sum'} or callable, default='mean' Type of reduction to apply. Parameters ---------- x : tensor Input tensor with shape (batch, channel, *spatial) overload : dict All parameters defined at build time can be overridden at call time. Returns ------- g : tensor Finite differences with shape (batch, channel, *spatial, len(dim), len(side)) If `dim` or `side` are scalars, not lists, their respective dimension is dropped in the output tensor. E.g., if `side='c'`, the output shape is (batch, channel, *spatial, len(dim)) Compute the membrane energy (squared gradients) of a tensor. The membrane energy of a field is the integral of its squared gradient magnitude (l2 norm). This class extends this concept to other norms of the gradient (l1, l{1,2}). In the l2 case, if we name "f" the unit of the field and "m" the spatial unit of a voxel, the output loss has unit `(f/m)**2`. If `factor` is used to weight each voxel by its volume (as should be done in a proper integration) the unit becomes `(f/m)**2 * m**d = f**2 * m**(d-2)`. In the l1 case, it is `f/m` in the absence of weighting and `f * m**(d-1)` with volume weighting. Parameters ---------- voxel_size : float or list[float], default=1 Voxel size. Useful for anisotropic tensors (where the sampling rate is higher in some directions than others). factor : float or list[float], default=1 Scale the loss by a per-dimension factor. Useful when working with resized tensor to compensate for different number of voxels. bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. l1 : bool or int or list[int], default=None Dimensions along which to apply a square root reduction ('l1 norm'), after taking the square. Dimensions are those of the gradient map with shape (batch, channel, *spatial, direction, side) * False: nowhere == (squared) l2 norm * True: everywhere == l1 norm * Otherwise: l_{1,2} norm (group sparsity) Parameters ---------- x : tensor Input tensor overload : dict All parameters defined at build time can be overridden at call time. Returns ------- loss : scalar or tensor The output shape depends on the type of reduction used. If 'mean' or 'sum', this function returns a scalar. # Compute spatial gradients # # TODO: when penalty == 'l2', for some boundary conditions, there's no # need to compute both forward and backward gradients as they are # the same (but shifted). For now, to avoid having to detect which # cases can be accelerated, I always compute both (more general). 
# Apply l1 # TODO: use self.reduction instead of sum? # Reduce # Scale Compute the bending energy (squared gradients) of a tensor. The bending energy of a field is the integral of its squared second-order derivatives magnitude (l2 norm). This class extends this concept to other norms of the gradient (l1, l{1,2}). In the l2 case, if we name "f" the unit of the field and "m" the spatial unit of a voxel, the output loss has unit `(f/m**2)**2`. If `factor` is used to weight each voxel by its volume (as should be done in a proper integration) the unit becomes `(f/m**2)**2 * m**d = f**2 * m**(d-4)`. In the l1 case, it is `f/m**2` in the absence of weighting and `f * m**(d-2)` with volume weighting. Parameters ---------- voxel_size : float or list[float], default=1 Voxel size. Useful for anisotropic tensors (where the sampling rate is higher in some directions than others). factor : float or list[float], default=1 Scale the loss by a per-dimension factor. Useful when working with resized tensor to compensate for different number of voxels. bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. l1 : bool or int or list[int], default=None Dimensions along which to apply a square root reduction ('l1 norm'), after taking the square. Dimensions are those of the gradient map with shape (batch, channel, *spatial, direction) * False: nowhere == (squared) l2 norm * True: everywhere == l1 norm * Otherwise: l_{1,2} norm (group sparsity) Parameters ---------- x : tensor Input tensor overload : dict All parameters defined at build time can be overridden at call time. Returns ------- loss : scalar or tensor The output shape depends on the type of reduction used. If 'mean' or 'sum', this function returns a scalar. # Compute spatial gradients # Apply l1 # Reduce # Scale Strain-part of the (Linear)-Elastic energy (penalty on shears). = second Lame constant = shear modulus The shear energy of a deformation field is the integral of the square magnitude (l2 norm) of the symetric part diagonal terms of its Jacobian. This class extends this concept to other norms of the gradient (l1, l{1,2}). In the l2 case, E = sum_{i != j} (dv[i]/dx[j]) ** 2. Parameters ---------- voxel_size : float or list[float], default=1 Voxel size. Useful for anisotropic tensors (where the sampling rate is higher in some directions than others). factor : float or list[float], default=1 Scale the loss by a per-dimension factor. Useful when working with resized tensor to compensate for different number of voxels. bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. l1 : bool or int or list[int], default=None Dimensions along which to apply a square root reduction ('l1 norm'), after taking the square. Dimensions are those of the gradient map with shape (batch, channel, *spatial, side) * False: nowhere == (squared) l2 norm * True: everywhere == l1 norm * Otherwise: l_{1,2} norm (group sparsity) Here, `channel` map to elements of the Jacobian matrix, while `side` map to the combination of sides (forward/backward) used when extracting finite differences. Therefore, the number of channels is dim*(dim+1)//2 and the number of sides is 4. exclude_zooms : bool, default=False Do not include diagonal elements of the Jacobian in the penalty (i.e., penalize only shears) Parameters ---------- x : (batch, ndim, *spatial) tensor Input displacement tensor (in channel first order) overload : dict All parameters defined at build time can be overridden at call time. 
Returns ------- loss : scalar or tensor The output shape depends on the type of reduction used. If 'mean' or 'sum', this function returns a scalar. # Compute spatial gradients # diagonal elements of the Jacobian # off-diagonal elements of hte (symmetric) Jacobian # symmetric part # diagonal elements # off diagonal elements # Apply l1 reduction # Apply l2 reduction # Mean reduction across sides # Weighted reduction across elements # element dimension already reduced -> we need a small hack # simple weighted average # Reduce # Scale Compression-part of the (Linear)-Elastic energy (penalty on volume change). = first Lame constant The compression energy of a deformation field is the integral of the square magnitude (l2 norm) of the trace its Jacobian. This class extends this concept to other norms of the gradient (l1, l{1,2}). In the l2 case, E = sum_{ij} (dv[i]/dx[j] + dv[j]/dx[i]) ** 2. Parameters ---------- voxel_size : float or list[float], default=1 Voxel size. Useful for anisotropic tensors (where the sampling rate is higher in some directions than others). factor : float or list[float], default=1 Scale the loss by a per-dimension factor. Useful when working with resized tensor to compensate for different number of voxels. bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. l1 : bool or int or list[int], default=None Dimensions along which to apply a square root reduction ('l1 norm'), after taking the square. Dimensions are those of the gradient map with shape (batch, channel, *spatial, direction, side) * False: nowhere == (squared) l2 norm * True: everywhere == l1 norm * Otherwise: l_{1,2} norm (group sparsity) Parameters ---------- x : tensor Input tensor overload : dict All parameters defined at build time can be overridden at call time. Returns ------- loss : scalar or tensor The output shape depends on the type of reduction used. If 'mean' or 'sum', this function returns a scalar. # Compute spatial gradients # Apply l1 # Mean reduction across sides # Reduce # Scale | 2.42972 | 2 |
items/models.py | roberthtamayose/digitalmenu | 0 | 9380 | <reponame>roberthtamayose/digitalmenu<gh_stars>0
from django.db import models
from django.utils import timezone
class Categoria(models.Model):
nome = models.CharField(max_length=255)
def __str__(self):
return self.nome
class Item(models.Model):
nome = models.CharField(max_length=255)
data_criacao = models.DateTimeField(default=timezone.now)
descricao = models.TextField(blank=True)
categoria = models.ForeignKey(Categoria, on_delete=models.DO_NOTHING)
ocultar = models.BooleanField(default=False)
foto = models.ImageField(blank=True, upload_to='fotos/%y/%m/')
def __str__(self):
return self.nome
| from django.db import models
from django.utils import timezone
class Categoria(models.Model):
nome = models.CharField(max_length=255)
def __str__(self):
return self.nome
class Item(models.Model):
nome = models.CharField(max_length=255)
data_criacao = models.DateTimeField(default=timezone.now)
descricao = models.TextField(blank=True)
categoria = models.ForeignKey(Categoria, on_delete=models.DO_NOTHING)
ocultar = models.BooleanField(default=False)
foto = models.ImageField(blank=True, upload_to='fotos/%y/%m/')
def __str__(self):
return self.nome | none | 1 | 2.273124 | 2 |
|
app/services/__init__.py | zeroday0619/XenXenXenSe | 1 | 9381 | from app.services.console import Console
from app.services.server import Server
__main__ = ["server", "console"]
| from app.services.console import Console
from app.services.server import Server
__main__ = ["server", "console"]
| none | 1 | 1.127339 | 1 |
|
launch/twist_mux_launch.py | nuclearsandwich-ros/twist_mux-release | 31 | 9382 | #!/usr/bin/env python3
# Copyright 2020 Gaitech Korea Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
default_config_locks = os.path.join(get_package_share_directory('twist_mux'),
'config', 'twist_mux_locks.yaml')
default_config_topics = os.path.join(get_package_share_directory('twist_mux'),
'config', 'twist_mux_topics.yaml')
default_config_joystick = os.path.join(get_package_share_directory('twist_mux'),
'config', 'joystick.yaml')
return LaunchDescription([
DeclareLaunchArgument(
'config_locks',
default_value=default_config_locks,
description='Default locks config file'),
DeclareLaunchArgument(
'config_topics',
default_value=default_config_topics,
description='Default topics config file'),
DeclareLaunchArgument(
'config_joy',
default_value=default_config_joystick,
description='Default joystick config file'),
DeclareLaunchArgument(
'cmd_vel_out',
default_value='twist_mux/cmd_vel',
description='cmd vel output topic'),
Node(
package='twist_mux',
executable='twist_mux',
output='screen',
remappings={('/cmd_vel_out', LaunchConfiguration('cmd_vel_out'))},
parameters=[
LaunchConfiguration('config_locks'),
LaunchConfiguration('config_topics'),
LaunchConfiguration('config_joy')]
),
Node(
package='twist_mux',
executable='twist_marker',
output='screen',
remappings={('/twist', LaunchConfiguration('cmd_vel_out'))},
parameters=[{
'frame_id': 'base_link',
'scale': 1.0,
'vertical_position': 2.0}])
])
| #!/usr/bin/env python3
# Copyright 2020 Gaitech Korea Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def generate_launch_description():
default_config_locks = os.path.join(get_package_share_directory('twist_mux'),
'config', 'twist_mux_locks.yaml')
default_config_topics = os.path.join(get_package_share_directory('twist_mux'),
'config', 'twist_mux_topics.yaml')
default_config_joystick = os.path.join(get_package_share_directory('twist_mux'),
'config', 'joystick.yaml')
return LaunchDescription([
DeclareLaunchArgument(
'config_locks',
default_value=default_config_locks,
description='Default locks config file'),
DeclareLaunchArgument(
'config_topics',
default_value=default_config_topics,
description='Default topics config file'),
DeclareLaunchArgument(
'config_joy',
default_value=default_config_joystick,
description='Default joystick config file'),
DeclareLaunchArgument(
'cmd_vel_out',
default_value='twist_mux/cmd_vel',
description='cmd vel output topic'),
Node(
package='twist_mux',
executable='twist_mux',
output='screen',
remappings={('/cmd_vel_out', LaunchConfiguration('cmd_vel_out'))},
parameters=[
LaunchConfiguration('config_locks'),
LaunchConfiguration('config_topics'),
LaunchConfiguration('config_joy')]
),
Node(
package='twist_mux',
executable='twist_marker',
output='screen',
remappings={('/twist', LaunchConfiguration('cmd_vel_out'))},
parameters=[{
'frame_id': 'base_link',
'scale': 1.0,
'vertical_position': 2.0}])
])
| en | 0.826262 | #!/usr/bin/env python3 # Copyright 2020 Gaitech Korea Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Author: <NAME> | 1.925896 | 2 |
Tests/testLiveService.py | psu-capstone-teamD/ElementalAuth | 2 | 9383 | import sys
import unittest
import requests_mock
from mock import patch
sys.path.append('services/LiveService')
from LiveService import LiveService
L = LiveService()
baseURL = "https://yanexx65s8e1.live.elementalclouddev.com/api"
class LiveServiceTest(unittest.TestCase):
'''@patch('services.LiveService.LiveService.time', return_value=1502345833)
def testSetHeaders(self, mock_time):
headers = L.setHeaders("/schedules")
self.assertEqual(headers, {'X-Auth-Expires': '1502345863',
'X-Auth-Key': '9c9a72cd3a8feec48539f1943afbef8d',
'Content-type': 'application/xml',
'X-Auth-User': '',
'Accept': 'application/xml'})'''
@requests_mock.Mocker()
def testGetStatus(self, m):
m.get(baseURL + "/live_events/150/status", status_code=200)
resp = L.getLiveEventStatus(150)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetEvents(self, m):
m.get(baseURL + "/live_events", status_code=200)
m.get(baseURL + "/live_events?filter=running", status_code=200)
resp = L.getLiveEvents(None)
self.assertEqual(resp.status_code, 200)
resp = L.getLiveEvents("running")
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetEvent(self, m):
m.get(baseURL + "/live_events/164", status_code=200)
resp = L.getLiveEvent(164)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetSchedules(self, m):
m.get(baseURL + "/schedules", status_code=200)
resp = L.getSchedules()
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetLiveProfiles(self, m):
m.get(baseURL + "/live_event_profiles", status_code=200)
resp = L.getLiveProfiles()
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetLiveProfile(self, m):
m.get(baseURL + "/live_event_profiles/11", status_code=200)
resp = L.getLiveProfile(11)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testCreateLiveEvent(self, m):
with open('Tests/test_XML/live_event.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/live_events", status_code=201)
resp = L.createEvent(xml)
self.assertEqual(resp.status_code, 201)
@requests_mock.Mocker()
def testCreateSchedule(self, m):
with open('Tests/test_XML/schedule.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/schedules", status_code=201)
resp = L.createSchedule(xml)
self.assertEqual(resp.status_code, 201)
@requests_mock.Mocker()
def testCreateProfile(self, m):
with open('Tests/test_XML/schedule.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/schedules", status_code=201)
resp = L.createSchedule(xml)
self.assertEqual(resp.status_code, 201)
@requests_mock.Mocker()
def testUpdateEvent(self, m):
with open('Tests/test_XML/live_event.xml', 'r') as infile:
xml = infile.read()
m.put(baseURL + "/live_events/50", status_code=200)
resp = L.updateLiveEvent(50, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testUpdatePlaylist(self, m):
with open('Tests/test_XML/live_event.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/live_events/92/playlist", status_code=200)
resp = L.updatePlaylist(92, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testUpdateSchedule(self, m):
with open('Tests/test_XML/schedule.xml', 'r') as infile:
xml = infile.read()
m.put(baseURL + "/schedules/13", status_code=200)
resp = L.updateSchedule(13, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testUpdateProfile(self, m):
with open('Tests/test_XML/live_profile.xml', 'r') as infile:
xml = infile.read()
m.put(baseURL + "/live_event_profiles/33", status_code=200)
resp = L.updateProfile(33, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testRemoveLiveEvent(self, m):
m.delete(baseURL + "/live_events/191", status_code=200)
resp = L.removeEvent(191)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testRemoveSchedule(self, m):
m.delete(baseURL + "/schedules/13", status_code=200)
resp = L.removeSchedule(13)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testRemoveProfile(self, m):
m.delete(baseURL + "/live_event_profiles/33", status_code=200)
resp = L.removeProfile(33)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testStartEvent(self, m):
m.post(baseURL + "/live_events/50/start", status_code=200)
resp = L.startLiveEvent(50)
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
| import sys
import unittest
import requests_mock
from mock import patch
sys.path.append('services/LiveService')
from LiveService import LiveService
L = LiveService()
baseURL = "https://yanexx65s8e1.live.elementalclouddev.com/api"
class LiveServiceTest(unittest.TestCase):
'''@patch('services.LiveService.LiveService.time', return_value=1502345833)
def testSetHeaders(self, mock_time):
headers = L.setHeaders("/schedules")
self.assertEqual(headers, {'X-Auth-Expires': '1502345863',
'X-Auth-Key': '9c9a72cd3a8feec48539f1943afbef8d',
'Content-type': 'application/xml',
'X-Auth-User': '',
'Accept': 'application/xml'})'''
@requests_mock.Mocker()
def testGetStatus(self, m):
m.get(baseURL + "/live_events/150/status", status_code=200)
resp = L.getLiveEventStatus(150)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetEvents(self, m):
m.get(baseURL + "/live_events", status_code=200)
m.get(baseURL + "/live_events?filter=running", status_code=200)
resp = L.getLiveEvents(None)
self.assertEqual(resp.status_code, 200)
resp = L.getLiveEvents("running")
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetEvent(self, m):
m.get(baseURL + "/live_events/164", status_code=200)
resp = L.getLiveEvent(164)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetSchedules(self, m):
m.get(baseURL + "/schedules", status_code=200)
resp = L.getSchedules()
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetLiveProfiles(self, m):
m.get(baseURL + "/live_event_profiles", status_code=200)
resp = L.getLiveProfiles()
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testGetLiveProfile(self, m):
m.get(baseURL + "/live_event_profiles/11", status_code=200)
resp = L.getLiveProfile(11)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testCreateLiveEvent(self, m):
with open('Tests/test_XML/live_event.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/live_events", status_code=201)
resp = L.createEvent(xml)
self.assertEqual(resp.status_code, 201)
@requests_mock.Mocker()
def testCreateSchedule(self, m):
with open('Tests/test_XML/schedule.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/schedules", status_code=201)
resp = L.createSchedule(xml)
self.assertEqual(resp.status_code, 201)
@requests_mock.Mocker()
def testCreateProfile(self, m):
with open('Tests/test_XML/schedule.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/schedules", status_code=201)
resp = L.createSchedule(xml)
self.assertEqual(resp.status_code, 201)
@requests_mock.Mocker()
def testUpdateEvent(self, m):
with open('Tests/test_XML/live_event.xml', 'r') as infile:
xml = infile.read()
m.put(baseURL + "/live_events/50", status_code=200)
resp = L.updateLiveEvent(50, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testUpdatePlaylist(self, m):
with open('Tests/test_XML/live_event.xml', 'r') as infile:
xml = infile.read()
m.post(baseURL + "/live_events/92/playlist", status_code=200)
resp = L.updatePlaylist(92, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testUpdateSchedule(self, m):
with open('Tests/test_XML/schedule.xml', 'r') as infile:
xml = infile.read()
m.put(baseURL + "/schedules/13", status_code=200)
resp = L.updateSchedule(13, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testUpdateProfile(self, m):
with open('Tests/test_XML/live_profile.xml', 'r') as infile:
xml = infile.read()
m.put(baseURL + "/live_event_profiles/33", status_code=200)
resp = L.updateProfile(33, xml)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testRemoveLiveEvent(self, m):
m.delete(baseURL + "/live_events/191", status_code=200)
resp = L.removeEvent(191)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testRemoveSchedule(self, m):
m.delete(baseURL + "/schedules/13", status_code=200)
resp = L.removeSchedule(13)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testRemoveProfile(self, m):
m.delete(baseURL + "/live_event_profiles/33", status_code=200)
resp = L.removeProfile(33)
self.assertEqual(resp.status_code, 200)
@requests_mock.Mocker()
def testStartEvent(self, m):
m.post(baseURL + "/live_events/50/start", status_code=200)
resp = L.startLiveEvent(50)
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
| en | 0.215627 | @patch('services.LiveService.LiveService.time', return_value=1502345833) def testSetHeaders(self, mock_time): headers = L.setHeaders("/schedules") self.assertEqual(headers, {'X-Auth-Expires': '1502345863', 'X-Auth-Key': '9c9a72cd3a8feec48539f1943afbef8d', 'Content-type': 'application/xml', 'X-Auth-User': '', 'Accept': 'application/xml'}) | 2.67274 | 3 |
tests/models/test_stacking.py | LionelMassoulard/aikit | 0 | 9384 | <reponame>LionelMassoulard/aikit
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 11:49:10 2018
@author: <NAME>
"""
import pytest
import numpy as np
import pandas as pd
from sklearn.base import is_regressor, is_classifier
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.dummy import DummyRegressor
from aikit.models.stacking import OutSamplerTransformer, StackerClassifier, StackerRegressor
def test_OutSamplerTransformer_classifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123))
model.fit(X, y)
p1 = model.model.predict_proba(X)
p2 = model.transform(X)
assert not is_classifier(model)
assert not is_regressor(model)
assert np.abs(p1[:, 1] - p2[:, 0]).max() <= 10 ** (-10)
assert p2.shape == (100, 1)
assert model.get_feature_names() == ["RandomForestClassifier__1"]
y = np.array(["a", "b", "c"])[np.random.randint(0, 3, 100)]
model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123))
model.fit(X, y)
p1 = model.model.predict_proba(X)
p2 = model.transform(X)
assert p1.shape == (100, 3)
assert p2.shape == (100, 3)
assert np.abs(p1 - p2).max() <= 10 ** (-10)
assert model.get_feature_names() == [
"RandomForestClassifier__a",
"RandomForestClassifier__b",
"RandomForestClassifier__c",
]
def test_OutSampleTransformer_classifier_unbalanced():
np.random.seed(123)
X = np.random.randn(100, 2)
y = np.array(["AA"] * 33 + ["BB"] * 33 + ["CC"] * 33 + ["DD"])
model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123))
p3 = model.fit_transform(X, y)
assert (p3.max(axis=1) > 0).all()
def test_OutSamplerTransformer_classifier_fit_transform():
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
model.fit(X, y)
y1 = model.transform(X)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
y2 = model.fit_transform(X, y)
assert np.abs(y1 - y2).flatten().max() >= 0.01 # vector should be different
def test_OutSamplerTransformer_regressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
model = OutSamplerTransformer(RandomForestRegressor(n_estimators=10,random_state=123), cv=10)
model.fit(X, y)
y1 = model.model.predict(X)
y2 = model.transform(X)
assert not is_classifier(model)
assert not is_regressor(model)
assert np.abs(y1 - y2[:, 0]).max() <= 10 ** (-10)
assert y2.shape == (100, 1)
assert model.get_feature_names() == ["RandomForestRegressor__target"]
def test_OutSamplerTransformer_regressor_fit_transform():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(DummyRegressor(), cv=cv)
model.fit(X, y)
y1 = model.transform(X)
model = OutSamplerTransformer(DummyRegressor(), cv=cv)
y2 = model.fit_transform(X, y)
assert np.abs(y1 - y2).flatten().max() >= 0.01 # vector should be different
def test_approx_cross_validation_OutSamplerTransformer_regressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
model = OutSamplerTransformer(RandomForestRegressor(random_state=123), cv=10)
cv_res, yhat = model.approx_cross_validation(X, y, cv=10, method="transform", no_scoring=True)
assert cv_res is None
assert yhat.ndim == 2
assert yhat.shape == (y.shape[0], 1)
with pytest.raises(NotFittedError):
model.transform(X)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(DummyRegressor(), cv=cv)
yhat1 = model.fit_transform(X, y)
cv_res, yhat2 = model.approx_cross_validation(X, y, cv=cv, method="transform", no_scoring=True, return_predict=True)
# Approx cross val and fit transform should return the same thing here
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
yhat3 = np.zeros((y.shape[0], 1))
for train, test in cv.split(X, y):
model = DummyRegressor()
model.fit(X[train, :], y[train])
yhat3[test, 0] = model.predict(X[test, :])
assert np.abs((yhat1 - yhat3).flatten()).max() <= 10 ** (-5)
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
def test_approx_cross_validation_OutSamplerTransformer_classifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
model = OutSamplerTransformer(RandomForestClassifier(random_state=123), cv=10)
cv_res, yhat = model.approx_cross_validation(X, y, cv=10, method="transform", no_scoring=True)
assert cv_res is None
assert yhat.ndim == 2
assert yhat.shape == (y.shape[0], 1)
with pytest.raises(NotFittedError):
model.transform(X)
with pytest.raises(NotFittedError):
model.model.predict(X)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
yhat1 = model.fit_transform(X, y)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
cv_res, yhat2 = model.approx_cross_validation(X, y, cv=cv, method="transform", no_scoring=True, return_predict=True)
# Approx cross val and fit transform should return the same thing here
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
yhat3 = np.zeros((y.shape[0], 1))
for train, test in cv.split(X, y):
model = LogisticRegression()
model.fit(X[train, :], y[train])
yhat3[test, 0] = model.predict_proba(X[test, :])[:, 1]
assert np.abs((yhat1 - yhat3).flatten()).max() <= 10 ** (-5)
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
def test_StackerRegressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
stacker = StackerRegressor(models=[RandomForestRegressor(n_estimators=10,random_state=123), Ridge(random_state=123)], cv=10, blender=Ridge(random_state=123))
stacker.fit(X, y)
yhat = stacker.predict(X)
assert yhat.ndim == 1
assert yhat.shape[0] == X.shape[0]
assert is_regressor(stacker)
assert not is_classifier(stacker)
with pytest.raises(AttributeError):
stacker.predict_proba(X)
with pytest.raises(AttributeError):
stacker.classes_
def test_StackerClassifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
stacker = StackerClassifier(
models=[RandomForestClassifier(random_state=123), LogisticRegression(C=1,random_state=123)], cv=10, blender=LogisticRegression(C=1,random_state=123)
)
stacker.fit(X, y)
yhat = stacker.predict(X)
assert yhat.ndim == 1
assert yhat.shape[0] == X.shape[0]
assert list(set(yhat)) == [0, 1]
assert list(stacker.classes_) == [0, 1]
yhat_proba = stacker.predict_proba(X)
assert yhat_proba.shape == (y.shape[0], 2)
assert not is_regressor(stacker)
assert is_classifier(stacker)
def test_approx_cross_validation_StackerRegressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
stacker = StackerRegressor(models=[RandomForestRegressor(n_estimators=10,random_state=123), Ridge(random_state=123)], cv=10, blender=Ridge(random_state=123))
cv_res, yhat = stacker.approx_cross_validation(
X, y, cv=10, method="predict", scoring=["neg_mean_squared_error"], return_predict=True, verbose=False
)
assert cv_res is not None
assert isinstance(cv_res, pd.DataFrame)
assert cv_res.shape[0] == 10
assert "test_neg_mean_squared_error" in cv_res
assert "train_neg_mean_squared_error" in cv_res
assert yhat.ndim == 1
assert yhat.shape[0] == y.shape[0]
with pytest.raises(NotFittedError):
stacker.predict(X)
for m in stacker.models:
with pytest.raises(NotFittedError):
m.predict(X)
def test_approx_cross_validation_StackerClassifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
stacker = StackerClassifier(
models=[RandomForestClassifier(n_estimators=10,random_state=123), LogisticRegression(C=1,random_state=123)], cv=10, blender=LogisticRegression(C=1,random_state=123)
)
cv_res, yhat = stacker.approx_cross_validation(
X, y, cv=10, method="predict_proba", scoring=["accuracy"], return_predict=True, verbose=False
)
assert cv_res is not None
assert isinstance(cv_res, pd.DataFrame)
assert cv_res.shape[0] == 10
assert "test_accuracy" in cv_res
assert "train_accuracy" in cv_res
assert yhat.ndim == 2
assert yhat.shape == (y.shape[0], 2)
with pytest.raises(NotFittedError):
stacker.predict(X)
for m in stacker.models:
with pytest.raises(NotFittedError):
m.predict(X)
def _verif_all():
test_OutSamplerTransformer_classifier()
test_OutSamplerTransformer_regressor()
test_OutSamplerTransformer_classifier_fit_transform()
test_OutSamplerTransformer_regressor_fit_transform()
test_approx_cross_validation_OutSamplerTransformer_regressor()
test_approx_cross_validation_OutSamplerTransformer_classifier()
test_StackerClassifier()
test_StackerRegressor()
test_approx_cross_validation_StackerClassifier()
test_approx_cross_validation_StackerRegressor()
| # -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 11:49:10 2018
@author: <NAME>
"""
import pytest
import numpy as np
import pandas as pd
from sklearn.base import is_regressor, is_classifier
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.dummy import DummyRegressor
from aikit.models.stacking import OutSamplerTransformer, StackerClassifier, StackerRegressor
def test_OutSamplerTransformer_classifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123))
model.fit(X, y)
p1 = model.model.predict_proba(X)
p2 = model.transform(X)
assert not is_classifier(model)
assert not is_regressor(model)
assert np.abs(p1[:, 1] - p2[:, 0]).max() <= 10 ** (-10)
assert p2.shape == (100, 1)
assert model.get_feature_names() == ["RandomForestClassifier__1"]
y = np.array(["a", "b", "c"])[np.random.randint(0, 3, 100)]
model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123))
model.fit(X, y)
p1 = model.model.predict_proba(X)
p2 = model.transform(X)
assert p1.shape == (100, 3)
assert p2.shape == (100, 3)
assert np.abs(p1 - p2).max() <= 10 ** (-10)
assert model.get_feature_names() == [
"RandomForestClassifier__a",
"RandomForestClassifier__b",
"RandomForestClassifier__c",
]
def test_OutSampleTransformer_classifier_unbalanced():
np.random.seed(123)
X = np.random.randn(100, 2)
y = np.array(["AA"] * 33 + ["BB"] * 33 + ["CC"] * 33 + ["DD"])
model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123))
p3 = model.fit_transform(X, y)
assert (p3.max(axis=1) > 0).all()
def test_OutSamplerTransformer_classifier_fit_transform():
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
model.fit(X, y)
y1 = model.transform(X)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
y2 = model.fit_transform(X, y)
assert np.abs(y1 - y2).flatten().max() >= 0.01 # vector should be different
def test_OutSamplerTransformer_regressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
model = OutSamplerTransformer(RandomForestRegressor(n_estimators=10,random_state=123), cv=10)
model.fit(X, y)
y1 = model.model.predict(X)
y2 = model.transform(X)
assert not is_classifier(model)
assert not is_regressor(model)
assert np.abs(y1 - y2[:, 0]).max() <= 10 ** (-10)
assert y2.shape == (100, 1)
assert model.get_feature_names() == ["RandomForestRegressor__target"]
def test_OutSamplerTransformer_regressor_fit_transform():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(DummyRegressor(), cv=cv)
model.fit(X, y)
y1 = model.transform(X)
model = OutSamplerTransformer(DummyRegressor(), cv=cv)
y2 = model.fit_transform(X, y)
assert np.abs(y1 - y2).flatten().max() >= 0.01 # vector should be different
def test_approx_cross_validation_OutSamplerTransformer_regressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
model = OutSamplerTransformer(RandomForestRegressor(random_state=123), cv=10)
cv_res, yhat = model.approx_cross_validation(X, y, cv=10, method="transform", no_scoring=True)
assert cv_res is None
assert yhat.ndim == 2
assert yhat.shape == (y.shape[0], 1)
with pytest.raises(NotFittedError):
model.transform(X)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(DummyRegressor(), cv=cv)
yhat1 = model.fit_transform(X, y)
cv_res, yhat2 = model.approx_cross_validation(X, y, cv=cv, method="transform", no_scoring=True, return_predict=True)
# Approx cross val and fit transform should return the same thing here
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
yhat3 = np.zeros((y.shape[0], 1))
for train, test in cv.split(X, y):
model = DummyRegressor()
model.fit(X[train, :], y[train])
yhat3[test, 0] = model.predict(X[test, :])
assert np.abs((yhat1 - yhat3).flatten()).max() <= 10 ** (-5)
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
def test_approx_cross_validation_OutSamplerTransformer_classifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
model = OutSamplerTransformer(RandomForestClassifier(random_state=123), cv=10)
cv_res, yhat = model.approx_cross_validation(X, y, cv=10, method="transform", no_scoring=True)
assert cv_res is None
assert yhat.ndim == 2
assert yhat.shape == (y.shape[0], 1)
with pytest.raises(NotFittedError):
model.transform(X)
with pytest.raises(NotFittedError):
model.model.predict(X)
cv = KFold(n_splits=10, shuffle=True, random_state=123)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
yhat1 = model.fit_transform(X, y)
model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv)
cv_res, yhat2 = model.approx_cross_validation(X, y, cv=cv, method="transform", no_scoring=True, return_predict=True)
# Approx cross val and fit transform should return the same thing here
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
yhat3 = np.zeros((y.shape[0], 1))
for train, test in cv.split(X, y):
model = LogisticRegression()
model.fit(X[train, :], y[train])
yhat3[test, 0] = model.predict_proba(X[test, :])[:, 1]
assert np.abs((yhat1 - yhat3).flatten()).max() <= 10 ** (-5)
assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5)
def test_StackerRegressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
stacker = StackerRegressor(models=[RandomForestRegressor(n_estimators=10,random_state=123), Ridge(random_state=123)], cv=10, blender=Ridge(random_state=123))
stacker.fit(X, y)
yhat = stacker.predict(X)
assert yhat.ndim == 1
assert yhat.shape[0] == X.shape[0]
assert is_regressor(stacker)
assert not is_classifier(stacker)
with pytest.raises(AttributeError):
stacker.predict_proba(X)
with pytest.raises(AttributeError):
stacker.classes_
def test_StackerClassifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
stacker = StackerClassifier(
models=[RandomForestClassifier(random_state=123), LogisticRegression(C=1,random_state=123)], cv=10, blender=LogisticRegression(C=1,random_state=123)
)
stacker.fit(X, y)
yhat = stacker.predict(X)
assert yhat.ndim == 1
assert yhat.shape[0] == X.shape[0]
assert list(set(yhat)) == [0, 1]
assert list(stacker.classes_) == [0, 1]
yhat_proba = stacker.predict_proba(X)
assert yhat_proba.shape == (y.shape[0], 2)
assert not is_regressor(stacker)
assert is_classifier(stacker)
def test_approx_cross_validation_StackerRegressor():
np.random.seed(123)
X = np.random.randn(100, 10)
y = np.random.randn(100)
stacker = StackerRegressor(models=[RandomForestRegressor(n_estimators=10,random_state=123), Ridge(random_state=123)], cv=10, blender=Ridge(random_state=123))
cv_res, yhat = stacker.approx_cross_validation(
X, y, cv=10, method="predict", scoring=["neg_mean_squared_error"], return_predict=True, verbose=False
)
assert cv_res is not None
assert isinstance(cv_res, pd.DataFrame)
assert cv_res.shape[0] == 10
assert "test_neg_mean_squared_error" in cv_res
assert "train_neg_mean_squared_error" in cv_res
assert yhat.ndim == 1
assert yhat.shape[0] == y.shape[0]
with pytest.raises(NotFittedError):
stacker.predict(X)
for m in stacker.models:
with pytest.raises(NotFittedError):
m.predict(X)
def test_approx_cross_validation_StackerClassifier():
np.random.seed(123)
X = np.random.randn(100, 10)
y = 1 * (np.random.randn(100) > 0)
stacker = StackerClassifier(
models=[RandomForestClassifier(n_estimators=10,random_state=123), LogisticRegression(C=1,random_state=123)], cv=10, blender=LogisticRegression(C=1,random_state=123)
)
cv_res, yhat = stacker.approx_cross_validation(
X, y, cv=10, method="predict_proba", scoring=["accuracy"], return_predict=True, verbose=False
)
assert cv_res is not None
assert isinstance(cv_res, pd.DataFrame)
assert cv_res.shape[0] == 10
assert "test_accuracy" in cv_res
assert "train_accuracy" in cv_res
assert yhat.ndim == 2
assert yhat.shape == (y.shape[0], 2)
with pytest.raises(NotFittedError):
stacker.predict(X)
for m in stacker.models:
with pytest.raises(NotFittedError):
m.predict(X)
def _verif_all():
test_OutSamplerTransformer_classifier()
test_OutSamplerTransformer_regressor()
test_OutSamplerTransformer_classifier_fit_transform()
test_OutSamplerTransformer_regressor_fit_transform()
test_approx_cross_validation_OutSamplerTransformer_regressor()
test_approx_cross_validation_OutSamplerTransformer_classifier()
test_StackerClassifier()
test_StackerRegressor()
test_approx_cross_validation_StackerClassifier()
test_approx_cross_validation_StackerRegressor() | en | 0.765778 | # -*- coding: utf-8 -*- Created on Fri Sep 14 11:49:10 2018 @author: <NAME> # vector should be different # vector should be different # Approx cross val and fit transform should return the same thing here # Approx cross val and fit transform should return the same thing here | 2.295985 | 2 |
employee/views/check_rental.py | odrolliv13/Hex-Photos | 0 | 9385 | <reponame>odrolliv13/Hex-Photos
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as pmod
from . import templater
from django.conf import settings
import decimal, datetime
# This view will display all users and then on a new page display all the current rentals for a given user
def process_request(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/shop')
if request.user.is_staff == False:
return HttpResponseRedirect('/shop')
if request.urlparams[0] == "":
#This form will display all users
form = CheckRentalForm(initial ={
'user': "",
})
if request.method == 'POST':
form = CheckRentalForm(request.POST)
if form.is_valid():
#From here the page will redirect to show all the current rentals for the user picked
complete = "/employee/customer_rentals/" + str(form.cleaned_data['user'].id)
return HttpResponseRedirect(complete)
tvars = {
'form': form,
}
return templater.render_to_response(request, 'return_rental.html', tvars)
else:
try:
complete_rental = pmod.Rental.objects.get(id=request.urlparams[0])
form = CheckRentalForm(initial ={
'user': "",
})
except:
pass
form = "dfd"
tvars = {
'form': form,
}
return templater.render_to_response(request, 'return_rental.html', tvars)
class CheckRentalForm(forms.Form):
user = forms.ModelChoiceField(queryset=pmod.User.objects.exclude(is_active=False), label="User", widget=forms.Select(attrs={'class':'form-control'})) | from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as pmod
from . import templater
from django.conf import settings
import decimal, datetime
# This view will display all users and then on a new page display all the current rentals for a given user
def process_request(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/shop')
if request.user.is_staff == False:
return HttpResponseRedirect('/shop')
if request.urlparams[0] == "":
#This form will display all users
form = CheckRentalForm(initial ={
'user': "",
})
if request.method == 'POST':
form = CheckRentalForm(request.POST)
if form.is_valid():
#From here the page will redirect to show all the current rentals for the user picked
complete = "/employee/customer_rentals/" + str(form.cleaned_data['user'].id)
return HttpResponseRedirect(complete)
tvars = {
'form': form,
}
return templater.render_to_response(request, 'return_rental.html', tvars)
else:
try:
complete_rental = pmod.Rental.objects.get(id=request.urlparams[0])
form = CheckRentalForm(initial ={
'user': "",
})
except:
pass
form = "dfd"
tvars = {
'form': form,
}
return templater.render_to_response(request, 'return_rental.html', tvars)
class CheckRentalForm(forms.Form):
user = forms.ModelChoiceField(queryset=pmod.User.objects.exclude(is_active=False), label="User", widget=forms.Select(attrs={'class':'form-control'})) | en | 0.736795 | # This view will display all users and then on a new page display all the current rentals for a given user #This form will display all users #From here the page will redirect to show all the current rentals for the user picked | 2.376353 | 2 |
jupyter/settings.py | nguyenngtt/GSE---TEAM-A | 3 | 9386 | import pandas as pd
import numpy as np
import os
import logging
# suppress warnings
import warnings;
warnings.filterwarnings('ignore');
from tqdm.autonotebook import tqdm
# register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
tqdm.pandas()
# https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#available-options
# adjust pandas display
pd.options.display.max_columns = 30 # default 20
pd.options.display.max_rows = 200 # default 60
pd.options.display.float_format = '{:.2f}'.format
# pd.options.display.precision = 2
pd.options.display.max_colwidth = 200 # default 50; None = all
# Number of array items in summary at beginning and end of each dimension
# np.set_printoptions(edgeitems=3) # default 3
np.set_printoptions(suppress=True) # no scientific notation for small numbers
# IPython (Jupyter) setting:
# Print out every value instead of just "last_expr" (default)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import matplotlib as mpl
from matplotlib import pyplot as plt
# defaults: mpl.rcParamsDefault
rc_params = {'figure.figsize': (8, 4),
'axes.labelsize': 'large',
'axes.titlesize': 'large',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'savefig.dpi': 100,
'figure.dpi': 100 }
# adjust matplotlib defaults
mpl.rcParams.update(rc_params)
import seaborn as sns
sns.set_style("darkgrid")
# sns.set()
| import pandas as pd
import numpy as np
import os
import logging
# suppress warnings
import warnings;
warnings.filterwarnings('ignore');
from tqdm.autonotebook import tqdm
# register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
tqdm.pandas()
# https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#available-options
# adjust pandas display
pd.options.display.max_columns = 30 # default 20
pd.options.display.max_rows = 200 # default 60
pd.options.display.float_format = '{:.2f}'.format
# pd.options.display.precision = 2
pd.options.display.max_colwidth = 200 # default 50; None = all
# Number of array items in summary at beginning and end of each dimension
# np.set_printoptions(edgeitems=3) # default 3
np.set_printoptions(suppress=True) # no scientific notation for small numbers
# IPython (Jupyter) setting:
# Print out every value instead of just "last_expr" (default)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import matplotlib as mpl
from matplotlib import pyplot as plt
# defaults: mpl.rcParamsDefault
rc_params = {'figure.figsize': (8, 4),
'axes.labelsize': 'large',
'axes.titlesize': 'large',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'savefig.dpi': 100,
'figure.dpi': 100 }
# adjust matplotlib defaults
mpl.rcParams.update(rc_params)
import seaborn as sns
sns.set_style("darkgrid")
# sns.set()
| en | 0.324311 | # suppress warnings # register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm` # https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#available-options # adjust pandas display # default 20 # default 60 # pd.options.display.precision = 2 # default 50; None = all # Number of array items in summary at beginning and end of each dimension # np.set_printoptions(edgeitems=3) # default 3 # no scientific notation for small numbers # IPython (Jupyter) setting: # Print out every value instead of just "last_expr" (default) # defaults: mpl.rcParamsDefault # adjust matplotlib defaults # sns.set() | 2.141716 | 2 |
var/spack/repos/builtin/packages/py-cyvcf2/package.py | jeanbez/spack | 0 | 9387 | <filename>var/spack/repos/builtin/packages/py-cyvcf2/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyCyvcf2(PythonPackage):
"""fast vcf parsing with cython + htslib"""
homepage = "https://github.com/brentp/cyvcf2"
pypi = "cyvcf2/cyvcf2-0.11.7.tar.gz"
version('0.11.7', sha256='a4b6229b89a0a1043684c65cbdd702c366a8800dc3591fb44c4b5a08640cbeec')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-coloredlogs', type=('build', 'run'))
depends_on('py-click', type=('build', 'run'))
depends_on('curl')
| <filename>var/spack/repos/builtin/packages/py-cyvcf2/package.py
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyCyvcf2(PythonPackage):
"""fast vcf parsing with cython + htslib"""
homepage = "https://github.com/brentp/cyvcf2"
pypi = "cyvcf2/cyvcf2-0.11.7.tar.gz"
version('0.11.7', sha256='a4b6229b89a0a1043684c65cbdd702c366a8800dc3591fb44c4b5a08640cbeec')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-coloredlogs', type=('build', 'run'))
depends_on('py-click', type=('build', 'run'))
depends_on('curl')
| en | 0.648946 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) fast vcf parsing with cython + htslib | 1.573296 | 2 |
pset_functions/db_search/p1.py | mottaquikarim/pydev-psets | 5 | 9388 | """
GPA Calculator
"""
# Write a function called "simple_gpa" to find GPA when student enters a letter grade as a string. Assign the result to a variable called "gpa".
"""
Use these conversions:
A+ --> 4.0
A --> 4.0
A- --> 3.7
B+ --> 3.3
B --> 3.0
B- --> 2.7
C+ --> 2.3
C --> 2.0
C- --> 1.7
D+ --> 1.3
D --> 1.0
D- --> 0.7
F --> 0.0
"""
| """
GPA Calculator
"""
# Write a function called "simple_gpa" to find GPA when student enters a letter grade as a string. Assign the result to a variable called "gpa".
"""
Use these conversions:
A+ --> 4.0
A --> 4.0
A- --> 3.7
B+ --> 3.3
B --> 3.0
B- --> 2.7
C+ --> 2.3
C --> 2.0
C- --> 1.7
D+ --> 1.3
D --> 1.0
D- --> 0.7
F --> 0.0
"""
| en | 0.656525 | GPA Calculator # Write a function called "simple_gpa" to find GPA when student enters a letter grade as a string. Assign the result to a variable called "gpa". Use these conversions: A+ --> 4.0 A --> 4.0 A- --> 3.7 B+ --> 3.3 B --> 3.0 B- --> 2.7 C+ --> 2.3 C --> 2.0 C- --> 1.7 D+ --> 1.3 D --> 1.0 D- --> 0.7 F --> 0.0 | 3.773573 | 4 |
test_soundcard.py | flying-sheep/SoundCard | 1 | 9389 | import sys
import soundcard
import numpy
import pytest
ones = numpy.ones(1024)
signal = numpy.concatenate([[ones], [-ones]]).T
def test_speakers():
for speaker in soundcard.all_speakers():
assert isinstance(speaker.name, str)
assert hasattr(speaker, 'id')
assert isinstance(speaker.channels, int)
assert speaker.channels > 0
def test_microphones():
for microphone in soundcard.all_microphones():
assert isinstance(microphone.name, str)
assert hasattr(microphone, 'id')
assert isinstance(microphone.channels, int)
assert microphone.channels > 0
def test_default_playback():
soundcard.default_speaker().play(signal, 44100, channels=2)
def test_default_record():
recording = soundcard.default_microphone().record(1024, 44100)
assert len(recording == 1024)
def test_default_blockless_record():
recording = soundcard.default_microphone().record(None, 44100)
@pytest.fixture
def loopback_speaker():
import sys
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_speaker('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_speaker('Soundflower64')
elif sys.platform == 'linux':
# pacmd load-module module-null-sink channels=6 rate=48000
return soundcard.get_speaker('Null')
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_player(loopback_speaker):
with loopback_speaker.player(48000, channels=2, blocksize=512) as player:
yield player
@pytest.fixture
def loopback_microphone():
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_microphone('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_microphone('Soundflower64')
elif sys.platform == 'linux':
return soundcard.get_microphone('Null', include_loopback=True)
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_recorder(loopback_microphone):
with loopback_microphone.recorder(48000, channels=2, blocksize=512) as recorder:
yield recorder
def test_loopback_playback(loopback_player, loopback_recorder):
loopback_player.play(signal)
recording = loopback_recorder.record(1024*10)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[1, 0], blocksize=512) as loopback_player:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[0], blocksize=512) as loopback_player:
loopback_player.play(signal[:,0])
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
if sys.platform == 'linux':
# unmapped channels on linux are filled with the mean of other channels
assert right.mean() < left.mean()
else:
assert abs(right.mean()) < 0.01 # something like zero
assert (left > 0.5).sum() == len(signal)
def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 1 or recording.shape[1] == 1
assert recording.mean() > 0
assert (recording > 0.5).sum() == len(signal)
def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone):
with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as loopback_player:
with loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
| import sys
import soundcard
import numpy
import pytest
ones = numpy.ones(1024)
signal = numpy.concatenate([[ones], [-ones]]).T
def test_speakers():
for speaker in soundcard.all_speakers():
assert isinstance(speaker.name, str)
assert hasattr(speaker, 'id')
assert isinstance(speaker.channels, int)
assert speaker.channels > 0
def test_microphones():
for microphone in soundcard.all_microphones():
assert isinstance(microphone.name, str)
assert hasattr(microphone, 'id')
assert isinstance(microphone.channels, int)
assert microphone.channels > 0
def test_default_playback():
soundcard.default_speaker().play(signal, 44100, channels=2)
def test_default_record():
recording = soundcard.default_microphone().record(1024, 44100)
assert len(recording == 1024)
def test_default_blockless_record():
recording = soundcard.default_microphone().record(None, 44100)
@pytest.fixture
def loopback_speaker():
import sys
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_speaker('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_speaker('Soundflower64')
elif sys.platform == 'linux':
# pacmd load-module module-null-sink channels=6 rate=48000
return soundcard.get_speaker('Null')
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_player(loopback_speaker):
with loopback_speaker.player(48000, channels=2, blocksize=512) as player:
yield player
@pytest.fixture
def loopback_microphone():
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_microphone('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_microphone('Soundflower64')
elif sys.platform == 'linux':
return soundcard.get_microphone('Null', include_loopback=True)
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_recorder(loopback_microphone):
with loopback_microphone.recorder(48000, channels=2, blocksize=512) as recorder:
yield recorder
def test_loopback_playback(loopback_player, loopback_recorder):
loopback_player.play(signal)
recording = loopback_recorder.record(1024*10)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[1, 0], blocksize=512) as loopback_player:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[0], blocksize=512) as loopback_player:
loopback_player.play(signal[:,0])
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
if sys.platform == 'linux':
# unmapped channels on linux are filled with the mean of other channels
assert right.mean() < left.mean()
else:
assert abs(right.mean()) < 0.01 # something like zero
assert (left > 0.5).sum() == len(signal)
def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 1 or recording.shape[1] == 1
assert recording.mean() > 0
assert (recording > 0.5).sum() == len(signal)
def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone):
with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as loopback_player:
with loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
| en | 0.794081 | # must install https://www.vb-audio.com/Cable/index.htm # must install soundflower # pacmd load-module module-null-sink channels=6 rate=48000 # must install https://www.vb-audio.com/Cable/index.htm # must install soundflower # unmapped channels on linux are filled with the mean of other channels # something like zero | 2.412843 | 2 |
Last 3 digits of 11^x.py | jaiveergill/Last-Three-Digits-of-11-x | 0 | 9390 | # This is a simple program to find the last three digits of 11 raised to any given number.
# The main algorithm that does the work is on line 10
def trim_num(num):
if len(str(num)) > 3: # no need to trim if the number is 3 or less digits long
return str(num)[(len(str(num)) - 3):] # trims the number
return num
def main(exp):
init_val = str((((exp-1) * (exp))/2) % 10 + (exp % 100) / 10) + str(exp % 10) + "1" # The main algorithm which needs to be cleaned (only the last three digits should be shown)
return "{}".format(trim_num(init_val))
# To use it, simply copy the code and run the function
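# Illustrative sketch, not part of the original submission: the one-liner above relies on the
# binomial expansion 11**n = (10 + 1)**n, where modulo 1000 only the terms
# 1 + 10*n + 100*n*(n-1)/2 survive. The helpers below (names are made up for this demo)
# restate that expansion and check it against built-in modular exponentiation.
def last_three_digits(n):
    # 1 + 10*C(n, 1) + 100*C(n, 2), reduced modulo 1000
    return (1 + 10 * n + 100 * (n * (n - 1) // 2)) % 1000

def check_formula(limit=50):
    for n in range(1, limit):
        assert last_three_digits(n) == pow(11, n, 1000), n
    return True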
| # This is a simple program to find the last three digits of 11 raised to any given number.
# The main algorithm that does the work is on line 10
def trim_num(num):
if len(str(num)) > 3: # no need to trim if the number is 3 or less digits long
return str(num)[(len(str(num)) - 3):] # trims the number
return num
def main(exp):
init_val = str((((exp-1) * (exp))/2) % 10 + (exp % 100) / 10) + str(exp % 10) + "1" # The main algorithm which needs to be cleaned (only the last three digits should be shown)
return "{}".format(trim_num(init_val))
# To use it, simply copy the code and run the function
| en | 0.895495 | # This is a simple program to find the last three digits of 11 raised to any given number. # The main algorithm that does the work is on line 10 # no need to trim if the number is 3 or less digits long # trims the number # The main algorithm which needs to be cleaned (only the last three digits should be shown) # To use it, simply copy the code and run the function | 4.272123 | 4 |
osr_odometry/scripts/osr_odom_ackerman2.py | ljb2208/osr-rover-code | 0 | 9391 | <filename>osr_odometry/scripts/osr_odom_ackerman2.py
#!/usr/bin/env python
import time
from osr_msgs.msg import Joystick, Commands, Encoder, RunStop
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
import rospy
import tf
import math
import numpy
class Odometry2():
def __init__(self, baseFrame, wheelTrack, mpt, d4, maxTickPerSec, pubTF=False):
self.encValid = False
self.priorTime = rospy.Time.now()
self.priorEncs = [0,0,0,0,0,0]
self.mpt = mpt
self.pubTF = pubTF
# distance between wheels
self.wheelTrack = wheelTrack
self.d4 = d4
self.baseFrame = baseFrame
self.maxTickPerSec = maxTickPerSec
self.x = 0.0
self.y = 0.0
self.th = 0.0
self.odomPub = rospy.Publisher("/odom", Odometry, queue_size = 1)
if self.pubTF:
self.odomBroadcaster = tf.TransformBroadcaster()
self.twistCovar = numpy.diag([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]).ravel()
self.poseCovar = numpy.diag([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]).ravel()
def onEncoderMessage(self, message):
self.calculateOdometry(message)
def isValid(self, message):
dencLeft = abs(message.rel_enc[1] - self.priorEncs[1])
dencRight = abs(message.rel_enc[4] - self.priorEncs[4])
dt = self.getElapsedTime(message.header.stamp)
if (dencLeft/dt) > self.maxTickPerSec:
rospy.logwarn("Invalid relative encoder value on left wheel. No odom calculated")
return False
if (dencRight/dt) > self.maxTickPerSec:
rospy.logwarn("Invalid relative encoder value on right wheel. No odom calculated")
return False
return True
def publishTransform(self, x, y, quaternion, timestamp):
self.odomBroadcaster.sendTransform(
(x, y, 0),
(quaternion.x, quaternion.y, quaternion.z, quaternion.w),
timestamp,
self.baseFrame,
"odom")
def publishOdomMessage(self, x, y, vx, vy, vth, quaternion, timestamp):
odom = Odometry()
odom.header.frame_id = "odom"
odom.child_frame_id = self.baseFrame
odom.header.stamp = timestamp
odom.pose.pose.position.x = x
odom.pose.pose.position.y = y
odom.pose.pose.position.z = 0
odom.pose.covariance = self.poseCovar
odom.pose.pose.orientation = quaternion
odom.twist.twist.linear.x = vx
odom.twist.twist.linear.y = vy
odom.twist.twist.linear.z = 0
odom.twist.twist.angular.z = vth
odom.twist.covariance = self.twistCovar
self.odomPub.publish(odom)
def getElapsedTime(self, timestamp, save=False):
dt = (timestamp - self.priorTime).to_sec()
if save:
self.priorTime = timestamp
return dt
def calculateTurnRadius(self, dLeft, dRight):
dlr = dLeft - dRight
# calculate radius of turn
if dlr != 0 and dLeft != 0 and dRight != 0:
lv = self.d4 + dLeft / dRight * self.d4
# print ("lv: " + str(lv))
r = lv / (1 - (dLeft / dRight))
else:
r = 0
dist = (dLeft + dRight) / 2
# calculate angle change
if (r != 0):
dTheta = dist / -r
else:
dTheta = 0
return r, dTheta
def calculateOdometry(self, message):
currentTime = message.header.stamp
encs = message.rel_enc
if not self.isValid(message):
return
dt = self.getElapsedTime(currentTime, save=True)
dLeft = self.mpt * (encs[1] - self.priorEncs[1])
dRight = self.mpt * (encs[4] - self.priorEncs[4])
# dth = (dRight - dLeft) / self.wheelTrack
radius, dTheta = self.calculateTurnRadius(dLeft, dRight)
# calculate centre of turn circle
xOrig = self.x + radius * math.cos(self.th)
yOrig = self.y + radius * math.sin(self.th)
# calculate new co-ordinates
xNew = xOrig + (self.x - xOrig) * math.cos(dTheta) - (self.y - yOrig) * math.sin(dTheta)
yNew = yOrig + (self.x - xOrig) * math.sin(dTheta) + (self.y - yOrig) * math.cos(dTheta)
#calculate change in x,y values
dx = xNew - self.x
dy = yNew - self.y
self.th += dTheta
if (self.th > (math.pi * 2)):
self.th -= (math.pi * 2)
elif (self.th < (-math.pi * 2)):
self.th += (math.pi * 2)
self.x = xNew
self.y = yNew
# convert to ros co-ords
xRos = self.y
yRos = -self.x
vxRos = dy / dt
vyRos = -dx / dt
vth = dTheta /dt
quaternion = self.getQuaternion(self.th)
if self.pubTF:
self.publishTransform(xRos, yRos, quaternion, currentTime)
self.publishOdomMessage(xRos, yRos, vxRos, vyRos, vth, quaternion, currentTime)
self.priorEncs = encs
def getQuaternion(self, th):
quaternion = Quaternion()
quaternion.x = 0.0
quaternion.y = 0.0
quaternion.z = math.sin(th / 2.0)
quaternion.w = math.cos(th / 2.0)
return quaternion
if __name__ == '__main__':
rospy.init_node('osr_odometry2')
rospy.loginfo("Starting the osr odometry2 node")
baseFrame = rospy.get_param("/odometry/base_frame_id", "base_link")
# mpt = rospy.get_param("/odometry/mpt", 0.000026322)
mpt = rospy.get_param("/odometry/mpt", 0.000100708)
wheelTrack = rospy.get_param("/odometry/wheel_track", 0.455)
d4 = rospy.get_param("/odometry/d4", 0.2559)
maxTickPerSec = rospy.get_param("/odometry/maxTickPerSec", 8000)
publishTF = rospy.get_param("~publishTF", False)
odom = Odometry2(baseFrame, wheelTrack, mpt, d4, maxTickPerSec, pubTF=publishTF)
encSub = rospy.Subscriber("/encoder", Encoder, odom.onEncoderMessage)
rate = rospy.Rate(20)
while not rospy.is_shutdown():
rate.sleep()
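# Hedged illustration only (never called by the node): the same turn-radius geometry used in
# Odometry2.calculateTurnRadius, repeated on sample wheel travels so the arithmetic is easier
# to follow. The travel values are made up; d4 reuses the parameter default from above.
def _demo_turn_radius(d4=0.2559, d_left=0.010, d_right=0.012):
    # lateral span covered by the wheel pair, scaled by the ratio of wheel travels
    lv = d4 + d_left / d_right * d4
    # signed radius of the circle followed by the rover centre
    r = lv / (1 - (d_left / d_right))
    # arc length of the centre path and the resulting heading change
    dist = (d_left + d_right) / 2.0
    d_theta = dist / -r if r != 0 else 0.0
    return r, d_theta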
| <filename>osr_odometry/scripts/osr_odom_ackerman2.py
#!/usr/bin/env python
import time
from osr_msgs.msg import Joystick, Commands, Encoder, RunStop
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
import rospy
import tf
import math
import numpy
class Odometry2():
def __init__(self, baseFrame, wheelTrack, mpt, d4, maxTickPerSec, pubTF=False):
self.encValid = False
self.priorTime = rospy.Time.now()
self.priorEncs = [0,0,0,0,0,0]
self.mpt = mpt
self.pubTF = pubTF
# distance between wheels
self.wheelTrack = wheelTrack
self.d4 = d4
self.baseFrame = baseFrame
self.maxTickPerSec = maxTickPerSec
self.x = 0.0
self.y = 0.0
self.th = 0.0
self.odomPub = rospy.Publisher("/odom", Odometry, queue_size = 1)
if self.pubTF:
self.odomBroadcaster = tf.TransformBroadcaster()
self.twistCovar = numpy.diag([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]).ravel()
self.poseCovar = numpy.diag([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]).ravel()
def onEncoderMessage(self, message):
self.calculateOdometry(message)
def isValid(self, message):
dencLeft = abs(message.rel_enc[1] - self.priorEncs[1])
dencRight = abs(message.rel_enc[4] - self.priorEncs[4])
dt = self.getElapsedTime(message.header.stamp)
if (dencLeft/dt) > self.maxTickPerSec:
rospy.logwarn("Invalid relative encoder value on left wheel. No odom calculated")
return False
if (dencRight/dt) > self.maxTickPerSec:
rospy.logwarn("Invalid relative encoder value on right wheel. No odom calculated")
return False
return True
def publishTransform(self, x, y, quaternion, timestamp):
self.odomBroadcaster.sendTransform(
(x, y, 0),
(quaternion.x, quaternion.y, quaternion.z, quaternion.w),
timestamp,
self.baseFrame,
"odom")
def publishOdomMessage(self, x, y, vx, vy, vth, quaternion, timestamp):
odom = Odometry()
odom.header.frame_id = "odom"
odom.child_frame_id = self.baseFrame
odom.header.stamp = timestamp
odom.pose.pose.position.x = x
odom.pose.pose.position.y = y
odom.pose.pose.position.z = 0
odom.pose.covariance = self.poseCovar
odom.pose.pose.orientation = quaternion
odom.twist.twist.linear.x = vx
odom.twist.twist.linear.y = vy
odom.twist.twist.linear.z = 0
odom.twist.twist.angular.z = vth
odom.twist.covariance = self.twistCovar
self.odomPub.publish(odom)
def getElapsedTime(self, timestamp, save=False):
dt = (timestamp - self.priorTime).to_sec()
if save:
self.priorTime = timestamp
return dt
def calculateTurnRadius(self, dLeft, dRight):
dlr = dLeft - dRight
# calculate radius of turn
if dlr != 0 and dLeft != 0 and dRight != 0:
lv = self.d4 + dLeft / dRight * self.d4
# print ("lv: " + str(lv))
r = lv / (1 - (dLeft / dRight))
else:
r = 0
dist = (dLeft + dRight) / 2
# calculate angle change
if (r != 0):
dTheta = dist / -r
else:
dTheta = 0
return r, dTheta
def calculateOdometry(self, message):
currentTime = message.header.stamp
encs = message.rel_enc
if not self.isValid(message):
return
dt = self.getElapsedTime(currentTime, save=True)
dLeft = self.mpt * (encs[1] - self.priorEncs[1])
dRight = self.mpt * (encs[4] - self.priorEncs[4])
# dth = (dRight - dLeft) / self.wheelTrack
radius, dTheta = self.calculateTurnRadius(dLeft, dRight)
# calculate centre of turn circle
xOrig = self.x + radius * math.cos(self.th)
yOrig = self.y + radius * math.sin(self.th)
# calculate new co-ordinates
xNew = xOrig + (self.x - xOrig) * math.cos(dTheta) - (self.y - yOrig) * math.sin(dTheta)
yNew = yOrig + (self.x - xOrig) * math.sin(dTheta) + (self.y - yOrig) * math.cos(dTheta)
#calculate change in x,y values
dx = xNew - self.x
dy = yNew - self.y
self.th += dTheta
if (self.th > (math.pi * 2)):
self.th -= (math.pi * 2)
elif (self.th < (-math.pi * 2)):
self.th += (math.pi * 2)
self.x = xNew
self.y = yNew
# convert to ros co-ords
xRos = self.y
yRos = -self.x
vxRos = dy / dt
vyRos = -dx / dt
vth = dTheta /dt
quaternion = self.getQuaternion(self.th)
if self.pubTF:
self.publishTransform(xRos, yRos, quaternion, currentTime)
self.publishOdomMessage(xRos, yRos, vxRos, vyRos, vth, quaternion, currentTime)
self.priorEncs = encs
def getQuaternion(self, th):
quaternion = Quaternion()
quaternion.x = 0.0
quaternion.y = 0.0
quaternion.z = math.sin(th / 2.0)
quaternion.w = math.cos(th / 2.0)
return quaternion
if __name__ == '__main__':
rospy.init_node('osr_odometry2')
rospy.loginfo("Starting the osr odometry2 node")
baseFrame = rospy.get_param("/odometry/base_frame_id", "base_link")
# mpt = rospy.get_param("/odometry/mpt", 0.000026322)
mpt = rospy.get_param("/odometry/mpt", 0.000100708)
wheelTrack = rospy.get_param("/odometry/wheel_track", 0.455)
d4 = rospy.get_param("/odometry/d4", 0.2559)
maxTickPerSec = rospy.get_param("/odometry/maxTickPerSec", 8000)
publishTF = rospy.get_param("~publishTF", False)
odom = Odometry2(baseFrame, wheelTrack, mpt, d4, maxTickPerSec, pubTF=publishTF)
encSub = rospy.Subscriber("/encoder", Encoder, odom.onEncoderMessage)
rate = rospy.Rate(20)
while not rospy.is_shutdown():
rate.sleep()
| en | 0.639307 | #!/usr/bin/env python # distance between wheels # calculate radius of turn # print ("lv: " + str(lv)) # calculate angle change # dth = (dRight - dLeft) / self.wheelTrack # calculate centre of turn circle # calculate new co-ordinates #calculate change in x,y values # convert to ros co-ords # mpt = rospy.get_param("/odometry/mpt", 0.000026322) | 2.148301 | 2 |
src/simulation-conditioning/utilities/data-generation-scripts/Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.py | alisiahkoohi/importance-of-transfer-learning | 0 | 9392 | import numpy as np
import h5py
import os
from devito.logger import info
from devito import TimeFunction, clear_cache
from examples.seismic.acoustic import AcousticWaveSolver
from examples.seismic import Model, RickerSource, Receiver, TimeAxis
from math import floor
from scipy.interpolate import griddata
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data_path', dest='data_path', type=str, default='/home/ec2-user/data', help='raw data path')
parser.add_argument('--save_dir', dest='save_dir', type=str, default='/home/ec2-user/data', help='saving directory')
args = parser.parse_args()
data_path = args.data_path
save_dir = args.save_dir
origin = (0., 0.)
spacing=(7.5, 7.5)
tn=1100.
nbpml=40
# Define your vp in km/sec (x, z)
vp = np.fromfile(os.path.join(data_path, 'vp_marmousi_bi'),
dtype='float32', sep="")
vp = np.reshape(vp, (1601, 401))
# vp = vp[400:1401, 0:401]
shape=[401, 301]
values = np.zeros([vp.shape[0]*vp.shape[1], ])
points = np.zeros([vp.shape[0]*vp.shape[1], 2])
k = 0
for indx in range(0, vp.shape[0]):
for indy in range(0, vp.shape[1]):
values[k] = vp[indx, indy]
points[k, 0] = indx
points[k, 1] = indy
k = k + 1
# nx, ny = shape[0], shape[1]
X, Y = np.meshgrid(np.array(np.linspace(1000, 1287, shape[0])), np.array(np.linspace(120, 232, shape[1])))
int_vp = griddata(points, values, (X, Y), method='cubic')
int_vp = np.transpose(int_vp)
vp = int_vp
# create model
model = Model(origin, spacing, shape, 2, vp, nbpml=nbpml)
# Derive timestepping from model spacing
dt = model.critical_dt
t0 = 0.0
nt = int(1 + (tn-t0) / dt) # Number of timesteps
time = np.linspace(t0, tn, nt) # Discretized time axis
datasize0 = int(np.shape(range(0, shape[0], 4))[0])
datasize1 = int(np.shape(range(100, nt, 20))[0])
datasize = datasize0*datasize1
strTrainA = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5')
strTrainB = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5')
dataset_train = "train_dataset"
file_trainA = h5py.File(strTrainA, 'w-')
datasetA = file_trainA.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
file_trainB = h5py.File(strTrainB, 'w-')
datasetB = file_trainB.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
num_rec = 601
rec_samp = np.linspace(0., model.domain_size[0], num=num_rec);
rec_samp = rec_samp[1]-rec_samp[0]
time_range = TimeAxis(start=t0, stop=tn, step=dt)
src = RickerSource(name='src', grid=model.grid, f0=0.025, time_range=time_range, space_order=1, npoint=1)
src.coordinates.data[0, :] = np.array([1*spacing[0], 2*spacing[1]]).astype(np.float32)
rec = Receiver(name='rec', grid=model.grid, time_range=time_range, npoint=num_rec)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
solverbad = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
space_order=2, freesurface=False)
solvergood = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
space_order=20, freesurface=False)
ulocgood = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=20, save=nt)
ulocbad = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=2, save=nt)
kk = 0
for xsrc in range(0, shape[0], 4):
clear_cache()
ulocgood.data.fill(0.)
ulocbad.data.fill(0.)
src.coordinates.data[0, :] = np.array([xsrc*spacing[0], 2*spacing[1]]).astype(np.float32)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
_, ulocgood, _ = solvergood.forward(m=model.m, src=src, time=nt-1, save=True)
_, ulocbad, _ = solverbad.forward(m=model.m, src=src, time=nt-1, save=True)
datasetA[kk:(kk+datasize1), :, :] = np.array(ulocgood.data[range(100, nt, 20), :, :])
datasetB[kk:(kk+datasize1), :, :] = np.array(ulocbad.data[range(100, nt, 20), :, :])
kk = kk + datasize1
file_trainA.close()
file_trainB.close()
| import numpy as np
import h5py
import os
from devito.logger import info
from devito import TimeFunction, clear_cache
from examples.seismic.acoustic import AcousticWaveSolver
from examples.seismic import Model, RickerSource, Receiver, TimeAxis
from math import floor
from scipy.interpolate import griddata
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data_path', dest='data_path', type=str, default='/home/ec2-user/data', help='raw data path')
parser.add_argument('--save_dir', dest='save_dir', type=str, default='/home/ec2-user/data', help='saving directory')
args = parser.parse_args()
data_path = args.data_path
save_dir = args.save_dir
origin = (0., 0.)
spacing=(7.5, 7.5)
tn=1100.
nbpml=40
# Define your vp in km/sec (x, z)
vp = np.fromfile(os.path.join(data_path, 'vp_marmousi_bi'),
dtype='float32', sep="")
vp = np.reshape(vp, (1601, 401))
# vp = vp[400:1401, 0:401]
shape=[401, 301]
values = np.zeros([vp.shape[0]*vp.shape[1], ])
points = np.zeros([vp.shape[0]*vp.shape[1], 2])
k = 0
for indx in range(0, vp.shape[0]):
for indy in range(0, vp.shape[1]):
values[k] = vp[indx, indy]
points[k, 0] = indx
points[k, 1] = indy
k = k + 1
# nx, ny = shape[0], shape[1]
X, Y = np.meshgrid(np.array(np.linspace(1000, 1287, shape[0])), np.array(np.linspace(120, 232, shape[1])))
int_vp = griddata(points, values, (X, Y), method='cubic')
int_vp = np.transpose(int_vp)
vp = int_vp
# create model
model = Model(origin, spacing, shape, 2, vp, nbpml=nbpml)
# Derive timestepping from model spacing
dt = model.critical_dt
t0 = 0.0
nt = int(1 + (tn-t0) / dt) # Number of timesteps
time = np.linspace(t0, tn, nt) # Discretized time axis
datasize0 = int(np.shape(range(0, shape[0], 4))[0])
datasize1 = int(np.shape(range(100, nt, 20))[0])
datasize = datasize0*datasize1
strTrainA = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5')
strTrainB = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5')
dataset_train = "train_dataset"
file_trainA = h5py.File(strTrainA, 'w-')
datasetA = file_trainA.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
file_trainB = h5py.File(strTrainB, 'w-')
datasetB = file_trainB.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
num_rec = 601
rec_samp = np.linspace(0., model.domain_size[0], num=num_rec);
rec_samp = rec_samp[1]-rec_samp[0]
time_range = TimeAxis(start=t0, stop=tn, step=dt)
src = RickerSource(name='src', grid=model.grid, f0=0.025, time_range=time_range, space_order=1, npoint=1)
src.coordinates.data[0, :] = np.array([1*spacing[0], 2*spacing[1]]).astype(np.float32)
rec = Receiver(name='rec', grid=model.grid, time_range=time_range, npoint=num_rec)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
solverbad = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
space_order=2, freesurface=False)
solvergood = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
space_order=20, freesurface=False)
ulocgood = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=20, save=nt)
ulocbad = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=2, save=nt)
kk = 0
for xsrc in range(0, shape[0], 4):
clear_cache()
ulocgood.data.fill(0.)
ulocbad.data.fill(0.)
src.coordinates.data[0, :] = np.array([xsrc*spacing[0], 2*spacing[1]]).astype(np.float32)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
_, ulocgood, _ = solvergood.forward(m=model.m, src=src, time=nt-1, save=True)
_, ulocbad, _ = solverbad.forward(m=model.m, src=src, time=nt-1, save=True)
datasetA[kk:(kk+datasize1), :, :] = np.array(ulocgood.data[range(100, nt, 20), :, :])
datasetB[kk:(kk+datasize1), :, :] = np.array(ulocbad.data[range(100, nt, 20), :, :])
kk = kk + datasize1
file_trainA.close()
file_trainB.close()
| en | 0.824598 | # Define your vp in km/sec (x, z) # vp = vp[400:1401, 0:401] # nx, ny = shape[0], shape[1] # create model # Derive timestepping from model spacing # Number of timesteps # Discretized time axis | 1.863713 | 2 |
facto.py | divine-coder/CODECHEF-PYTHON | 0 | 9393 | <reponame>divine-coder/CODECHEF-PYTHON
import math
if __name__=='__main__':
n=(int)(input())
for abc in range(n):
t=(int)(input())
print(math.factorial(t))
| import math
if __name__=='__main__':
n=(int)(input())
for abc in range(n):
t=(int)(input())
print math.factorial(t) | none | 1 | 3.535951 | 4 |
|
setup.py | johnmartingodo/pyKinematicsKineticsToolbox | 0 | 9394 | from setuptools import setup
setup(name="pykinematicskineticstoolbox",
version="0.0",
description="Installable python package which collects useful kinematics and kinetics functions",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
packages=["pykinematicskineticstoolbox"],
install_requires=["numpy"],
)
| from setuptools import setup
setup(name="pykinematicskineticstoolbox",
version="0.0",
description="Installable python package which collects useful kinematics and kinetics functions",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
packages=["pykinematicskineticstoolbox"],
install_requires=["numpy"],
)
| none | 1 | 1.024757 | 1 |
|
summary/summary_avail.py | bit0fun/plugins | 173 | 9395 | from datetime import datetime
# ensure an rpc peer is added
def addpeer(p, rpcpeer):
pid = rpcpeer['id']
if pid not in p.persist['peerstate']:
p.persist['peerstate'][pid] = {
'connected': rpcpeer['connected'],
'last_seen': datetime.now() if rpcpeer['connected'] else None,
'avail': 1.0 if rpcpeer['connected'] else 0.0
}
# exponetially smooth online/offline states of peers
def trace_availability(p, rpcpeers):
p.persist['availcount'] += 1
leadwin = max(min(p.avail_window, p.persist['availcount'] * p.avail_interval), p.avail_interval)
samples = leadwin / p.avail_interval
alpha = 1.0 / samples
beta = 1.0 - alpha
for rpcpeer in rpcpeers['peers']:
pid = rpcpeer['id']
addpeer(p, rpcpeer)
if rpcpeer['connected']:
p.persist['peerstate'][pid]['last_seen'] = datetime.now()
p.persist['peerstate'][pid]['connected'] = True
p.persist['peerstate'][pid]['avail'] = 1.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
else:
p.persist['peerstate'][pid]['connected'] = False
p.persist['peerstate'][pid]['avail'] = 0.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
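# Illustrative sketch, not called by the plugin: how the smoothing above evolves over repeated
# polls. With alpha = 1/samples, a peer that stays connected climbs towards 1.0 and a peer that
# stays offline decays towards 0.0. All numbers here are assumptions chosen for the demo.
def _demo_availability(samples=10, polls=30, connected=True):
    alpha = 1.0 / samples
    beta = 1.0 - alpha
    avail = 0.0 if connected else 1.0   # start from the opposite state to show convergence
    history = []
    for _ in range(polls):
        target = 1.0 if connected else 0.0
        avail = target * alpha + avail * beta
        history.append(round(avail, 3))
    return history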
| from datetime import datetime
# ensure an rpc peer is added
def addpeer(p, rpcpeer):
pid = rpcpeer['id']
if pid not in p.persist['peerstate']:
p.persist['peerstate'][pid] = {
'connected': rpcpeer['connected'],
'last_seen': datetime.now() if rpcpeer['connected'] else None,
'avail': 1.0 if rpcpeer['connected'] else 0.0
}
# exponetially smooth online/offline states of peers
def trace_availability(p, rpcpeers):
p.persist['availcount'] += 1
leadwin = max(min(p.avail_window, p.persist['availcount'] * p.avail_interval), p.avail_interval)
samples = leadwin / p.avail_interval
alpha = 1.0 / samples
beta = 1.0 - alpha
for rpcpeer in rpcpeers['peers']:
pid = rpcpeer['id']
addpeer(p, rpcpeer)
if rpcpeer['connected']:
p.persist['peerstate'][pid]['last_seen'] = datetime.now()
p.persist['peerstate'][pid]['connected'] = True
p.persist['peerstate'][pid]['avail'] = 1.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
else:
p.persist['peerstate'][pid]['connected'] = False
p.persist['peerstate'][pid]['avail'] = 0.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
| en | 0.850368 | # ensure an rpc peer is added # exponetially smooth online/offline states of peers | 2.444984 | 2 |
terrascript/dns/r.py | hugovk/python-terrascript | 4 | 9396 | # terrascript/dns/r.py
import terrascript
class dns_a_record_set(terrascript.Resource):
pass
class dns_aaaa_record_set(terrascript.Resource):
pass
class dns_cname_record(terrascript.Resource):
pass
class dns_mx_record_set(terrascript.Resource):
pass
class dns_ns_record_set(terrascript.Resource):
pass
class dns_ptr_record(terrascript.Resource):
pass
class dns_srv_record_set(terrascript.Resource):
pass
class dns_txt_record_set(terrascript.Resource):
pass
| # terrascript/dns/r.py
import terrascript
class dns_a_record_set(terrascript.Resource):
pass
class dns_aaaa_record_set(terrascript.Resource):
pass
class dns_cname_record(terrascript.Resource):
pass
class dns_mx_record_set(terrascript.Resource):
pass
class dns_ns_record_set(terrascript.Resource):
pass
class dns_ptr_record(terrascript.Resource):
pass
class dns_srv_record_set(terrascript.Resource):
pass
class dns_txt_record_set(terrascript.Resource):
pass
| en | 0.357026 | # terrascript/dns/r.py | 1.850092 | 2 |
JumpscaleCore/clients/tcprouter/TCPRouterFactory.py | gneumann333/jumpscaleX_core | 1 | 9397 | <filename>JumpscaleCore/clients/tcprouter/TCPRouterFactory.py<gh_stars>1-10
from Jumpscale import j
from .TCPRouterClient import TCPRouterClient
JSConfigs = j.baseclasses.object_config_collection
class TCPRouterFactory(JSConfigs):
__jslocation__ = "j.clients.tcp_router"
_CHILDCLASS = TCPRouterClient
def test(self):
"""
kosmos 'j.clients.tcp_router.test()'
"""
# get a client instance (TO CHECK: secret is already assigned to backend)
cl = self.get(
"test_instance",
local_ip="0.0.0.0",
local_port=18000,
remote_url="127.0.0.1",
remote_port=6379,
secret="test",
)
# connect to backend
cl.connect()
# stop connection
cl.stop()
print("TEST OK")
| <filename>JumpscaleCore/clients/tcprouter/TCPRouterFactory.py<gh_stars>1-10
from Jumpscale import j
from .TCPRouterClient import TCPRouterClient
JSConfigs = j.baseclasses.object_config_collection
class TCPRouterFactory(JSConfigs):
__jslocation__ = "j.clients.tcp_router"
_CHILDCLASS = TCPRouterClient
def test(self):
"""
kosmos 'j.clients.tcp_router.test()'
"""
# get a client instance (TO CHECK: secret is already assigned to backend)
cl = self.get(
"test_instance",
local_ip="0.0.0.0",
local_port=18000,
remote_url="127.0.0.1",
remote_port=6379,
secret="test",
)
# connect to backend
cl.connect()
# stop connection
cl.stop()
print("TEST OK")
| en | 0.712469 | kosmos 'j.clients.tcp_router.test()' # get a client instance (TO CHECK: secret is already assigned to backend) # connect to backend # stop connection | 2.154353 | 2 |
nmrglue/fileio/spinsolve.py | miguelarbesu/nmrglue | 0 | 9398 | """
Functions for reading Magritek Spinsolve binary (dx/1d) files and
parameter (acqu.par/proc.par) files.
"""
import os
from warnings import warn
import numpy as np
from . import fileiobase
from . import jcampdx
__developer_info__ = """
Spinsolve is the software used on the Magritek benchtop NMR devices.
A spectrum is saved in a folder with several files. The spectral data is
stored in these files: 'data.1d' (FID), 'spectrum.1d' (Fourier transformed)
and 'spectrum_processed.1d' (FT + processed by spinsolve)
Optional spectral data (System->Prefs->Setup->Global data storage):
'nmr_fid.dx' (FID stored in `JCAMP-DX standard <http://www.jcamp-dx.org/>`),
'spectrum.csv' and 'spectrum_processed.csv' (FT + processed by Spinsovle with ppm for each
point and intensity delimited by ';')
Other files:
'acqu.par' - all parameters that are used for acquisition
'Protocol.par' - text file used to reload data back into the Spinsolve software
'processing.script' - text file to transfer Spinsolve software protocol settings
into MNOVA
The Spinsolve Expert software has a slightly different output:
[Needs to be double checked as I do not have access to this software -LCageman]
- Output into JCAMP-DX is not possible
- 'spectrum_processed.1d' is not generated
- (new) 'fid.1d' - seems to be the same as 'data.1d'
- (new) 'proc.par' - contains processing parameters in the same style as 'acqu.par'
- (new) .pt1 files - seem to be plot files specific for the expert software, cannot
be read by NMRglue
"""
def read(dir='.', specfile=None, acqupar="acqu.par", procpar="proc.par"):
"""
Reads spinsolve files from a directory
When no spectrum filename is given (specfile), the following list is tried, in
that specific order
["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
To use the resolution enhanced spectrum use the './Enhanced' folder as input.
Note that spectrum.1d and spectrum_processed.1d contain only data in the
frequency domain, so no Fourier transformation is needed. Also, use
dic["spectrum"]["xaxis"] to plot the x-axis
Parameters
----------
dir : str
Directory to read from
specfile : str, optional
Filename to import spectral data from. None uses standard filename from:
["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
acqupar : str, optional
Filename for acquisition parameters. None uses standard name.
procpar : str, optional
Filename for processing parameters. None uses standard name.
Returns
-------
dic : dict
All parameters that can be present in the data folder:
dic["spectrum"] - First bytes of spectrum(_processed).1d
dic["acqu"] - Parameters present in acqu.par
dic["proc"] - Parameters present in proc.par
dic["dx"] - - Parameters present in the header of nmr_fid.dx
data : ndarray
Array of NMR data
"""
if os.path.isdir(dir) is not True:
raise IOError("directory %s does not exist" % (dir))
# Create empty dic
dic = {"spectrum": {}, "acqu": {}, "proc":{}, "dx":{}}
# Read in acqu.par and write to dic
acqupar = os.path.join(dir, acqupar)
if os.path.isfile(acqupar):
with open(acqupar, "r") as f:
info = f.readlines()
for line in info:
line = line.replace("\n", "")
k, v = line.split("=")
dic["acqu"][k.strip()] = v.strip()
# Read in proc.par and write to dic
procpar = os.path.join(dir,procpar)
if os.path.isfile(procpar):
with open(procpar, "r") as f:
info = f.readlines()
for line in info:
line = line.replace("\n", "")
k, v = line.split("=")
dic["proc"][k.strip()] = v.strip()
# Define which spectrumfile to take, using 'specfile' when defined, otherwise
# the files in 'priority_list' are tried, in that particular order
priority_list = ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d", None]
if specfile:
inputfile = os.path.join(dir, specfile)
if not os.path.isfile(inputfile):
raise IOError("File %s does not exist" % (inputfile))
else:
for priority in priority_list:
if priority is None:
raise IOError("directory %s does not contain spectral data" % (dir))
inputfile = os.path.join(dir, priority)
if os.path.isfile(inputfile):
break
# Detect which file we are dealing with from the extension and read in the spectral data
# Reading .dx file using existing nmrglue.fileio.jcampdx module
if inputfile.split('.')[-1] == "dx":
dic["dx"], raw_data = jcampdx.read(inputfile)
data = np.empty((int(dic["dx"]["$TD"][0]), ), dtype='complex128')
data = raw_data[0][:] + 1j * raw_data[1][:]
# Reading .1d files
elif inputfile.split('.')[-1] == "1d":
with open(inputfile, "rb") as f:
raw_data = f.read()
# Write out parameters from the first 32 bytes into dic["spectrum"]
keys = ["owner", "format", "version", "dataType", "xDim", "yDim", "zDim", "qDim"]
for i, k in enumerate(keys):
start = i * 4
end = start + 4
value = int.from_bytes( raw_data[start:end], "little")
dic["spectrum"][k] = value
data = np.frombuffer(raw_data[end:], "<f")
# The first 1/3 of the file is xaxis data (s or ppm)
split = data.shape[-1] // 3
xscale = data[0 : split]
dic["spectrum"]["xaxis"] = xscale
# The rest is real and imaginary data points interleaved
data = data[split : : 2] + 1j * data[split + 1 : : 2]
else:
raise IOError("File %s cannot be interpreted, use .dx or .1d instead" % (inputfile))
return dic,data
def guess_udic(dic,data):
"""
Guess parameters of universal dictionary from dic, data pair.
Parameters
----------
dic : dict
Dictionary of JCAMP-DX, acqu, proc and spectrum parameters.
data : ndarray
Array of NMR data.
Returns
-------
udic : dict
Universal dictionary of spectral parameters.
"""
# Create an empty universal dictionary
udic = fileiobase.create_blank_udic(1)
# Update default parameters; acqu.par parameters in dic are tried first, then JCAMP-DX header parameters
# size
if data is not None:
udic[0]["size"] = len(data)
else:
warn('No data, cannot set udic size')
# sw
try:
udic[0]['sw'] = float(dic['acqu']['bandwidth']) * 1000
except KeyError:
try:
udic[0]['sw'] = float(dic['dx']['$SW'][0]) * float(dic['dx']['$BF1'][0])
except KeyError:
try:
if dic["spectrum"]["freqdata"]:
udic[0]['sw'] = dic["spectrum"]["xaxis"][-1] - dic["spectrum"]["xaxis"][0]
elif data is not None:
udic[0]['sw'] = len(data) / dic["spectrum"]["xaxis"][-1]
else:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
except KeyError:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
# obs
try:
udic[0]['obs'] = float(dic['acqu']['b1Freq'])
except KeyError:
try:
udic[0]['obs'] = float(dic['dx']['$BF1'][0])
except KeyError:
warn("Cannot set observe frequency - set manually using: 'udic[0]['obs'] = x' where x is magnetic field in MHz")
# car
try:
udic[0]['car'] = float(dic['acqu']['lowestFrequency']) + (float(dic['acqu']['bandwidth']) * 1000 / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$REFERENCEPOINT'][0]) * -1 ) + (float(dic['dx']['$SW'][0]) * udic[0]['obs'] / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$BF1'][0]) - float(dic['dx']['$SF'][0])) * 1000000
except KeyError:
warn("Cannot set carrier - try: 'udic[0]['car'] = x * udic[0]['obs']' where x is the center of the spectrum in ppm")
# label
try:
udic[0]['label'] = dic['acqu']['rxChannel']
except KeyError:
try:
label_value = dic['dx'][".OBSERVENUCLEUS"][0].replace("^", "")
udic[0]["label"] = label_value
except KeyError:
warn("Cannot set observed nucleus label")
#keys left to default
# udic[0]['complex']
# udic[0]['encoding']
# udic[0]['time'] = True
# udic[0]['freq'] = False
return udic
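# Minimal usage sketch (assumptions flagged inline): chains read() and guess_udic() from this
# module. The folder name is hypothetical; any Spinsolve output directory containing acqu.par
# and one of the supported spectrum files would work the same way.
if __name__ == "__main__":
    example_dir = "./example_spinsolve_data"   # hypothetical path, replace with a real dataset
    if os.path.isdir(example_dir):
        dic, data = read(example_dir)          # picks nmr_fid.dx / data.1d / ... automatically
        udic = guess_udic(dic, data)
        print("points:", udic[0]["size"])
        print("spectral width (Hz):", udic[0]["sw"])
        print("observe frequency (MHz):", udic[0]["obs"])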
| """
Functions for reading Magritek Spinsolve binary (dx/1d) files and
parameter (acqu.par/proc.par) files.
"""
import os
from warnings import warn
import numpy as np
from . import fileiobase
from . import jcampdx
__developer_info__ = """
Spinsolve is the software used on the Magritek benchtop NMR devices.
A spectrum is saved in a folder with several files. The spectral data is
stored in these files: 'data.1d' (FID), 'spectrum.1d' (Fourier transformed)
and 'spectrum_processed.1d' (FT + processed by spinsolve)
Optional spectral data (System->Prefs->Setup->Global data storage):
'nmr_fid.dx' (FID stored in `JCAMP-DX standard <http://www.jcamp-dx.org/>`),
'spectrum.csv' and 'spectrum_processed.csv' (FT + processed by Spinsolve with ppm for each
point and intensity delimited by ';')
Other files:
'acqu.par' - all parameters that are used for acquisition
'Protocol.par' - text file used to reload data back into the Spinsolve software
'processing.script' - text file to transfer Spinsolve software protocol settings
into MNOVA
The Spinsolve Expert software has a slightly different output:
[Needs to be double checked as I do not have access to this software -LCageman]
- Output into JCAMP-DX is not possible
- 'spectrum_processed.1d' is not generated
- (new) 'fid.1d' - seems to be the same as 'data.1d'
- (new) 'proc.par' - contains processing parameters in the same style as 'acqu.par'
- (new) .pt1 files - seem to be plot files specific for the expert software, cannot
be read by NMRglue
"""
def read(dir='.', specfile=None, acqupar="acqu.par", procpar="proc.par"):
"""
Reads spinsolve files from a directory
When no spectrum filename is given (specfile), the following list is tried, in
that specific order
["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
To use the resolution enhanced spectrum use the './Enhanced' folder as input.
Note that spectrum.1d and spectrum_processed.1d contain only data in the
frequency domain, so no Fourier transformation is needed. Also, use
dic["spectrum"]["xaxis"] to plot the x-axis
Parameters
----------
dir : str
Directory to read from
specfile : str, optional
Filename to import spectral data from. None uses standard filename from:
["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
acqupar : str, optional
Filename for acquisition parameters. None uses standard name.
procpar : str, optional
Filename for processing parameters. None uses standard name.
Returns
-------
dic : dict
All parameters that can be present in the data folder:
dic["spectrum"] - First bytes of spectrum(_processed).1d
dic["acqu"] - Parameters present in acqu.par
dic["proc"] - Parameters present in proc.par
dic["dx"] - - Parameters present in the header of nmr_fid.dx
data : ndarray
Array of NMR data
"""
if os.path.isdir(dir) is not True:
raise IOError("directory %s does not exist" % (dir))
# Create empty dic
dic = {"spectrum": {}, "acqu": {}, "proc":{}, "dx":{}}
# Read in acqu.par and write to dic
acqupar = os.path.join(dir, acqupar)
if os.path.isfile(acqupar):
with open(acqupar, "r") as f:
info = f.readlines()
for line in info:
line = line.replace("\n", "")
k, v = line.split("=")
dic["acqu"][k.strip()] = v.strip()
# Read in proc.par and write to dic
procpar = os.path.join(dir,procpar)
if os.path.isfile(procpar):
with open(procpar, "r") as f:
info = f.readlines()
for line in info:
line = line.replace("\n", "")
k, v = line.split("=")
dic["proc"][k.strip()] = v.strip()
# Define which spectrumfile to take, using 'specfile' when defined, otherwise
# the files in 'priority_list' are tried, in that particular order
priority_list = ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d", None]
if specfile:
inputfile = os.path.join(dir, specfile)
if not os.path.isfile(inputfile):
raise IOError("File %s does not exist" % (inputfile))
else:
for priority in priority_list:
if priority is None:
raise IOError("directory %s does not contain spectral data" % (dir))
inputfile = os.path.join(dir, priority)
if os.path.isfile(inputfile):
break
# Detect which file we are dealing with from the extension and read in the spectral data
# Reading .dx file using existing nmrglue.fileio.jcampdx module
if inputfile.split('.')[-1] == "dx":
dic["dx"], raw_data = jcampdx.read(inputfile)
data = np.empty((int(dic["dx"]["$TD"][0]), ), dtype='complex128')
data = raw_data[0][:] + 1j * raw_data[1][:]
# Reading .1d files
elif inputfile.split('.')[-1] == "1d":
with open(inputfile, "rb") as f:
raw_data = f.read()
# Write out parameters from the first 32 bytes into dic["spectrum"]
keys = ["owner", "format", "version", "dataType", "xDim", "yDim", "zDim", "qDim"]
for i, k in enumerate(keys):
start = i * 4
end = start + 4
value = int.from_bytes( raw_data[start:end], "little")
dic["spectrum"][k] = value
data = np.frombuffer(raw_data[end:], "<f")
# The first 1/3 of the file is xaxis data (s or ppm)
split = data.shape[-1] // 3
xscale = data[0 : split]
dic["spectrum"]["xaxis"] = xscale
# The rest is real and imaginary data points interleaved
data = data[split : : 2] + 1j * data[split + 1 : : 2]
else:
raise IOError("File %s cannot be interpreted, use .dx or .1d instead" % (inputfile))
return dic,data
def guess_udic(dic,data):
"""
Guess parameters of universal dictionary from dic, data pair.
Parameters
----------
dic : dict
Dictionary of JCAMP-DX, acqu, proc and spectrum parameters.
data : ndarray
Array of NMR data.
Returns
-------
udic : dict
Universal dictionary of spectral parameters.
"""
# Create an empty universal dictionary
udic = fileiobase.create_blank_udic(1)
# Update default parameters; acqu.par parameters in dic are tried first, then JCAMP-DX header parameters
# size
if data is not None:
udic[0]["size"] = len(data)
else:
warn('No data, cannot set udic size')
# sw
try:
udic[0]['sw'] = float(dic['acqu']['bandwidth']) * 1000
except KeyError:
try:
udic[0]['sw'] = float(dic['dx']['$SW'][0]) * float(dic['dx']['$BF1'][0])
except KeyError:
try:
if dic["spectrum"]["freqdata"]:
udic[0]['sw'] = dic["spectrum"]["xaxis"][-1] - dic["spectrum"]["xaxis"][0]
elif data is not None:
udic[0]['sw'] = len(data) / dic["spectrum"]["xaxis"][-1]
else:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
except KeyError:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
# obs
try:
udic[0]['obs'] = float(dic['acqu']['b1Freq'])
except KeyError:
try:
udic[0]['obs'] = float(dic['dx']['$BF1'][0])
except KeyError:
warn("Cannot set observe frequency - set manually using: 'udic[0]['obs'] = x' where x is magnetic field in MHz")
# car
try:
udic[0]['car'] = float(dic['acqu']['lowestFrequency']) + (float(dic['acqu']['bandwidth']) * 1000 / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$REFERENCEPOINT'][0]) * -1 ) + (float(dic['dx']['$SW'][0]) * udic[0]['obs'] / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$BF1'][0]) - float(dic['dx']['$SF'][0])) * 1000000
except KeyError:
warn("Cannot set carrier - try: 'udic[0]['car'] = x * udic[0]['obs']' where x is the center of the spectrum in ppm")
# label
try:
udic[0]['label'] = dic['acqu']['rxChannel']
except KeyError:
try:
label_value = dic['dx'][".OBSERVENUCLEUS"][0].replace("^", "")
udic[0]["label"] = label_value
except KeyError:
warn("Cannot set observed nucleus label")
#keys left to default
# udic[0]['complex']
# udic[0]['encoding']
# udic[0]['time'] = True
# udic[0]['freq'] = False
return udic
| en | 0.721382 | Functions for reading Magritek Spinsolve binary (dx/1d) files and parameter (acqu.par/proc.par) files. Spinsolve is the software used on the Magritek benchtop NMR devices. A spectrum is saved in a folder with several files. The spectral data is stored in these files: 'data.1d' (FID), 'spectrum.1d' (Fourier transformed) and 'spectrum_processed.1d' (FT + processed by spinsolve) Optional spectral data (System->Prefs->Setup->Global data storage): 'nmr_fid.dx' (FID stored in `JCAMP-DX standard <http://www.jcamp-dx.org/>`), 'spectrum.csv' and 'spectrum_processed.csv' (FT + processed by Spinsovle with ppm for each point and intensity delimited by ';') Other files: 'acqu.par' - all parameters that are used for acquisition 'Protocol.par' - text file used to reload data back into the Spinsolve software 'processing.script' - text file to transfer Spinsolve software protocol settings into MNOVA The Spinsolve Expert software has a slightly different output: [Needs to be double checked as I do not have access to this software -LCageman] - Output into JCAMP-DX is not possible - 'spectrum_processed.1d' is not generated - (new) 'fid.1d' - seems to be the same as 'data.1d' - (new) 'proc.par' - contains processing parameters in the same style as 'acqu.par' - (new) .pt1 files - seem to be plot files specific for the expert software, cannot be read by NMRglue Reads spinsolve files from a directory When no spectrum filename is given (specfile), the following list is tried, in that specific order ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"] To use the resolution enhanced spectrum use the './Enhanced' folder as input. Note that spectrum.1d and spectrum_processed.1d contain only data in the frequency domain, so no Fourier transformation is needed. Also, use dic["spectrum"]["xaxis"] to plot the x-axis Parameters ---------- dir : str Directory to read from specfile : str, optional Filename to import spectral data from. None uses standard filename from: ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"] acqupar : str, optional Filename for acquisition parameters. None uses standard name. procpar : str, optional Filename for processing parameters. None uses standard name. Returns ------- dic : dict All parameters that can be present in the data folder: dic["spectrum"] - First bytes of spectrum(_processed).1d dic["acqu"] - Parameters present in acqu.par dic["proc"] - Parameters present in proc.par dic["dx"] - - Parameters present in the header of nmr_fid.dx data : ndarray Array of NMR data # Create empty dic # Read in acqu.par and write to dic # Read in proc.par and write to dic # Define which spectrumfile to take, using 'specfile' when defined, otherwise # the files in 'priority_list' are tried, in that particular order # Detect which file we are dealing with from the extension and read in the spectral data # Reading .dx file using existing nmrglue.fileio.jcampdx module # Reading .1d files # Write out parameters from the first 32 bytes into dic["spectrum"] # The first 1/3 of the file is xaxis data (s or ppm) # The rest is real and imaginary data points interleaved Guess parameters of universal dictionary from dic, data pair. Parameters ---------- dic : dict Dictionary of JCAMP-DX, acqu, proc and spectrum parameters. data : ndarray Array of NMR data. Returns ------- udic : dict Universal dictionary of spectral parameters. 
# Create an empty universal dictionary # Update defalt parameters, first acqu.par parameters in dic are tried, then JCAMP-DX header parameters # size # sw # obs # car # label #keys left to default # udic[0]['complex'] # udic[0]['encoding'] # udic[0]['time'] = True # udic[0]['freq'] = False | 2.645175 | 3 |
src/navigation_analytics/navigation_data.py | mielgosez/navigation_analytics | 0 | 9399 | <reponame>mielgosez/navigation_analytics
import logging
import copy
import pickle
import pandas as pd
class BaseClass:
def __init__(self,
input_data: pd.DataFrame,
logger: logging.Logger,
metadata: dict):
self.__input_data = input_data
self.__logger = logger
self.__metadata = metadata
@property
def logger(self):
return self.__logger
@property
def metadata(self):
return self.__metadata
@property
def input_data(self):
return self.__input_data
@input_data.setter
def input_data(self, new_input_data: pd.DataFrame):
self.__input_data = new_input_data
@property
def events_id(self):
return self.__metadata['metadata']['primary_keys']['events']
@property
def session_id(self):
return self.__metadata['metadata']['primary_keys']['sessions']
@property
def page_id(self):
return self.__metadata['metadata']['primary_keys']['pages']
@property
def group_id(self):
return self.metadata['metadata']['valid_values']['groups']['group_id']
@property
def valid_groups(self):
return self.metadata['metadata']['valid_values']['groups']['valid']
@property
def action_id(self):
return self.metadata['metadata']['valid_values']['actions']['action_id']
@property
def valid_actions(self):
return self.metadata['metadata']['valid_values']['actions']['valid']
@property
def search_action(self):
return self.metadata['metadata']['valid_values']['actions']['search_action']
@property
def visit_action(self):
return self.metadata['metadata']['valid_values']['actions']['visit_action']
@property
def timestamp_id(self):
return self.metadata['metadata']['datetime']
@property
def kpi_duration(self):
return self.metadata['metadata']['valid_values']['kpis']['duration_page']
@property
def kpi_position(self):
return self.metadata['metadata']['valid_values']['kpis']['result_position']
@property
def kpi_number_results(self):
return self.metadata['metadata']['valid_values']['kpis']['number_results']
class DataValidator(BaseClass):
def __init__(self,
logger: logging.Logger,
metadata: dict,
input_data: pd.DataFrame):
super().__init__(logger=logger,
metadata=metadata,
input_data=input_data)
self.default_pipeline()
# Pipelines
def default_pipeline(self):
self.check_events_are_unique()
self.check_groups_are_valid()
self.check_one_group_per_session()
# Validation Rules
def check_events_are_unique(self):
"""
Verifies that event identifier is primary key of input data.
:return: Validation
"""
number_rows = self.input_data.shape[0]
events_id = self.metadata['metadata']['primary_keys']['events']
number_events = len(self.input_data[events_id].unique())
if number_rows == number_events:
self.logger.info(f'Validation - Events are unique: {number_rows} rows and {number_events} events.')
else:
self.logger.error(f'Validation - Events are not unique: {number_rows} rows and {number_events} events.')
def check_groups_are_valid(self):
"""
Verifies that groups matches with those declared in metadata.
:return: Validation
"""
group_id = self.metadata['metadata']['valid_values']['groups']['group_id']
groups_in_data = list(self.input_data[group_id].unique())
group_valid_names = list(self.metadata['metadata']['valid_values']['groups']['valid'])
if set(groups_in_data) == set(group_valid_names):
self.logger.info(f'Validation - Groups are valid: {", ".join(group_valid_names)}.')
else:
self.logger.error(f'Validation - Group names are not valid: '
f'Names in data are {", ".join(groups_in_data)}. '
f'Names in metadata are {", ".join(group_valid_names)}.')
def check_one_group_per_session(self):
"""
Verifies that there's at most one group per session.
:return: Validation
"""
group_id = self.metadata['metadata']['valid_values']['groups']['group_id']
session_id = self.metadata['metadata']['primary_keys']['sessions']
max_num_groups = self.input_data.groupby(session_id)[group_id].apply(lambda x: len(set(x))).max()
if max_num_groups == 1:
self.logger.info(f'Validation - Just one group per session.')
else:
self.logger.error(f'Validation - Groups per session is different to one. '
f'Maximum number of groups per session detected in data set is: {max_num_groups}')
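# Illustrative only: a metadata dictionary shaped the way BaseClass and DataValidator expect it.
# The 'session_id', 'action' and 'visitPage' values match literals used later in this module;
# every other column name and valid value is an assumption made up for this example.
_EXAMPLE_METADATA = {
    'metadata': {
        'primary_keys': {'events': 'uuid', 'sessions': 'session_id', 'pages': 'page_id'},
        'datetime': 'timestamp',
        'valid_values': {
            'groups': {'group_id': 'group', 'valid': ['a', 'b']},
            'actions': {'action_id': 'action',
                        'valid': ['searchResultPage', 'visitPage', 'checkin'],
                        'search_action': 'searchResultPage',
                        'visit_action': 'visitPage'},
            'kpis': {'duration_page': 'checkin',
                     'result_position': 'result_position',
                     'number_results': 'n_results'},
        },
    }
}
# e.g. DataValidator(input_data=some_dataframe, logger=logging.getLogger(__name__),
#                    metadata=_EXAMPLE_METADATA) runs the three validation rules on construction.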
class SessionAnalyzer(BaseClass):
def __init__(self,
input_data: pd.DataFrame,
metadata: dict,
logger: logging.Logger):
super().__init__(logger=logger,
metadata=metadata,
input_data=input_data)
self.__results = dict()
self.__session_data = self.create_session_look_up()
self.__page_data = self.create_page_look_up()
self.__page_data_out = self.create_page_look_up_out()
self.__search_table = self.create_search_table()
self.__duration_table = self.create_duration_table()
def filter_session_by_group(self, group_id: str):
"""
Filter session by group id provided in the input. This is expected to be a recurrent operation.
:param group_id:
:return:
"""
if group_id not in self.valid_groups:
self.logger.error(f'{group_id} is not a valid group.')
return self.session_data.loc[self.session_data[self.group_id] == group_id, :]
# Metrics
def compute_click_through_rate(self, group_id: str = None):
"""
This function computes the click-through rate, defined here as the proportion of search sessions
that contain at least one page visit. Session attribute.
:param group_id:
:return:
"""
result = None
if group_id is None:
key = 'click_through_rate'
sub_key = 'all'
# Merging sessions with page ids
df = copy.deepcopy(self.session_data.merge(self.page_data, on=self.session_id, how='left'))
# Computing boolean vector: True means session has a visit, False otherwise.
result = df.groupby(by=self.session_id)[self.action_id].apply(lambda x: self.visit_action in set(x))
else:
key = 'click_through_rate'
sub_key = group_id
if group_id in self.valid_groups:
# Filtering sessions by required group.
filtered_sessions = self.filter_session_by_group(group_id=group_id)
df = copy.deepcopy(filtered_sessions.merge(self.page_data, on=self.session_id, how='left'))
result = df.groupby(by='session_id').action.apply(lambda x: 'visitPage' in set(x))
else:
self.logger.error(f'{group_id} is not a valid group.')
# Computing ctr
ctr = sum(result) / len(result)
self.logger.info(f'Click Through Rate is equal to: {ctr}')
# Storing results
update_result = self.kpi_results
try:
update_result[key][key].append(ctr)
update_result[key]['group'].append(sub_key)
except KeyError:
update_result[key] = dict()
update_result[key][key] = [ctr]
update_result[key]['group'] = [sub_key]
self.kpi_results = update_result
return ctr
def compute_search_frequency(self,
group_id: str = None,
number_ranking: int = 10):
"""
Get the most common first result per session. This is a Session Attribute.
:param number_ranking: Number of results to visualize.
:param group_id:
:return:
"""
if group_id is None:
key = 'search_frequency'
sub_key = 'all'
df_sessions = self.session_data.copy()
else:
key = 'search_frequency'
sub_key = group_id
df_sessions = self.filter_session_by_group(group_id=group_id)
df = df_sessions.merge(self.page_data, on=self.session_id, how='left')
# Merge with duration table to retrieve datestamp data.
df_all = df.merge(self.duration_table, on=self.page_id, how='left')
df_all.dropna(inplace=True)
# Most common first result
df_all = df_all.groupby('session_id').apply(lambda x:
x.loc[x[self.timestamp_id] == min(x[self.timestamp_id]),
[self.kpi_position, self.timestamp_id]])
# Result
result = df_all[self.kpi_position].value_counts(normalize=True)[:number_ranking]
self.logger.info(f'Most common result is {result.index[0]}')
# Store result
updated_results = self.kpi_results
try:
updated_results[key][key].extend(list(result.values))
updated_results[key]['position'].extend(list(result.index))
updated_results[key]['group'].extend([sub_key]*len(result.index))
except KeyError:
updated_results[key] = dict()
updated_results[key][key] = list(result.values)
updated_results[key]['position'] = list(result.index)
updated_results[key]['group'] = [sub_key]*len(result.index)
self.kpi_results = updated_results
return result
def compute_zero_result_rate(self,
group_id: str = None):
"""
Computes the proportion of searches that end up in no results.
:param group_id:
:return:
"""
df = self.search_table.copy()
# Compute number of searches resulting in found elements.
df['success'] = [True if item == 0 else False for item in df[self.kpi_number_results]]
if group_id is None:
key = 'zero_result_rate'
sub_key = 'all'
result = df['success']
else:
key = 'zero_result_rate'
sub_key = group_id
df_sessions = self.filter_session_by_group(group_id=group_id)
df_pages = df_sessions.merge(self.page_data, on=self.session_id, how='left')
df = df.merge(df_pages, on=self.page_id, how='left')
df.dropna(inplace=True)
result = df['success']
# Computing result
value = sum(result) / len(result)
self.logger.info(f'Zero result rate is: {value}')
# Storing result.
updated_results = self.kpi_results
try:
updated_results[key][key].append(value)
updated_results[key]['group'].append(sub_key)
except KeyError:
updated_results[key] = dict()
updated_results[key][key] = [value]
updated_results[key]['group'] = [sub_key]
self.kpi_results = updated_results
return value
def compute_session_length(self,
group_id: str = None):
"""
        Compute each session's length in seconds, measured from its first to its last event. Session attribute.
        :param group_id: Optional group to restrict the computation to; all sessions are used when None.
:return:
"""
if group_id is None:
key = 'session_length'
sub_key = 'all'
df = self.input_data
else:
key = 'session_length'
sub_key = group_id
df = self.filter_session_by_group(group_id=group_id)
df = df.merge(self.input_data, on=self.session_id, how='left')
# Compute results
value = df.groupby(self.session_id)[self.timestamp_id].apply(lambda x: (max(x) - min(x)).total_seconds())
time_value = df.groupby(self.session_id)[self.timestamp_id].min()
# Store results
updated_results = self.kpi_results
try:
updated_results[key][key].extend(list(value.values))
updated_results[key]['session_date'].extend(list(time_value.values))
updated_results[key]['session_id'].extend(list(value.index))
updated_results[key]['group'].extend([sub_key]*len(value.index))
except KeyError:
updated_results[key] = dict()
updated_results[key][key] = list(value.values)
updated_results[key]['session_date'] = list(time_value.values)
updated_results[key]['session_id'] = list(value.index)
updated_results[key]['group'] = [sub_key]*len(value.index)
self.kpi_results = updated_results
return value
    # Data preparation helpers
def update_data(self):
self.page_data = self.create_page_look_up()
self.page_data_out = self.create_page_look_up_out()
self.session_data = self.create_session_look_up()
self.duration_table = self.create_duration_table()
self.search_table = self.create_search_table()
def create_session_look_up(self):
return self.input_data[[self.session_id, self.group_id]].drop_duplicates()
def create_page_look_up_out(self):
return self.input_data[[self.session_id, self.page_id]].drop_duplicates()
def create_page_look_up(self):
return self.input_data[[self.session_id, self.page_id, self.action_id]].drop_duplicates()
def create_search_table(self):
"""
        Keeps only search events from the original dataset.
:return: Information relevant only to searches
"""
local_df = self.input_data.copy()
local_df = local_df.loc[local_df[self.action_id] == self.search_action,
[self.events_id, self.timestamp_id, self.page_id, self.kpi_number_results]]
return local_df
def create_duration_table(self):
"""
        Keeps only non-search (page visit) events from the original dataset, reduced to one row per page.
        :return: Timestamp, result position and page duration for each visited page.
"""
local_df = self.input_data.copy()
local_df = local_df.loc[local_df[self.action_id] != self.search_action,
[self.timestamp_id,
self.page_id,
self.kpi_position,
self.kpi_duration]]
# Remove redundant information on position and duration
local_df = local_df.groupby(self.page_id).max()
no_duration_info = local_df[self.kpi_duration].isna()
no_position_info = local_df[self.kpi_position].isna()
self.logger.warning(f'{no_position_info.sum()} NA values for {self.kpi_position}.')
self.logger.warning(f'{no_duration_info.sum()} NA values for {self.kpi_duration}.')
        # Rows that report a duration but no result position are treated as invalid and set aside.
        invalid_rows = no_position_info & ~no_duration_info
        kpi_results = self.kpi_results
        kpi_results['invalid_results'] = local_df.loc[invalid_rows, :].copy()
        self.kpi_results = kpi_results
        self.logger.warning(f'{invalid_rows.sum()} rows with a duration but no result position were dropped.')
        local_df = local_df.loc[~invalid_rows, :]
# The rest of cases fill 0
local_df.fillna(0, inplace=True)
local_df.reset_index(inplace=True)
local_df.sort_values(by=[self.timestamp_id, self.page_id], inplace=True)
return local_df
# Getters and setters
@property
def session_data(self):
return self.__session_data
@session_data.setter
def session_data(self, new_session_data: pd.DataFrame):
self.__session_data = new_session_data
@property
def page_data(self):
return self.__page_data
@page_data.setter
def page_data(self, new_page_data: pd.DataFrame):
self.__page_data = new_page_data
@property
def page_data_out(self):
return self.__page_data_out
@page_data_out.setter
def page_data_out(self, new_page_data_out: pd.DataFrame):
self.__page_data_out = new_page_data_out
@property
def number_sessions(self):
return self.session_data.shape[0]
@property
def number_pages(self):
return self.page_data.shape[0]
@property
def duration_table(self):
return self.__duration_table
@duration_table.setter
def duration_table(self, new_duration_table: pd.DataFrame):
self.__duration_table = new_duration_table
@property
def search_table(self):
return self.__search_table
@search_table.setter
def search_table(self, new_search_table: pd.DataFrame):
self.__search_table = new_search_table
@property
def kpi_results(self):
return self.__results
@kpi_results.setter
def kpi_results(self, results: dict):
self.__results = results
class NavigationDataAnalyzer:
def __init__(self,
input_data: pd.DataFrame,
metadata: dict,
logger_level: int = logging.WARNING):
self.__logger = logging.Logger(name='default_logger',
level=logger_level)
self.__input_data = input_data
self.__metadata = metadata
self.__data_validator = DataValidator(input_data=input_data,
metadata=metadata,
logger=self.logger)
self.__session_analyzer = SessionAnalyzer(input_data=input_data,
metadata=metadata,
logger=self.logger)
def get_number_events(self,
group_name: str = None):
"""
        Method used to retrieve the number of events in the dataset. It can also be filtered by group name.
This function assumes that events are the primary key of the dataset.
:param group_name: Name of the study groups as defined in metadata (['valid_values']['groups']['valid'])
:return: Number of events in the dataset (in total or per group)
"""
groups_id = self.metadata['metadata']['valid_values']['groups']['group_id']
valid_groups = self.metadata['metadata']['valid_values']['groups']['valid']
if group_name is None:
return self.input_data.shape[0]
else:
if group_name in valid_groups:
return self.input_data.loc[self.input_data[groups_id] == group_name].shape[0]
else:
self.logger.error(f'{group_name} is not a valid group name. '
f'Please select among those listed here: {", ".join(valid_groups)}')
def save(self, name: str = 'navigation_data_analyzer.pickle'):
objects_to_store = dict()
objects_to_store['metadata'] = self.metadata
objects_to_store['input_data'] = self.input_data
objects_to_store['kpi_results'] = self.session_analyzer.kpi_results
with open(name, 'wb') as fp:
pickle.dump(objects_to_store, fp)
@staticmethod
def load(filepath: str):
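        # Rebuild an analyzer from a pickle previously produced by save(), restoring the stored KPI results.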
with open(filepath, 'rb') as fp:
existing_object = pickle.load(fp)
instance_object = NavigationDataAnalyzer(input_data=existing_object['input_data'],
metadata=existing_object['metadata'])
instance_object.session_analyzer.kpi_results = existing_object['kpi_results']
return instance_object
def to_excel(self, filename: str):
excel_writer = pd.ExcelWriter(filename)
self.session_analyzer.session_data.to_excel(excel_writer, sheet_name='session_data', index=False)
self.session_analyzer.page_data_out.to_excel(excel_writer, sheet_name='page_data', index=False)
self.session_analyzer.duration_table.to_excel(excel_writer, sheet_name='duration_table', index=False)
self.session_analyzer.search_table.to_excel(excel_writer, sheet_name='search_table', index=False)
for key, value in self.session_analyzer.kpi_results.items():
results = pd.DataFrame(value)
results.to_excel(excel_writer, sheet_name=f'kpi_{key}', index=False)
groups_df = pd.DataFrame({'group': self.session_analyzer.valid_groups})
groups_df.to_excel(excel_writer, sheet_name='groups', index=False)
excel_writer.save()
excel_writer.close()
# Getters and Setters
@property
def session_analyzer(self):
return self.__session_analyzer
@property
def data_validator(self):
return self.__data_validator
@property
def input_data(self):
return self.__input_data
@input_data.setter
def input_data(self, new_input_data: pd.DataFrame):
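        # Replacing the input data re-runs the validation pipeline before the new frame is accepted.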
self.data_validator.input_data = new_input_data
self.data_validator.default_pipeline()
self.__input_data = new_input_data
@property
def metadata(self):
return self.__metadata
@metadata.setter
def metadata(self, new_metadata: dict):
        self.__metadata = new_metadata
@property
def logger(self):
return self.__logger
@logger.setter
def logger(self, new_logger):
self.__logger = new_logger
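

# ---------------------------------------------------------------------------
# Usage sketch (editorial addition): a minimal, hypothetical example of how the
# classes above could be wired together. The metadata layout is inferred from
# the property accessors in BaseClass; all column names, group labels and
# values are illustrative assumptions, not part of the original project.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    example_metadata = {
        'metadata': {
            'primary_keys': {'events': 'event_id', 'sessions': 'session_id', 'pages': 'page_id'},
            'datetime': 'timestamp',
            'valid_values': {
                'groups': {'group_id': 'group', 'valid': ['a', 'b']},
                'actions': {'action_id': 'action',
                            'valid': ['searchResultPage', 'visitPage'],
                            'search_action': 'searchResultPage',
                            'visit_action': 'visitPage'},
                'kpis': {'duration_page': 'checkin',
                         'result_position': 'result_position',
                         'number_results': 'n_results'},
            },
        },
    }
    example_events = pd.DataFrame({
        'event_id': [1, 2, 3, 4],
        'session_id': ['s1', 's1', 's2', 's2'],
        'page_id': ['p1', 'p2', 'p3', 'p4'],
        'group': ['a', 'a', 'b', 'b'],
        'action': ['searchResultPage', 'visitPage', 'searchResultPage', 'visitPage'],
        'timestamp': pd.to_datetime(['2020-01-01 10:00', '2020-01-01 10:01',
                                     '2020-01-01 11:00', '2020-01-01 11:05']),
        'checkin': [None, 30, None, 60],
        'result_position': [None, 1, None, 2],
        'n_results': [10, None, 0, None],
    })
    analyzer = NavigationDataAnalyzer(input_data=example_events, metadata=example_metadata)
    analyzer.session_analyzer.compute_click_through_rate()
    analyzer.session_analyzer.compute_zero_result_rate()
    analyzer.session_analyzer.compute_session_length()
    print(analyzer.session_analyzer.kpi_results)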