code (string, lengths 20 to 1.05M) | apis (sequence) | extract_api (string, lengths 75 to 5.24M) |
---|---|---|
# Generated by Django 3.0.8 on 2020-08-06 19:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webapp', '0003_auto_20200806_1404'),
]
operations = [
migrations.RemoveField(
model_name='person',
name='url',
),
migrations.RemoveField(
model_name='planet',
name='url',
),
migrations.RemoveField(
model_name='species',
name='url',
),
migrations.AlterField(
model_name='person',
name='birth_year',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AlterField(
model_name='person',
name='eye_color',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AlterField(
model_name='person',
name='gender',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AlterField(
model_name='person',
name='hair_color',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AlterField(
model_name='person',
name='height',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AlterField(
model_name='person',
name='mass',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AlterField(
model_name='person',
name='skin_color',
field=models.CharField(blank=True, max_length=20, null=True),
),
migrations.AlterField(
model_name='planet',
name='climate',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='planet',
name='diameter',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='planet',
name='gravity',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='planet',
name='orbital_period',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='planet',
name='population',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='planet',
name='rotation_period',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='planet',
name='surface_water',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='planet',
name='terrain',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='species',
name='average_height',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='species',
name='average_lifespan',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='species',
name='classification',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='species',
name='designation',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='species',
name='eye_colors',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='species',
name='hair_colors',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name='species',
name='language',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AlterField(
model_name='species',
name='skin_colors',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((234, 289), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""person"""', 'name': '"""url"""'}), "(model_name='person', name='url')\n", (256, 289), False, 'from django.db import migrations, models\n'), ((334, 389), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""planet"""', 'name': '"""url"""'}), "(model_name='planet', name='url')\n", (356, 389), False, 'from django.db import migrations, models\n'), ((434, 490), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""species"""', 'name': '"""url"""'}), "(model_name='species', name='url')\n", (456, 490), False, 'from django.db import migrations, models\n'), ((640, 694), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (656, 694), False, 'from django.db import migrations, models\n'), ((819, 873), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'null': '(True)'}), '(blank=True, max_length=20, null=True)\n', (835, 873), False, 'from django.db import migrations, models\n'), ((995, 1049), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (1011, 1049), False, 'from django.db import migrations, models\n'), ((1175, 1229), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'null': '(True)'}), '(blank=True, max_length=20, null=True)\n', (1191, 1229), False, 'from django.db import migrations, models\n'), ((1351, 1405), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (1367, 1405), False, 'from django.db import migrations, models\n'), ((1525, 1579), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(10)', 'null': '(True)'}), '(blank=True, max_length=10, null=True)\n', (1541, 1579), False, 'from django.db import migrations, models\n'), ((1705, 1759), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'null': '(True)'}), '(blank=True, max_length=20, null=True)\n', (1721, 1759), False, 'from django.db import migrations, models\n'), ((1882, 1936), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (1898, 1936), False, 'from django.db import migrations, models\n'), ((2060, 2114), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (2076, 2114), False, 'from django.db import migrations, models\n'), ((2237, 2291), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (2253, 2291), False, 'from django.db import migrations, models\n'), ((2421, 2475), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (2437, 2475), False, 'from django.db import migrations, models\n'), ((2601, 2655), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, 
max_length=40, null=True)\n', (2617, 2655), False, 'from django.db import migrations, models\n'), ((2786, 2840), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (2802, 2840), False, 'from django.db import migrations, models\n'), ((2969, 3023), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (2985, 3023), False, 'from django.db import migrations, models\n'), ((3146, 3200), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (3162, 3200), False, 'from django.db import migrations, models\n'), ((3331, 3385), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (3347, 3385), False, 'from django.db import migrations, models\n'), ((3518, 3572), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (3534, 3572), False, 'from django.db import migrations, models\n'), ((3703, 3757), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (3719, 3757), False, 'from django.db import migrations, models\n'), ((3885, 3939), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (3901, 3939), False, 'from django.db import migrations, models\n'), ((4066, 4121), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)'}), '(blank=True, max_length=200, null=True)\n', (4082, 4121), False, 'from django.db import migrations, models\n'), ((4249, 4304), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)'}), '(blank=True, max_length=200, null=True)\n', (4265, 4304), False, 'from django.db import migrations, models\n'), ((4429, 4483), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(40)', 'null': '(True)'}), '(blank=True, max_length=40, null=True)\n', (4445, 4483), False, 'from django.db import migrations, models\n'), ((4611, 4666), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(200)', 'null': '(True)'}), '(blank=True, max_length=200, null=True)\n', (4627, 4666), False, 'from django.db import migrations, models\n')] |
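For reference, a minimal sketch (an assumption, not part of the source) of how the Person model's altered fields would read once this migration is applied; the field definitions mirror the AlterField operations above, everything else about the model is omitted.

from django.db import models

class Person(models.Model):
    # Only the fields touched by migration 0004 are shown; 'url' has been removed.
    birth_year = models.CharField(blank=True, max_length=10, null=True)
    eye_color = models.CharField(blank=True, max_length=20, null=True)
    gender = models.CharField(blank=True, max_length=10, null=True)
    hair_color = models.CharField(blank=True, max_length=20, null=True)
    height = models.CharField(blank=True, max_length=10, null=True)
    mass = models.CharField(blank=True, max_length=10, null=True)
    skin_color = models.CharField(blank=True, max_length=20, null=True)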
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Objective: scrape a table from a website and save it as a CSV file.
# Reference: http://qiita.com/kitsuyui/items/4906bb457af4d0e2d0a5
import pandas as pd

urlString = "http://typhoon.yahoo.co.jp/weather/jp/earthquake/list/?sort=1&key=1&b=15001"
# read_html parses every <table> on the page (lxml is used as the parser
# backend) and returns a list of DataFrames.
result = pd.read_html(urlString)
print(result)
| [
"pandas.io.LH.read_html"
] | [((324, 353), 'pandas.io.LH.read_html', 'pd.io.LH.read_html', (['urlString'], {}), '(urlString)\n', (342, 353), True, 'import pandas as pd\n')] |
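A short, self-contained variant of the script above that completes its stated objective of writing the scraped table to disk; the output filename is a placeholder, not taken from the source.

import pandas as pd

urlString = "http://typhoon.yahoo.co.jp/weather/jp/earthquake/list/?sort=1&key=1&b=15001"
tables = pd.read_html(urlString)  # one DataFrame per <table> element on the page
tables[0].to_csv("earthquake_list.csv", index=False, encoding="utf-8")  # filename is a placeholder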
"""basic_crud.py
created 31-mar-2019 by <EMAIL>
"""
import base64
from connexion import NoContent
from datetime import datetime, timedelta
from flask import g, request
from flask_babel import _
import json
import logging
import re
from sqlalchemy import asc, desc
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from sqlalchemy.orm.exc import NoResultFound
from .access import AccessControl
from .account_settings import AccountSettings
from .const import Constants
from .database import db_abort
from .grants import Grants
from .service_config import ServiceConfig
from . import geocode, state, utils
class BasicCRUD(object):
"""Controller base class
Create/Read/Update/Delete/find controller operations.
This class provides permission-based, paginated access to database
models behind your application's endpoints. Most endpoints need no
boilerplate code, and can inherit these functions directly. Some
endpoints only need a few lines of code before or after inheriting
these functions. You can always write your own custom function for
special-case endpoints.
Args:
resource (str): a resource name (endpoint prefix)
model (obj): the model corresponding to the resource
"""
def __init__(self, resource=None, model=None):
self.models = state.models
self.resource = resource
if self.resource not in state.controllers:
if model:
self.model = model
else:
self.model = getattr(self.models, resource.capitalize())
state.controllers[self.resource] = self
@staticmethod
def create(body, id_prefix='x-', limit_related={}):
"""Controller for POST endpoints. This method assigns a new
object ID, sets the _created_ timestamp, evaluates user's
permissions, adds a default category_id if the model has
this attribute, and inserts a row into the back-end database.
Args:
body (dict): resource fields as defined by openapi.yaml schema
id_prefix (str): generated objects will be assigned a random 10-
to 16-character ID; you can set a unique prefix if desired
limit_related (dict): limits on number of related records,
keyed by relationship name
Returns:
tuple:
first element is a dict with the id, second element is
response code (201 on success)
"""
self = state.controllers[request.url_rule.rule.split('/')[3]]
# TODO reject extraneous keys beyond those in the openapi path spec
if self.resource == 'contact':
retval = self._create_contact(body)
if retval[1] != 201:
return retval
acc = AccessControl(model=self.model)
logmsg = dict(action='create', account_id=acc.account_id,
resource=self.resource, ident=acc.identity)
if 'id' in body:
return dict(message='id is a read-only property',
title='Bad Request'), 405
body['id'] = id = utils.gen_id(prefix=id_prefix)
body['created'] = utils.utcnow()
if body.get('expires'):
try:
body['expires'] = self._fromdate(body['expires'])
except Exception:
return dict(message=_(u'invalid date')), 405
if hasattr(self.model, 'uid') and not body.get('uid'):
body['uid'] = acc.uid
uid = logmsg['uid'] = body.get('uid')
if not acc.with_permission('c', new_uid=uid,
membership=acc.primary_resource,
id=body.get('event_id')):
logging.warning(dict(message='access denied', **logmsg))
return dict(message=_(u'access denied')), 403
ret_info = {}
logging.info(dict(id=id, name=body.get('name'), **logmsg))
if not body.get('category_id') and hasattr(self.model, 'category_id'):
if acc.account_id:
body['category_id'] = AccountSettings(
acc.account_id, g.db).get.category_id
elif body.get('event_id'):
body['category_id'] = g.db.query(self.models.Event).filter_by(
id=body['event_id']).one().category_id
else:
logging.warning(dict(message='unexpected no creds', **logmsg))
return dict(message=_(u'access denied')), 403
if hasattr(self.model, 'status'):
body['status'] = body.get('status', 'active')
grant = (self.resource if self.resource.endswith('s')
else self.resource + 's')
limit = Grants().get(grant, uid=uid)
if grant in ServiceConfig().config.DEFAULT_GRANTS and g.db.query(
self.model).filter_by(uid=uid).count() >= limit:
msg = _('user limit exceeded')
logging.info(dict(message=msg, allowed=limit, **logmsg))
return dict(message=msg, allowed=limit), 405
related_items = None
if hasattr(self.model, '__rest_related__'):
related_items = {
item: body[item] for item in self.model.__rest_related__
if item in body}
for related in self.model.__rest_related__:
if (related in body and related in limit_related and
len(body[related]) > limit_related[related]):
logging.warning(dict(message='limit exceeded',
field=related, **logmsg))
return dict(message=_(u'limit exceeded')), 405
body.pop(related, None)
if self.resource == 'apikey':
body['prefix'], secret, body['hashvalue'] = acc.apikey_create()
ret_info = dict(apikey=body['prefix'] + '.' + secret,
name=body['name'])
try:
record = self.model(**body)
except (AttributeError, TypeError) as ex:
logging.warning(dict(message=str(ex), **logmsg))
return dict(message=str(ex)), 405
g.db.add(record)
try:
if related_items:
g.db.commit()
for related, records in related_items.items():
self._update_related(id, related, records)
g.db.commit()
except IntegrityError as ex:
message = 'duplicate or other conflict'
logging.warning(dict(message=message, error=str(ex), **logmsg))
return dict(message=message, data=str(body)), 405
except Exception as ex:
return db_abort(str(ex), rollback=True, **logmsg)
return dict(id=id, **ret_info), 201
@staticmethod
def get(id):
"""Controller for GET endpoints. This method evaluates
privacy settings against the user's permissions, looks up
category, owner and geocode values, and fetches the object
from the back-end database.
Args:
id (str): ID of the desired resource
Returns:
tuple:
first element is a dict with the object or error
message, second element is response code (200 on success)
"""
self = state.controllers[request.url_rule.rule.split('/')[3]]
acc = AccessControl(model=self.model)
try:
query = self.db_get(id)
record = query.one()
except NoResultFound as ex:
msg = _(u'not found') if 'No row was found' in str(ex) else str(ex)
return dict(id=id, message=msg), 404
retval = record.as_dict()
if hasattr(self.model, 'uid') and hasattr(self.model, 'owner'):
retval['owner'] = record.owner.name
if hasattr(self.model, 'category_id') and retval['category_id']:
retval['category'] = record.category.name
if 'modified' in retval and not retval['modified']:
del(retval['modified'])
eid = None if hasattr(self.model, 'event_id') else (
acc.auth_ids[acc.primary_resource] or [None])[0]
retval['rbac'] = ''.join(sorted(list(
acc.rbac_permissions(query=query, membership=acc.primary_resource,
id=eid) - set('c'))))
if 'geolat' in retval:
access = 'r' if 'r' in retval['rbac'] else None
if retval['privacy'] == 'secret' and not access:
return dict(message=_(u'access denied')), 403
return geocode.with_privacy(retval, access), 200
if 'r' in retval['rbac']:
logging.info(dict(
action='get', resource=self.resource, id=id,
ident=acc.identity, duration=utils.req_duration()))
return retval, 200
else:
return dict(message=_(u'access denied'), id=id), 403
@staticmethod
def update(id, body, access='u', limit_related={}):
"""Controller for PUT endpoints. This method looks for an existing
record, evaluates user's permissions, and updates the row in
the back-end database.
Args:
body (dict): fields to be updated
access (str): access-level required for RBAC evaluation
limit_related (dict): limits on number of related records,
indexed by relationship name
Returns:
tuple:
first element is a dict with the id, second element is
response code (200 on success)
"""
if 'id' in body and body['id'] != id:
return dict(message='id is a read-only property',
title='Bad Request'), 405
self = state.controllers[request.url_rule.rule.split('/')[3]]
body['modified'] = utils.utcnow()
if body.get('expires'):
try:
body['expires'] = self._fromdate(body['expires'])
except Exception:
return dict(message=_(u'invalid date')), 405
if 'totp' in body and not body.pop('totp'):
body['totp_secret'] = None
if hasattr(self.model, '__rest_related__'):
for related in self.model.__rest_related__:
if related in body:
ret = self._update_related(
id, related, body.pop(related, None),
limit=(limit_related[related]
if related in limit_related else None))
if ret[1] != 200:
return ret
logmsg = dict(action='update', resource=self.resource, id=id,
ident=AccessControl().identity)
try:
query = g.db.query(self.model).filter_by(id=id)
if not AccessControl(
model=self.model).with_permission(
access, query=query):
return dict(message=_(u'access denied'), id=id), 403
current = query.one().as_dict()
query.update(body)
g.db.commit()
except IntegrityError as ex:
message = _(u'duplicate or other conflict')
logging.warning(dict(message=message, error=str(ex), **logmsg))
return dict(message=message, data=str(ex)), 405
except Exception as ex:
return db_abort(str(ex), rollback=True, **logmsg)
updated = {key: body[key] for key, val in body.items()
if key in current and current[key] != val and
key not in ('name', 'modified')}
logging.info(dict(name=body.get('name'), duration=utils.req_duration(),
**logmsg, **updated))
return dict(id=id, message=_(u'updated')), 200
@staticmethod
def delete(ids, force=False):
"""Controller for DELETE endpoints. This method looks for existing
records, evaluates user's permissions, and updates or removes
rows in the back-end database.
Args:
ids (list of str): record IDs to be flagged for removal
force (bool): flag for removal if false; remove data if true
Returns:
tuple:
first element is a dict with the id, second element is
response code (200 on success)
"""
# TODO - update auth if model could affect any session's auth
self = state.controllers[request.url_rule.rule.split('/')[3]]
logmsg = dict(action='delete', resource=self.resource,
account_id=AccessControl().account_id,
ident=AccessControl().identity)
errors = 0
count = 0
for id in ids:
try:
query = g.db.query(self.model).filter_by(id=id)
if not AccessControl(
model=self.model).with_permission(
'd', query=query):
return dict(message=_(u'access denied'), id=id), 403
if force:
if query.delete():
logging.info(dict(id=id, **logmsg))
else:
logging.info(dict(id=id, msg='query failed', **logmsg))
errors += 1
else:
logging.info(dict(id=id, status='disabled', **logmsg))
query.update(dict(status='disabled'))
except Exception as ex:
return db_abort(str(ex), **logmsg)
count += 1
try:
g.db.commit()
except Exception as ex:
return db_abort(str(ex), rollback=True, **logmsg)
logging.info(dict(count=count, ids=ids, **logmsg))
return NoContent, 404 if errors else 204
@staticmethod
def find(**kwargs):
"""Find records which match query parameters passed from
connexion by name, in a dictionary that also includes user
and token info
Args:
cursor_next (str): pagination token to fetch subsequent records
filter (dict): field/value pairs to query (simple queries
only, with string or list matching; or * for any)
limit (int): max records to fetch
offset (int): old-style pagination starting offset
sort (str): <field>[:{asc|desc}]
status (str): value is added to filter
Returns:
dict: items (list), count(int), cursor_next (str)
"""
self = state.controllers[request.url_rule.rule.split('/')[3]]
acc = AccessControl(model=self.model)
logmsg = dict(action='find', ident=acc.identity,
resource=self.resource)
# TODO this conditional is a band-aid to give status 403 instead of
# 200 with empty item list for anonymous / restricted users
if self.resource != 'account' and not acc.with_permission(
'r', new_uid=acc.uid):
return dict(message=_(u'access denied')), 403
conditions = {item: value for item, value in kwargs.items()
if item in ('status',)}
if 'cursor_next' in kwargs:
offset = int(self._fromb64(kwargs['cursor_next']).split(':')[1])
elif 'offset' in kwargs:
offset = int(kwargs['offset'])
else:
offset = 0
sort = kwargs.get('sort',
'name' if hasattr(self.model, 'name') else 'id')
if ':' in sort:
sort, dir = sort.split(':')
sortdir = desc if dir == 'desc' else asc
else:
sortdir = asc
query = g.db.query(self.model)
limit = int(kwargs.get('limit', Constants.PER_PAGE_DEFAULT))
try:
filter = json.loads(kwargs.get('filter', '{}'))
except json.decoder.JSONDecodeError as ex:
msg = _(u'invalid filter specified') + '=%s' % str(ex)
logging.error(dict(message=msg, **logmsg))
return dict(message=msg), 405
if 'id' in filter:
# TODO: this seems a bit hackish, try to remember what
# use-case this supports. The del() use-case is to
# enable get:guest?filter{id:null,event_id=foo}
if not filter.get('id'):
if len(filter) == 1:
return dict(count=0, items=[]), 200
else:
del(filter['id'])
elif 'filter' in kwargs and not filter:
return dict(count=0, items=[]), 200
if filter.get('account_id') and not hasattr(self.model, 'account_id'):
try:
account = g.db.query(self.models.Account).filter_by(
id=filter['account_id']).one()
except NoResultFound:
return dict(count=0, items=[],
message=_(u'not found')), 404
filter['uid'] = account.uid
filter.pop('account_id')
for key in filter.copy():
if filter[key] == '*':
filter.pop(key)
elif (filter[key] and type(filter[key]) is str and '%'
in filter[key]):
query = query.filter(getattr(self.model, key).like(
filter[key]))
filter.pop(key)
elif type(filter[key]) is list:
items = filter.pop(key)
if len(items) == 1:
filter[key] = items[0]
else:
query = query.filter(getattr(self.model, key).in_(items))
if hasattr(self.model, 'starts'):
if filter.get('prev'):
query = query.filter(self.model.starts < utils.utcnow())
else:
# Filter out records that start more than 12 hours ago
query = query.filter(self.model.starts >
utils.utcnow() - timedelta(hours=12))
filter.pop('prev', None)
try:
query = query.filter_by(**filter).order_by(
sortdir(getattr(self.model, sort)))
except InvalidRequestError as ex:
logging.warning(dict(message=str(ex), **logmsg))
return dict(message=_(u'invalid filter specified')), 405
if 'status' in conditions and conditions['status'] == 'disabled':
query = query.filter(self.model.status == 'disabled')
else:
query = query.filter(self.model.status != 'disabled')
query = acc.with_filter(query)
try:
results = query.slice(offset, offset + limit + 1).all()
retval = dict(items=[], count=query.count())
except Exception as ex:
return db_abort(str(ex), **logmsg)
count = 0
for result in results[:limit]:
record = result.as_dict()
if hasattr(self.model, 'owner'):
record['owner'] = result.owner.name
# TODO - find a way to avoid this, causes KeyError on lazy-load
if record.get('category_id'):
record['category'] = result.category.name
# TODO - takes 30ms per record for rbac: optimize or redesign
record['rbac'] = ''.join(sorted(list(
acc.rbac_permissions(owner_uid=record.get('uid'),
membership=acc.primary_resource,
id=record.get('id'),
privacy=record.get('privacy'))
- set('c'))))
retval['items'].append(record)
count += 1
if len(results) > limit:
retval['cursor_next'] = self._tob64('cursor:%d' % (offset + limit))
elif count < retval['count'] and count < limit and offset == 0:
# TODO find a way to get query.count() to return accurate value
retval['count'] = count
logging.info(dict(
offset=offset, limit=limit,
duration=utils.req_duration(), **conditions, **logmsg))
return retval, 200
def _create_contact(self, body):
"""Perform pre-checks against fields for contact resource
prior to rest of contact-create
Args:
body (dict): as defined in openapi.yaml schema
"""
logmsg = dict(action='create', resource='contact',
uid=body.get('uid'))
acc = AccessControl()
if body.get('type') == 'sms':
if body.get('carrier') is None:
return dict(message=_(u'carrier required for sms')), 405
elif not re.match(Constants.REGEX_PHONE, body['info']):
return dict(message=_(u'invalid mobile number')), 405
body['info'] = re.sub('[- ()]', '', body['info'])
elif body.get('type') == 'email':
body['info'] = utils.identity_normalize(body['info'].strip())
if not re.match(Constants.REGEX_EMAIL, body['info']):
return dict(message=_(u'invalid email address')), 405
elif 'type' in body and body.get('type') not in ['sms', 'email']:
return dict(message='contact type not yet supported'), 405
if not body.get('rank'):
try:
count = g.db.query(self.models.Contact).filter_by(
uid=body.get('uid'), type=body.get('type')).count()
except Exception as ex:
return db_abort(str(ex), **logmsg)
body['rank'] = count + 1
if body.get('uid', acc.uid) != acc.uid and 'admin' not in acc.auth:
logging.warning(dict(message=_(u'access denied'), **logmsg))
return dict(message=_(u'access denied')), 403
if not body.get('status'):
body['status'] = 'unconfirmed'
return dict(status='ok'), 201
@staticmethod
def update_contact(id, body):
"""This is a special-case function for the contact-update resource
- validate sms carrier
- keep person identity in sync with primary contact
Args:
id (str): resource ID
body (dict): as defined in openapi.yaml
"""
logmsg = dict(action='update', id=id, info=body.get('info'))
if body.get('type') == 'sms':
if body.get('carrier') is None:
return dict(message=_(u'carrier required for sms')), 405
if not re.match(Constants.REGEX_PHONE, body['info']):
return dict(message=_(u'invalid mobile number')), 405
body['info'] = re.sub('[- ()]', '', body['info'])
elif body.get('type') == 'email':
body['info'] = utils.identity_normalize(body['info'].strip())
if not re.match(Constants.REGEX_EMAIL, body['info']):
return dict(message=_(u'invalid email address')), 405
if 'id' in body and body['id'] != id:
return dict(message='id is a read-only property',
title='Bad Request'), 405
self = state.controllers[request.url_rule.rule.split('/')[3]]
body['modified'] = utils.utcnow()
try:
query = g.db.query(self.model).filter_by(id=id)
if not AccessControl(model=self.model).with_permission(
'u', query=query):
return dict(message=_(u'access denied'), id=id), 403
prev_identity = query.one().info
if body.get('status') != 'disabled':
body['status'] = 'unconfirmed'
query.update(body)
try:
# If updating primary contact, also update identity
primary = g.db.query(self.models.Person).filter_by(
identity=prev_identity).one()
logging.info(dict(
resource='person', previous=prev_identity, **logmsg))
primary.identity = body.get('info')
except NoResultFound:
pass
g.db.commit()
logging.info(dict(resource=self.resource, **logmsg))
except Exception as ex:
logging.warning(dict(message=str(ex), **logmsg))
g.db.rollback()
return dict(message=_(u'conflict with existing')), 405
return dict(id=id, message=_(u'updated')), 200
def _update_related(self, id, attr, related_ids, limit=None):
"""Update the list of records in the many-to-many association
model for one column in the record. Adds and/or removes entries
from the association table to make the list match related_ids.
Args:
id (str): ID of record in current model
attr (str): relationship attribute name
related_ids (list): list of existing records in related table
limit (int): maximum number of allowed records
Returns: tuple
flask-compatible status dict, http return status
"""
if limit and len(related_ids) > limit:
msg = 'Limit exceeded'
logging.warning(dict(
action='update', resource=self.resource, allowed=limit,
field=attr, uid=AccessControl().uid, message=msg))
return dict(message=msg, allowed=limit), 405
if attr == 'members':
model = getattr(self.models, 'Person')
else:
model = getattr(self.models, attr.capitalize().strip('s'))
query = g.db.query(self.model).filter_by(id=id).one()
current = [item.id for item in getattr(query, attr)]
for missing_member in set(related_ids) - set(current):
getattr(query, attr).append(g.db.query(model).filter_by(
id=missing_member).one())
g.db.flush()
for removed_member in set(current) - set(related_ids):
getattr(query, attr).remove(g.db.query(model).filter_by(
id=removed_member).one())
g.db.commit()
return dict(id=id, status='ok', items=len(related_ids)), 200
@staticmethod
def _tob64(text):
return base64.b64encode(bytes(text, 'utf8')).decode('ascii')
@staticmethod
def _fromb64(text):
return base64.b64decode(bytes(text, 'utf8')).decode('ascii')
@staticmethod
def _fromdate(date_string):
"""Convert a serialized ISO 8601 date-string to datetime format;
handle variants where milliseconds are specified or hour/min/sec
are omitted
Args:
date_string (str): ISO 8601 string like 2021-06-02T22:15:00Z
Returns:
datetime object
"""
exp = date_string
if re.match('^2[0-9]{3}-((0[1-9])|(1[0-2]))-([0-2][1-9]|3[0-1])$',
exp):
exp += 'T23:59:59Z'
try:
expires = datetime.strptime(exp, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
expires = datetime.strptime(exp, '%Y-%m-%dT%H:%M:%SZ')
return expires
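# Illustrative examples (assumptions, not from the source) of the two accepted forms:
#   _fromdate('2021-06-02T22:15:00Z') -> datetime(2021, 6, 2, 22, 15)
#   _fromdate('2021-06-02')           -> datetime(2021, 6, 2, 23, 59, 59)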
def db_get(self, id):
"""Activate a SQLalchemy query object for the specified ID in
the current model
Args:
id (str): object ID
Returns:
obj: query object
"""
return g.db.query(self.model).filter_by(id=id)
| [
"flask_babel._",
"datetime.datetime.strptime",
"re.match",
"flask.g.db.query",
"flask.request.url_rule.rule.split",
"flask.g.db.commit",
"re.sub",
"flask.g.db.add",
"datetime.timedelta",
"flask.g.db.flush",
"flask.g.db.rollback"
] | [((6135, 6151), 'flask.g.db.add', 'g.db.add', (['record'], {}), '(record)\n', (6143, 6151), False, 'from flask import g, request\n'), ((15564, 15586), 'flask.g.db.query', 'g.db.query', (['self.model'], {}), '(self.model)\n', (15574, 15586), False, 'from flask import g, request\n'), ((25563, 25575), 'flask.g.db.flush', 'g.db.flush', ([], {}), '()\n', (25573, 25575), False, 'from flask import g, request\n'), ((25758, 25771), 'flask.g.db.commit', 'g.db.commit', ([], {}), '()\n', (25769, 25771), False, 'from flask import g, request\n'), ((26458, 26526), 're.match', 're.match', (['"""^2[0-9]{3}-((0[1-9])|(1[0-2]))-([0-2][1-9]|3[0-1])$"""', 'exp'], {}), "('^2[0-9]{3}-((0[1-9])|(1[0-2]))-([0-2][1-9]|3[0-1])$', exp)\n", (26466, 26526), False, 'import re\n'), ((4886, 4910), 'flask_babel._', '_', (['"""user limit exceeded"""'], {}), "('user limit exceeded')\n", (4887, 4910), False, 'from flask_babel import _\n'), ((6363, 6376), 'flask.g.db.commit', 'g.db.commit', ([], {}), '()\n', (6374, 6376), False, 'from flask import g, request\n'), ((11015, 11028), 'flask.g.db.commit', 'g.db.commit', ([], {}), '()\n', (11026, 11028), False, 'from flask import g, request\n'), ((13495, 13508), 'flask.g.db.commit', 'g.db.commit', ([], {}), '()\n', (13506, 13508), False, 'from flask import g, request\n'), ((20647, 20681), 're.sub', 're.sub', (['"""[- ()]"""', '""""""', "body['info']"], {}), "('[- ()]', '', body['info'])\n", (20653, 20681), False, 'import re\n'), ((22432, 22466), 're.sub', 're.sub', (['"""[- ()]"""', '""""""', "body['info']"], {}), "('[- ()]', '', body['info'])\n", (22438, 22466), False, 'import re\n'), ((23841, 23854), 'flask.g.db.commit', 'g.db.commit', ([], {}), '()\n', (23852, 23854), False, 'from flask import g, request\n'), ((26615, 26662), 'datetime.datetime.strptime', 'datetime.strptime', (['exp', '"""%Y-%m-%dT%H:%M:%S.%fZ"""'], {}), "(exp, '%Y-%m-%dT%H:%M:%S.%fZ')\n", (26632, 26662), False, 'from datetime import datetime, timedelta\n'), ((2496, 2528), 'flask.request.url_rule.rule.split', 'request.url_rule.rule.split', (['"""/"""'], {}), "('/')\n", (2523, 2528), False, 'from flask import g, request\n'), ((6211, 6224), 'flask.g.db.commit', 'g.db.commit', ([], {}), '()\n', (6222, 6224), False, 'from flask import g, request\n'), ((7279, 7311), 'flask.request.url_rule.rule.split', 'request.url_rule.rule.split', (['"""/"""'], {}), "('/')\n", (7306, 7311), False, 'from flask import g, request\n'), ((9699, 9731), 'flask.request.url_rule.rule.split', 'request.url_rule.rule.split', (['"""/"""'], {}), "('/')\n", (9726, 9731), False, 'from flask import g, request\n'), ((11088, 11121), 'flask_babel._', '_', (['u"""duplicate or other conflict"""'], {}), "(u'duplicate or other conflict')\n", (11089, 11121), False, 'from flask_babel import _\n'), ((12365, 12397), 'flask.request.url_rule.rule.split', 'request.url_rule.rule.split', (['"""/"""'], {}), "('/')\n", (12392, 12397), False, 'from flask import g, request\n'), ((14450, 14482), 'flask.request.url_rule.rule.split', 'request.url_rule.rule.split', (['"""/"""'], {}), "('/')\n", (14477, 14482), False, 'from flask import g, request\n'), ((22288, 22333), 're.match', 're.match', (['Constants.REGEX_PHONE', "body['info']"], {}), "(Constants.REGEX_PHONE, body['info'])\n", (22296, 22333), False, 'import re\n'), ((22910, 22942), 'flask.request.url_rule.rule.split', 'request.url_rule.rule.split', (['"""/"""'], {}), "('/')\n", (22937, 22942), False, 'from flask import g, request\n'), ((24025, 24040), 'flask.g.db.rollback', 'g.db.rollback', ([], {}), '()\n', 
(24038, 24040), False, 'from flask import g, request\n'), ((26712, 26756), 'datetime.datetime.strptime', 'datetime.strptime', (['exp', '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(exp, '%Y-%m-%dT%H:%M:%SZ')\n", (26729, 26756), False, 'from datetime import datetime, timedelta\n'), ((27020, 27042), 'flask.g.db.query', 'g.db.query', (['self.model'], {}), '(self.model)\n', (27030, 27042), False, 'from flask import g, request\n'), ((7498, 7513), 'flask_babel._', '_', (['u"""not found"""'], {}), "(u'not found')\n", (7499, 7513), False, 'from flask_babel import _\n'), ((10684, 10706), 'flask.g.db.query', 'g.db.query', (['self.model'], {}), '(self.model)\n', (10694, 10706), False, 'from flask import g, request\n'), ((11696, 11709), 'flask_babel._', '_', (['u"""updated"""'], {}), "(u'updated')\n", (11697, 11709), False, 'from flask_babel import _\n'), ((15798, 15828), 'flask_babel._', '_', (['u"""invalid filter specified"""'], {}), "(u'invalid filter specified')\n", (15799, 15828), False, 'from flask_babel import _\n'), ((20503, 20548), 're.match', 're.match', (['Constants.REGEX_PHONE', "body['info']"], {}), "(Constants.REGEX_PHONE, body['info'])\n", (20511, 20548), False, 'import re\n'), ((20817, 20862), 're.match', 're.match', (['Constants.REGEX_EMAIL', "body['info']"], {}), "(Constants.REGEX_EMAIL, body['info'])\n", (20825, 20862), False, 'import re\n'), ((22602, 22647), 're.match', 're.match', (['Constants.REGEX_EMAIL', "body['info']"], {}), "(Constants.REGEX_EMAIL, body['info'])\n", (22610, 22647), False, 'import re\n'), ((23022, 23044), 'flask.g.db.query', 'g.db.query', (['self.model'], {}), '(self.model)\n', (23032, 23044), False, 'from flask import g, request\n'), ((24144, 24157), 'flask_babel._', '_', (['u"""updated"""'], {}), "(u'updated')\n", (24145, 24157), False, 'from flask_babel import _\n'), ((3804, 3823), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (3805, 3823), False, 'from flask_babel import _\n'), ((8835, 8854), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (8836, 8854), False, 'from flask_babel import _\n'), ((12681, 12703), 'flask.g.db.query', 'g.db.query', (['self.model'], {}), '(self.model)\n', (12691, 12703), False, 'from flask import g, request\n'), ((14917, 14936), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (14918, 14936), False, 'from flask_babel import _\n'), ((21509, 21528), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (21510, 21528), False, 'from flask_babel import _\n'), ((21573, 21592), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (21574, 21592), False, 'from flask_babel import _\n'), ((25274, 25296), 'flask.g.db.query', 'g.db.query', (['self.model'], {}), '(self.model)\n', (25284, 25296), False, 'from flask import g, request\n'), ((8477, 8496), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (8478, 8496), False, 'from flask_babel import _\n'), ((10895, 10914), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (10896, 10914), False, 'from flask_babel import _\n'), ((17829, 17848), 'datetime.timedelta', 'timedelta', ([], {'hours': '(12)'}), '(hours=12)\n', (17838, 17848), False, 'from datetime import datetime, timedelta\n'), ((18143, 18173), 'flask_babel._', '_', (['u"""invalid filter specified"""'], {}), "(u'invalid filter specified')\n", (18144, 18173), False, 'from flask_babel import _\n'), ((20445, 20475), 'flask_babel._', '_', (['u"""carrier required 
for sms"""'], {}), "(u'carrier required for sms')\n", (20446, 20475), False, 'from flask_babel import _\n'), ((22232, 22262), 'flask_babel._', '_', (['u"""carrier required for sms"""'], {}), "(u'carrier required for sms')\n", (22233, 22262), False, 'from flask_babel import _\n'), ((22371, 22398), 'flask_babel._', '_', (['u"""invalid mobile number"""'], {}), "(u'invalid mobile number')\n", (22372, 22398), False, 'from flask_babel import _\n'), ((23205, 23224), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (23206, 23224), False, 'from flask_babel import _\n'), ((24073, 24101), 'flask_babel._', '_', (['u"""conflict with existing"""'], {}), "(u'conflict with existing')\n", (24074, 24101), False, 'from flask_babel import _\n'), ((3353, 3371), 'flask_babel._', '_', (['u"""invalid date"""'], {}), "(u'invalid date')\n", (3354, 3371), False, 'from flask_babel import _\n'), ((4452, 4471), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (4453, 4471), False, 'from flask_babel import _\n'), ((4791, 4813), 'flask.g.db.query', 'g.db.query', (['self.model'], {}), '(self.model)\n', (4801, 4813), False, 'from flask import g, request\n'), ((5623, 5643), 'flask_babel._', '_', (['u"""limit exceeded"""'], {}), "(u'limit exceeded')\n", (5624, 5643), False, 'from flask_babel import _\n'), ((9959, 9977), 'flask_babel._', '_', (['u"""invalid date"""'], {}), "(u'invalid date')\n", (9960, 9977), False, 'from flask_babel import _\n'), ((12905, 12924), 'flask_babel._', '_', (['u"""access denied"""'], {}), "(u'access denied')\n", (12906, 12924), False, 'from flask_babel import _\n'), ((16569, 16600), 'flask.g.db.query', 'g.db.query', (['self.models.Account'], {}), '(self.models.Account)\n', (16579, 16600), False, 'from flask import g, request\n'), ((16780, 16795), 'flask_babel._', '_', (['u"""not found"""'], {}), "(u'not found')\n", (16781, 16795), False, 'from flask_babel import _\n'), ((20586, 20613), 'flask_babel._', '_', (['u"""invalid mobile number"""'], {}), "(u'invalid mobile number')\n", (20587, 20613), False, 'from flask_babel import _\n'), ((20900, 20927), 'flask_babel._', '_', (['u"""invalid email address"""'], {}), "(u'invalid email address')\n", (20901, 20927), False, 'from flask_babel import _\n'), ((21153, 21184), 'flask.g.db.query', 'g.db.query', (['self.models.Contact'], {}), '(self.models.Contact)\n', (21163, 21184), False, 'from flask import g, request\n'), ((22685, 22712), 'flask_babel._', '_', (['u"""invalid email address"""'], {}), "(u'invalid email address')\n", (22686, 22712), False, 'from flask_babel import _\n'), ((23521, 23551), 'flask.g.db.query', 'g.db.query', (['self.models.Person'], {}), '(self.models.Person)\n', (23531, 23551), False, 'from flask import g, request\n'), ((25484, 25501), 'flask.g.db.query', 'g.db.query', (['model'], {}), '(model)\n', (25494, 25501), False, 'from flask import g, request\n'), ((25679, 25696), 'flask.g.db.query', 'g.db.query', (['model'], {}), '(model)\n', (25689, 25696), False, 'from flask import g, request\n'), ((4219, 4248), 'flask.g.db.query', 'g.db.query', (['self.models.Event'], {}), '(self.models.Event)\n', (4229, 4248), False, 'from flask import g, request\n')] |
"""Python SSH connector/wrapper for linux systems based on super fast SSH2
protocol library -> ssh2-python
"""
import errno
import os
import socket
import stat
from pathlib import Path
from typing import List, NamedTuple, Optional, Union
from ssh2.exceptions import SFTPProtocolError
from ssh2.session import Session
from ssh2.sftp import (
LIBSSH2_FXF_APPEND,
LIBSSH2_FXF_CREAT,
LIBSSH2_FXF_READ,
LIBSSH2_FXF_WRITE,
LIBSSH2_SFTP_S_IRGRP,
LIBSSH2_SFTP_S_IROTH,
LIBSSH2_SFTP_S_IRUSR,
LIBSSH2_SFTP_S_IWUSR,
)
from sshcon.exceptions import SshConError, SshConSftpError
class CompletedCommand(NamedTuple):
"""Class to represent ssh connection.
Args:
NamedTuple (rcode, stdout, stderr): Constructs all the necessary
attributes for the CompletedCommand
object.
"""
rcode: int
stdout: Union[str, bytes]
stderr: str
class SshCon:
"""A class to represent ssh connection."""
def __init__(
self, host: str, user: str, key: Union[Path, str], port: int = 22
) -> None:
"""Constructs all the necessary attributes for the SshCon object.
Args:
host (str): Hostname or IP adress of remote machine.
user (str): SSH user to use for the connection.
key (Union[Path, str]): File with SSH RSA private key.
port (int, optional): Port number for ssh connection. Defaults to 22.
"""
self.user = user
self.key = str(key)
self.host = host
self.port = port
self.sftp = None
self.session = self._make_session()
def _make_session(self) -> Session:
"""Makes SSH connection and handshake.
Returns:
Session: Succesfully connected Session object.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = Session()
session.handshake(sock)
session.userauth_publickey_fromfile(self.user, self.key)
return session
def _sftp_session(self):
"""Makes SFTP session.
Returns:
PySFTP: Object with SFTP session.
"""
if self.sftp is None:
self.sftp = self.session.sftp_init()
return self.sftp
def _lstat(self, path: Union[Path, str]):
"""Get file lstat
Args:
path (Union[Path, str]): File for which to get lstat.
Raises:
SshConSftpError: Raise when lstat command fails.
Returns:
attrs: File lstat.
"""
sftp = self._sftp_session()
try:
fstat = sftp.lstat(str(path).encode("utf-8"))
except SFTPProtocolError as msgerr:
err_code = sftp.last_error()
if err_code == 2:
raise FileNotFoundError(f"File {path} not found.") from msgerr
raise SshConSftpError("lsstat", err_code) from msgerr
else:
return fstat.permissions
def run(
self,
cmd: Union[List, str],
capture_output: bool = False,
check: bool = True,
user: Optional[str] = None,
encoding: Optional[str] = "utf-8",
) -> Optional[CompletedCommand]:
"""Run command on the remote machine.
Raises:
OSError: Raises error if command returns non-zero error code.
Returns:
CompletedCommand: Object CompletedCommand with rcode, stdout and stderr.
"""
if isinstance(cmd, list):
cmd = [str(item) for item in cmd]
cmd = " ".join(cmd)
if user is not None:
cmd = f"su - {user} -c '{cmd}'"
channel = self.session.open_session()
channel.execute(cmd)
channel.wait_eof()
channel.close()
channel.wait_closed()
rcode = channel.get_exit_status()
_buffsize, stderr = channel.read_stderr()
if check:
if rcode:
raise OSError(rcode, stderr.decode("utf-8").strip(), cmd)
if capture_output:
size, data = channel.read()
stdout = b""
while size > 0:
stdout += data
size, data = channel.read()
if encoding:
stderr = stderr.decode(encoding)
stdout = stdout.decode(encoding).rstrip()
return CompletedCommand(rcode, stdout, stderr)
return None
def mkdir(
self,
path: Union[Path, str],
mode: int = 511,
exist_ok: bool = True,
parents: bool = False,
) -> None:
"""Make dir in a remote machine.
Args:
path (Union[Path, str]): Path of directory to create.
mode (int, optional): Permissions mode of new directory. Defaults to 511.
exist_ok (bool, optional): No error if existing. Defaults to True.
parents (bool, optional): Make parent directories as needed. Defaults to False.
"""
mkdir_cmd = ["mkdir", "-m", mode, path]
if parents or exist_ok:
mkdir_cmd.insert(1, "-p")
self.run(mkdir_cmd, check=True)
def remove(
self,
path: Union[Path, str],
force: bool = False,
recursive: bool = False,
) -> None:
"""Remove files or directories.
Args:
path (Union[Path, str]): File or folder to remove.
force (bool, optional): Ignore nonexistent files, never prompt. Defaults to False.
recursive (bool, optional): Remove directories and their contents recursively.
Defaults to False.
"""
rm_cmd = ["rm", path]
if force:
rm_cmd.insert(1, "-f")
if recursive:
rm_cmd.insert(1, "-r")
self.run(rm_cmd, check=True)
def rmdir(self, path: Union[Path, str]) -> None:
"""Remove empty directories.
Args:
path (Union[Path, str]): Empty directory to remove.
"""
self._sftp_session().rmdir(str(path))
def isdir(self, path: Union[str, Path]) -> bool:
"""Check if path is directory.
Args:
path (Union[str, Path]): Target path.
Returns:
bool: True if directory else False.
"""
return stat.S_ISDIR(self._lstat(path))
def isfile(self, path: Union[str, Path]) -> bool:
"""Check if path is file.
Args:
path (Union[str, Path]): Target path.
Returns:
bool: True if file else False.
"""
return stat.S_ISREG(self._lstat(path))
def ismounted(self, mount: Union[str, Path]) -> bool:
"""Check if path is a mountpoint.
Args:
mount (Union[str, Path]): Target path.
Returns:
bool: True if mountpoint else False.
"""
try:
self.run(["mountpoint", mount])
except OSError:
return False
else:
return True
def get_filemode(self, path: Union[Path, str]):
"""Get file status.
Args:
path (Union[Path, str]): Target path.
"""
fstat = self._sftp_session().lstat(path)
return stat.filemode(fstat.permissions)
def mount(
self,
source: str,
target: Union[Path, str],
force: bool = False,
mkdir: bool = False,
) -> None:
"""Mounts a filesystem in remote machine.
Args:
source (str): Source filesystem.
target (Union[Path, str]): Target filesystem.
force (bool, optional): Umount current filesystem and mount new
filesystem. Defaults to False.
mkdir (bool, optional): Make directory if not exist. Defaults to False.
Raises:
SshConError: Raises error if target is already a mountpoint and option
force is not used.
"""
if self.ismounted(target):
if force:
self.umount(target)
else:
raise SshConError("mount", f"Folder {target} is already mountpoint.")
if mkdir:
self.mkdir(target, exist_ok=True)
self.run(["mount", source, target], check=True)
def umount(self, target: Union[Path, str], rmdir: bool = False) -> None:
"""Unmount filesystems.
Args:
target (Union[Path, str]): Path to a filesystem to unmount.
rmdir (bool, optional): Remove directory after unmount. Defaults to False.
"""
self.run(["umount", target], check=True)
if rmdir:
self.rmdir(target)
def read_text(
self, file: Union[Path, str], encoding: str = "utf-8"
) -> Optional[str]:
"""Gets text from the remote file.
Args:
file (Union[Path, str]): Path to a file.
encoding (str, optional): Which encoding to use. Defaults to "utf-8".
Raises:
FileNotFoundError: Raises when file is not found.
Returns:
Optional[str]: File content as string.
"""
text = None
if self.isfile(file) is False:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), str(file))
with self._sftp_session().open(
str(file), LIBSSH2_FXF_READ, LIBSSH2_SFTP_S_IRUSR
) as text_file:
for _size, text_bytes in text_file:
text = text_bytes.decode(encoding)
return text
def write_text(
self,
data: str,
file: Union[Path, str],
append: bool = False,
encoding: str = "utf-8",
force: bool = False,
) -> None:
"""Write text to a remote file.
Args:
data (str): Content to write to a file.
file (Union[Path, str]): Path to a file.
append (bool, optional): If append text to a file instead of
rewrite a content. Defaults to False.
encoding (str, optional): Which encoding to use. Defaults to "utf-8".
Raises:
IsADirectoryError: If target file is a directory.
FileExistsError: If target file exists already.
"""
file = str(file)
try:
if self.isdir(file):
raise IsADirectoryError(errno.EISDIR, os.strerror(errno.EISDIR), file)
except FileNotFoundError:
pass
mode = (
LIBSSH2_SFTP_S_IRUSR
| LIBSSH2_SFTP_S_IWUSR
| LIBSSH2_SFTP_S_IRGRP
| LIBSSH2_SFTP_S_IROTH
)
f_flags = LIBSSH2_FXF_CREAT | LIBSSH2_FXF_WRITE
try:
if append:
f_flags = f_flags | LIBSSH2_FXF_APPEND
elif self.isfile(file) and not force:
raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST), file)
except FileNotFoundError:
pass
with self._sftp_session().open(file, f_flags, mode) as text_file:
text_file.write(data.encode(encoding))
def chmod(self, path: Union[Path, str], mode: int, recursive: bool = False) -> None:
"""Change file mode bits in remote machine.
Args:
path (Union[Path, str]): Path to a target.
mode (int): File mode bits.
recursive (bool, optional): Change file mod recursively. Defaults to False.
"""
chmod_cmd = ["chmod", mode, path]
if recursive:
chmod_cmd.insert(1, "-R")
self.run(chmod_cmd, check=True)
def chown(
self,
path: Union[Path, str],
owner: str,
group: str,
recursive: bool = False,
) -> None:
"""Change file owner and group in remote machine.
Args:
path (Union[Path, str]): Path to a target.
owner (str): Username of an owner.
group (str): Group to use.
recursive (bool, optional): Change owner/group mod recursively. Defaults to False.
"""
chown_cmd = ["chown", f"{owner}:{group}", path]
if recursive:
chown_cmd.insert(1, "-R")
self.run(chown_cmd, check=True)
def send_file(
self,
file: Union[Path, str],
destination: Union[Path, str],
force: bool = False,
) -> None:
"""Send local file to a remote location.
Args:
file (Union[Path, str]): File to send.
destination (Union[Path, str]): Target filename in remote machine.
force (bool, optional): Replace file if already exists. Defaults to False.
Raises:
IsADirectoryError: Raises if target destination is a directory.
FileExistsError: Raises if target destination exists.
"""
try:
if self.isdir(destination):
raise IsADirectoryError(
errno.EISDIR, os.strerror(errno.EISDIR), str(file)
)
except FileNotFoundError:
pass
if not force:
try:
if self.isfile(destination):
raise FileExistsError(
errno.EEXIST, os.strerror(errno.EEXIST), str(file)
)
except FileNotFoundError:
pass
fileinfo = os.stat(file)
chan = self.session.scp_send64(
str(destination),
fileinfo.st_mode & 0o777,
fileinfo.st_size,
fileinfo.st_mtime,
fileinfo.st_atime,
)
with open(file, "rb") as local_fh:
for data in local_fh:
chan.write(data)
def get_file(
self, file: Union[Path, str], destination: Union[Path, str], force: bool = False
) -> None:
"""Get remote file from a remote location.
Args:
file (Union[Path, str]): File to get from a remote.
destination (Union[Path, str]): Local destination.
force (bool): Rewrite the file, if exists. Defaults to False
Raises:
IsADirectoryError: Raises if file is a directory.
FileNotFoundError:: Raises if remote file not found.
"""
if self.isdir(file):
raise IsADirectoryError(errno.EISDIR, os.strerror(errno.EISDIR), str(file))
chan = self.session.scp_recv2(
str(file),
)
mode = "wb+" if force else "xb+"
with open(destination, mode) as local_fh:
size = 0
while True:
siz, buf = chan[0].read()
if siz < 0:
print("error code:", siz)
chan[0].close()
break
size += siz
if size > chan[1].st_size:
local_fh.write(buf[: (chan[1].st_size - size)])
else:
local_fh.write(buf)
if size >= chan[1].st_size:
chan[0].close()
break
| [
"sshcon.exceptions.SshConSftpError",
"socket.socket",
"stat.filemode",
"ssh2.session.Session",
"os.stat",
"os.strerror",
"sshcon.exceptions.SshConError"
] | [((1945, 1994), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1958, 1994), False, 'import socket\n'), ((2060, 2069), 'ssh2.session.Session', 'Session', ([], {}), '()\n', (2067, 2069), False, 'from ssh2.session import Session\n'), ((7548, 7580), 'stat.filemode', 'stat.filemode', (['fstat.permissions'], {}), '(fstat.permissions)\n', (7561, 7580), False, 'import stat\n'), ((13858, 13871), 'os.stat', 'os.stat', (['file'], {}), '(file)\n', (13865, 13871), False, 'import os\n'), ((3079, 3114), 'sshcon.exceptions.SshConSftpError', 'SshConSftpError', (['"""lsstat"""', 'err_code'], {}), "('lsstat', err_code)\n", (3094, 3114), False, 'from sshcon.exceptions import SshConError, SshConSftpError\n'), ((8449, 8512), 'sshcon.exceptions.SshConError', 'SshConError', (['"""mount"""', 'f"""Folder {target} is already mountpoint."""'], {}), "('mount', f'Folder {target} is already mountpoint.')\n", (8460, 8512), False, 'from sshcon.exceptions import SshConError, SshConSftpError\n'), ((9628, 9653), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (9639, 9653), False, 'import os\n'), ((14847, 14872), 'os.strerror', 'os.strerror', (['errno.EISDIR'], {}), '(errno.EISDIR)\n', (14858, 14872), False, 'import os\n'), ((10802, 10827), 'os.strerror', 'os.strerror', (['errno.EISDIR'], {}), '(errno.EISDIR)\n', (10813, 10827), False, 'import os\n'), ((13436, 13461), 'os.strerror', 'os.strerror', (['errno.EISDIR'], {}), '(errno.EISDIR)\n', (13447, 13461), False, 'import os\n'), ((11316, 11341), 'os.strerror', 'os.strerror', (['errno.EEXIST'], {}), '(errno.EEXIST)\n', (11327, 11341), False, 'import os\n'), ((13717, 13742), 'os.strerror', 'os.strerror', (['errno.EEXIST'], {}), '(errno.EEXIST)\n', (13728, 13742), False, 'import os\n')] |
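A hypothetical usage example for the SshCon class defined above; the host, user, key path, and remote file paths are placeholders, not values from the source.

conn = SshCon(host="203.0.113.10", user="deploy", key="/home/deploy/.ssh/id_rsa")
result = conn.run(["uname", "-a"], capture_output=True)  # returns a CompletedCommand
if result is not None:
    print(result.rcode, result.stdout)
conn.write_text("hello from sshcon\n", "/tmp/sshcon_demo.txt", force=True)
print(conn.read_text("/tmp/sshcon_demo.txt"))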
import os
import datetime
import pandas as pd
import csv
class Data(object):
def __init__(self):
self.timestamp = datetime.datetime.now()
self.price = None
self.traffic = None
def to_dict(self):
return {
'timestamp': self.timestamp,
'price': self.price,
'traffic': self.traffic,
}
@staticmethod
def headers():
return ['timestamp', 'price', 'traffic']
def to_data_frame(self):
index = pd.DatetimeIndex([self.timestamp])
df = pd.DataFrame(self.to_dict(), index=index, columns=self.headers())
df.index = df['timestamp']
df.drop(columns='timestamp', inplace=True)
return df
class Dal(object):
data_file = None
def __init__(self, data_file=None):
self.data_file = data_file
def save_data(self, data, **kwargs):
self._writer(data, **kwargs)
def read_data(self, **kwargs):
df = self._reader(**kwargs)
df['traffic'] = df['traffic'].apply(lambda x: x / 60.)
df = df.resample('T').mean()
df.interpolate(inplace=True)
return df
def does_exist(self):
if not self.data_file or not os.path.isfile(self.data_file):
return False
return True
class CsvHandler(Dal):
def _writer(self, data):
if not self.does_exist():
mode = 'w'
header = True
else:
mode = 'a'
header = False
data.to_data_frame().to_csv(self.data_file, mode=mode, header=header)
def _reader(self):
if not self.does_exist():
return
dateparse = lambda dates: datetime.datetime.strptime(dates, '%Y-%m-%d %H:%M:%S.%f')
df = pd.read_csv(self.data_file, parse_dates=['timestamp'], index_col='timestamp', date_parser=dateparse)
return df
class HDF5Handler(Dal):
def _writer(self, data):
if not self.does_exist():
mode = 'w'
append = False
else:
mode = 'a'
append = True
data.to_data_frame().to_hdf(self.data_file, key='data', format='table', append=append, mode=mode)
def _reader(self):
if not self.does_exist():
return
return pd.read_hdf(self.data_file, 'data')
| [
"pandas.read_csv",
"pandas.DatetimeIndex",
"pandas.datetime.strptime",
"datetime.datetime.now",
"os.path.isfile",
"pandas.read_hdf"
] | [((129, 152), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (150, 152), False, 'import datetime\n'), ((502, 536), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[self.timestamp]'], {}), '([self.timestamp])\n', (518, 536), True, 'import pandas as pd\n'), ((1735, 1837), 'pandas.read_csv', 'pd.read_csv', (['self.data_file'], {'parse_dates': '"""timestamp"""', 'index_col': '"""timestamp"""', 'date_parser': 'dateparse'}), "(self.data_file, parse_dates='timestamp', index_col='timestamp',\n date_parser=dateparse)\n", (1746, 1837), True, 'import pandas as pd\n'), ((2252, 2287), 'pandas.read_hdf', 'pd.read_hdf', (['self.data_file', '"""data"""'], {}), "(self.data_file, 'data')\n", (2263, 2287), True, 'import pandas as pd\n'), ((1670, 1721), 'pandas.datetime.strptime', 'pd.datetime.strptime', (['dates', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(dates, '%Y-%m-%d %H:%M:%S.%f')\n", (1690, 1721), True, 'import pandas as pd\n'), ((1203, 1233), 'os.path.isfile', 'os.path.isfile', (['self.data_file'], {}), '(self.data_file)\n', (1217, 1233), False, 'import os\n')] |
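A hypothetical round trip using the classes above; the CSV file name and the sample price/traffic values are placeholders.

sample = Data()
sample.price = 101.5
sample.traffic = 42.0

dal = CsvHandler(data_file="metrics.csv")
dal.save_data(sample)            # appends (or creates) the CSV file
print(dal.read_data().head())   # resampled to 1-minute means, traffic scaled to per-second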
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'gdax-api',
version = '1.2.0.dev1',
description='GDAX public and authenticated client.',
long_description=long_description,
url='https://github.com/Jaewan-Yun/gdax-api-python',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
keywords='gdax',
packages=find_packages(exclude=['tests']),
install_requires=['requests']
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
] | [((109, 131), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (121, 131), False, 'from os import path\n'), ((144, 172), 'os.path.join', 'path.join', (['here', '"""README.md"""'], {}), "(here, 'README.md')\n", (153, 172), False, 'from os import path\n'), ((511, 543), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (524, 543), False, 'from setuptools import setup, find_packages\n')] |
"""
test factory pattern
"""
import pytest
from dpatterns_python.creational import factory
def test_main():
"""
call the main
:return:
"""
factory.main()
@pytest.mark.parametrize('animal', ['dog', 'cat'])
def test_get_pet(animal):
"""
    test one pet at a time
:param animal:
:return:
"""
pet_animal = factory.get_pet(animal)
print(pet_animal.speak())
| [
"pytest.mark.parametrize",
"dpatterns_python.creational.factory.get_pet",
"dpatterns_python.creational.factory.main"
] | [((181, 230), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""animal"""', "['dog', 'cat']"], {}), "('animal', ['dog', 'cat'])\n", (204, 230), False, 'import pytest\n'), ((163, 177), 'dpatterns_python.creational.factory.main', 'factory.main', ([], {}), '()\n', (175, 177), False, 'from dpatterns_python.creational import factory\n'), ((350, 373), 'dpatterns_python.creational.factory.get_pet', 'factory.get_pet', (['animal'], {}), '(animal)\n', (365, 373), False, 'from dpatterns_python.creational import factory\n')] |
from django.contrib import admin
from .models import Meldung, Kommentar
class KommentarInline(admin.TabularInline):
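    # render Kommentar objects inline (with one extra blank form) on the related Meldung admin page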
model = Kommentar
extra = 1
class MeldungAdmin(admin.ModelAdmin):
inlines = [KommentarInline]
admin.site.register(Meldung, MeldungAdmin)
| [
"django.contrib.admin.site.register"
] | [((233, 275), 'django.contrib.admin.site.register', 'admin.site.register', (['Meldung', 'MeldungAdmin'], {}), '(Meldung, MeldungAdmin)\n', (252, 275), False, 'from django.contrib import admin\n')] |
#!/usr/bin/env python3
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torchvision.models as models
import os
import sys
import math
import numpy as np
import cv2
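# Compute the per-pixel mean image of the CIFAR-10 training set and write it out
# as a PNG with compression disabled (IMWRITE_PNG_COMPRESSION level 0).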
data = dset.CIFAR10(root='cifar', train=True, download=True, transform=transforms.ToTensor()).train_data
data = data.astype(np.float32)
img = data.mean(axis=0)
cv2.imwrite("/home/ANT.AMAZON.COM/ofririps/workspace/PipeCNN-DL/model/mean_data.png", img, [cv2.IMWRITE_PNG_COMPRESSION, 0])
| [
"cv2.imwrite",
"torchvision.transforms.ToTensor"
] | [((559, 692), 'cv2.imwrite', 'cv2.imwrite', (['"""/home/ANT.AMAZON.COM/ofririps/workspace/PipeCNN-DL/model/mean_data.png"""', 'img', '[cv2.IMWRITE_PNG_COMPRESSION, 0]'], {}), "(\n '/home/ANT.AMAZON.COM/ofririps/workspace/PipeCNN-DL/model/mean_data.png',\n img, [cv2.IMWRITE_PNG_COMPRESSION, 0])\n", (570, 692), False, 'import cv2\n'), ((469, 490), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (488, 490), True, 'import torchvision.transforms as transforms\n')] |
import collections
from contextlib import contextmanager
import concurrent.futures as futures
import enum
import functools
import inspect
import importlib
import gettext
import multiprocessing
import os
from pathlib import Path
import pickle
import shutil
import signal
import sys
import tempfile
import traceback
import attr
import lib50
from . import internal, _exceptions, __version__
from ._api import log, Failure, _copy, _log, _data
_check_names = []
@attr.s(slots=True)
class CheckResult:
"""Record returned by each check"""
name = attr.ib()
description = attr.ib()
passed = attr.ib(default=None)
log = attr.ib(default=attr.Factory(list))
cause = attr.ib(default=None)
data = attr.ib(default=attr.Factory(dict))
dependency = attr.ib(default=None)
@classmethod
def from_check(cls, check, *args, **kwargs):
"""Create a check_result given a check function, automatically recording the name,
the dependency, and the (translated) description.
"""
return cls(name=check.__name__, description=_(check.__doc__ if check.__doc__ else check.__name__.replace("_", " ")),
dependency=check._check_dependency.__name__ if check._check_dependency else None,
*args,
**kwargs)
@classmethod
def from_dict(cls, d):
"""Create a CheckResult given a dict. Dict must contain at least the fields in the CheckResult.
Throws a KeyError if not."""
return cls(**{field.name: d[field.name] for field in attr.fields(cls)})
class Timeout(Failure):
def __init__(self, seconds):
super().__init__(rationale=_("check timed out after {} seconds").format(seconds))
@contextmanager
def _timeout(seconds):
"""Context manager that runs code block until timeout is reached.
Example usage::
try:
with _timeout(10):
do_stuff()
except Timeout:
print("do_stuff timed out")
"""
def _handle_timeout(*args):
raise Timeout(seconds)
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
def check(dependency=None, timeout=60, max_log_lines=100):
"""Mark function as a check.
:param dependency: the check that this check depends on
:type dependency: function
:param timeout: maximum number of seconds the check can run
:type timeout: int
:param max_log_lines: maximum number of lines that can appear in the log
:type max_log_lines: int
When a check depends on another, the former will only run if the latter passes.
Additionally, the dependent check will inherit the filesystem of its dependency.
This is particularly useful when writing e.g., a ``compiles`` check that compiles a
student's program (and checks that it compiled successfully). Any checks that run the
student's program will logically depend on this check, and since they inherit the
resulting filesystem of the check, they will immidiately have access to the compiled
program without needing to recompile.
Example usage::
@check50.check() # Mark 'exists' as a check
def exists():
\"""hello.c exists\"""
check50.exists("hello.c")
@check50.check(exists) # Mark 'compiles' as a check that depends on 'exists'
def compiles():
\"""hello.c compiles\"""
check50.c.compile("hello.c")
@check50.check(compiles)
def prints_hello():
\"""prints "Hello, world!\\\\n\"""
# Since 'prints_hello', depends on 'compiles' it inherits the compiled binary
check50.run("./hello").stdout("[Hh]ello, world!?\\n", "hello, world\\n").exit()
"""
def decorator(check):
# Modules are evaluated from the top of the file down, so _check_names will
# contain the names of the checks in the order in which they are declared
_check_names.append(check.__name__)
check._check_dependency = dependency
@functools.wraps(check)
def wrapper(run_root_dir, dependency_state):
# Result template
result = CheckResult.from_check(check)
# Any shared (returned) state
state = None
try:
# Setup check environment, copying disk state from dependency
internal.run_dir = run_root_dir / check.__name__
src_dir = run_root_dir / (dependency.__name__ if dependency else "-")
shutil.copytree(src_dir, internal.run_dir)
os.chdir(internal.run_dir)
# Run registered functions before/after running check and set timeout
with internal.register, _timeout(seconds=timeout):
args = (dependency_state,) if inspect.getfullargspec(check).args else ()
state = check(*args)
except Failure as e:
result.passed = False
result.cause = e.payload
except BaseException as e:
result.passed = None
result.cause = {"rationale": _("check50 ran into an error while running checks!"),
"error": {
"type": type(e).__name__,
"value": str(e),
"traceback": traceback.format_tb(e.__traceback__),
"data" : e.payload if hasattr(e, "payload") else {}
}}
else:
result.passed = True
finally:
result.log = _log if len(_log) <= max_log_lines else ["..."] + _log[-max_log_lines:]
result.data = _data
return result, state
return wrapper
return decorator
class CheckRunner:
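    """Imports a checks module and runs its checks concurrently, respecting declared check dependencies."""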
def __init__(self, checks_path, included_files):
self.checks_path = checks_path
self.included_files = included_files
def run(self, targets=None):
"""
Run checks concurrently.
Returns a list of CheckResults ordered by declaration order of the checks in the imported module
targets allows you to limit which checks run. If targets is false-y, all checks are run.
"""
graph = self.build_subgraph(targets) if targets else self.dependency_graph
# Ensure that dictionary is ordered by check declaration order (via self.check_names)
# NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict.
results = {name: None for name in self.check_names}
try:
max_workers = int(os.environ.get("CHECK50_WORKERS"))
except (ValueError, TypeError):
max_workers = None
with futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
# Start all checks that have no dependencies
not_done = set(executor.submit(run_check(name, self.checks_spec))
for name in graph[None])
not_passed = []
while not_done:
done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED)
for future in done:
# Get result from completed check
result, state = future.result()
results[result.name] = result
if result.passed:
# Dispatch dependent checks
for child_name in graph[result.name]:
not_done.add(executor.submit(
run_check(child_name, self.checks_spec, state)))
else:
not_passed.append(result.name)
for name in not_passed:
self._skip_children(name, results)
# Don't include checks we don't have results for (i.e. in the case that targets != None) in the list.
return list(filter(None, results.values()))
def build_subgraph(self, targets):
"""
Build minimal subgraph of self.dependency_graph that contains each check in targets
"""
checks = self.dependencies_of(targets)
subgraph = collections.defaultdict(set)
for dep, children in self.dependency_graph.items():
# If dep is not a dependency of any target,
# none of its children will be either, may as well skip.
if dep is not None and dep not in checks:
continue
for child in children:
if child in checks:
subgraph[dep].add(child)
return subgraph
def dependencies_of(self, targets):
"""Get all unique dependencies of the targetted checks (tartgets)."""
inverse_graph = self._create_inverse_dependency_graph()
deps = set()
for target in targets:
if target not in inverse_graph:
                raise _exceptions.Error(_("Unknown check: {}").format(target))
curr_check = target
while curr_check is not None and curr_check not in deps:
deps.add(curr_check)
curr_check = inverse_graph[curr_check]
return deps
def _create_inverse_dependency_graph(self):
"""Build an inverse dependency map, from a check to its dependency."""
inverse_dependency_graph = {}
for check_name, dependents in self.dependency_graph.items():
for dependent_name in dependents:
inverse_dependency_graph[dependent_name] = check_name
return inverse_dependency_graph
def _skip_children(self, check_name, results):
"""
Recursively skip the children of check_name (presumably because check_name
did not pass).
"""
for name in self.dependency_graph[check_name]:
if results[name] is None:
results[name] = CheckResult(name=name, description=self.check_descriptions[name],
passed=None,
dependency=check_name,
cause={"rationale": _("can't check until a frown turns upside down")})
self._skip_children(name, results)
def __enter__(self):
# Remember the student's directory
internal.student_dir = Path.cwd()
# Set up a temp dir for the checks
self._working_area_manager = lib50.working_area(self.included_files, name='-')
internal.run_root_dir = self._working_area_manager.__enter__().parent
# Change current working dir to the temp dir
self._cd_manager = lib50.cd(internal.run_root_dir)
self._cd_manager.__enter__()
        # TODO: Naming the module "checks" is arbitrary. Better name?
self.checks_spec = importlib.util.spec_from_file_location("checks", self.checks_path)
# Clear check_names, import module, then save check_names. Not thread safe.
# Ideally, there'd be a better way to extract declaration order than @check mutating global state,
# but there are a lot of subtleties with using `inspect` or similar here
_check_names.clear()
check_module = importlib.util.module_from_spec(self.checks_spec)
self.checks_spec.loader.exec_module(check_module)
self.check_names = _check_names.copy()
_check_names.clear()
# Grab all checks from the module
checks = inspect.getmembers(check_module, lambda f: hasattr(f, "_check_dependency"))
# Map each check to tuples containing the names of the checks that depend on it
self.dependency_graph = collections.defaultdict(set)
for name, check in checks:
dependency = None if check._check_dependency is None else check._check_dependency.__name__
self.dependency_graph[dependency].add(name)
# Map each check name to its description
self.check_descriptions = {name: check.__doc__ for name, check in checks}
return self
def __exit__(self, type, value, tb):
# Destroy the temporary directory for the checks
self._working_area_manager.__exit__(type, value, tb)
# cd back to the directory check50 was called from
self._cd_manager.__exit__(type, value, tb)
class run_check:
"""
Check job that runs in a separate process.
This is only a class to get around the fact that `pickle` can't serialize closures.
This class is essentially a function that reimports the check module and runs the check.
"""
# All attributes shared between check50's main process and each checks' process
# Required for "spawn": https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
CROSS_PROCESS_ATTRIBUTES = (
"internal.check_dir",
"internal.slug",
"internal.student_dir",
"internal.run_root_dir",
"sys.excepthook",
"__version__"
)
def __init__(self, check_name, spec, state=None):
self.check_name = check_name
self.spec = spec
self.state = state
self._store_attributes()
def _store_attributes(self):
""""
Store all values from the attributes from run_check.CROSS_PROCESS_ATTRIBUTES on this object,
in case multiprocessing is using spawn as its starting method.
"""
        # Attributes only need to be passed explicitly to child processes when using spawn
if multiprocessing.get_start_method() != "spawn":
return
self._attribute_values = [eval(name) for name in self.CROSS_PROCESS_ATTRIBUTES]
# Replace all unpickle-able values with nothing, assuming they've been set externally,
# and will be set again upon re-importing the checks module
# https://github.com/cs50/check50/issues/235
for i, value in enumerate(self._attribute_values):
try:
pickle.dumps(value)
except (pickle.PicklingError, AttributeError):
self._attribute_values[i] = None
self._attribute_values = tuple(self._attribute_values)
def _set_attributes(self):
"""
If the parent process set any values in self._attribute_values,
restore them in the child process.
"""
if not hasattr(self, "_attribute_values"):
return
for name, val in zip(self.CROSS_PROCESS_ATTRIBUTES, self._attribute_values):
self._set_attribute(name, val)
@staticmethod
def _set_attribute(name, value):
"""Get an attribute from a name in global scope and set its value."""
parts = name.split(".")
obj = sys.modules[__name__]
for part in parts[:-1]:
obj = getattr(obj, part)
setattr(obj, parts[-1], value)
def __call__(self):
# Restore any attributes from the parent process
self._set_attributes()
# Create the checks module
mod = importlib.util.module_from_spec(self.spec)
# Execute (effectively import) the checks module
self.spec.loader.exec_module(mod)
# Run just the check named self.check_name
internal.check_running = True
try:
return getattr(mod, self.check_name)(internal.run_root_dir, self.state)
finally:
internal.check_running = False
| [
"traceback.format_tb",
"pickle.dumps",
"importlib.util.spec_from_file_location",
"lib50.working_area",
"inspect.getfullargspec",
"signal.alarm",
"functools.wraps",
"attr.Factory",
"concurrent.futures.wait",
"multiprocessing.get_start_method",
"pathlib.Path.cwd",
"attr.fields",
"concurrent.futures.ProcessPoolExecutor",
"signal.signal",
"attr.s",
"os.environ.get",
"shutil.copytree",
"os.chdir",
"collections.defaultdict",
"importlib.util.module_from_spec",
"lib50.cd",
"attr.ib"
] | [((463, 481), 'attr.s', 'attr.s', ([], {'slots': '(True)'}), '(slots=True)\n', (469, 481), False, 'import attr\n'), ((552, 561), 'attr.ib', 'attr.ib', ([], {}), '()\n', (559, 561), False, 'import attr\n'), ((580, 589), 'attr.ib', 'attr.ib', ([], {}), '()\n', (587, 589), False, 'import attr\n'), ((603, 624), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (610, 624), False, 'import attr\n'), ((683, 704), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (690, 704), False, 'import attr\n'), ((769, 790), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (776, 790), False, 'import attr\n'), ((2061, 2107), 'signal.signal', 'signal.signal', (['signal.SIGALRM', '_handle_timeout'], {}), '(signal.SIGALRM, _handle_timeout)\n', (2074, 2107), False, 'import signal\n'), ((2112, 2133), 'signal.alarm', 'signal.alarm', (['seconds'], {}), '(seconds)\n', (2124, 2133), False, 'import signal\n'), ((2178, 2193), 'signal.alarm', 'signal.alarm', (['(0)'], {}), '(0)\n', (2190, 2193), False, 'import signal\n'), ((2202, 2247), 'signal.signal', 'signal.signal', (['signal.SIGALRM', 'signal.SIG_DFL'], {}), '(signal.SIGALRM, signal.SIG_DFL)\n', (2215, 2247), False, 'import signal\n'), ((4141, 4163), 'functools.wraps', 'functools.wraps', (['check'], {}), '(check)\n', (4156, 4163), False, 'import functools\n'), ((8349, 8377), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (8372, 8377), False, 'import collections\n'), ((10514, 10524), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (10522, 10524), False, 'from pathlib import Path\n'), ((10606, 10655), 'lib50.working_area', 'lib50.working_area', (['self.included_files'], {'name': '"""-"""'}), "(self.included_files, name='-')\n", (10624, 10655), False, 'import lib50\n'), ((10815, 10846), 'lib50.cd', 'lib50.cd', (['internal.run_root_dir'], {}), '(internal.run_root_dir)\n', (10823, 10846), False, 'import lib50\n'), ((10981, 11047), 'importlib.util.spec_from_file_location', 'importlib.util.spec_from_file_location', (['"""checks"""', 'self.checks_path'], {}), "('checks', self.checks_path)\n", (11019, 11047), False, 'import importlib\n'), ((11373, 11422), 'importlib.util.module_from_spec', 'importlib.util.module_from_spec', (['self.checks_spec'], {}), '(self.checks_spec)\n', (11404, 11422), False, 'import importlib\n'), ((11814, 11842), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (11837, 11842), False, 'import collections\n'), ((15167, 15209), 'importlib.util.module_from_spec', 'importlib.util.module_from_spec', (['self.spec'], {}), '(self.spec)\n', (15198, 15209), False, 'import importlib\n'), ((651, 669), 'attr.Factory', 'attr.Factory', (['list'], {}), '(list)\n', (663, 669), False, 'import attr\n'), ((732, 750), 'attr.Factory', 'attr.Factory', (['dict'], {}), '(dict)\n', (744, 750), False, 'import attr\n'), ((6916, 6968), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {'max_workers': 'max_workers'}), '(max_workers=max_workers)\n', (6943, 6968), True, 'import concurrent.futures as futures\n'), ((13640, 13674), 'multiprocessing.get_start_method', 'multiprocessing.get_start_method', ([], {}), '()\n', (13672, 13674), False, 'import multiprocessing\n'), ((4628, 4670), 'shutil.copytree', 'shutil.copytree', (['src_dir', 'internal.run_dir'], {}), '(src_dir, internal.run_dir)\n', (4643, 4670), False, 'import shutil\n'), ((4687, 4713), 'os.chdir', 'os.chdir', (['internal.run_dir'], {}), '(internal.run_dir)\n', (4695, 4713), 
False, 'import os\n'), ((6796, 6829), 'os.environ.get', 'os.environ.get', (['"""CHECK50_WORKERS"""'], {}), "('CHECK50_WORKERS')\n", (6810, 6829), False, 'import os\n'), ((7259, 7318), 'concurrent.futures.wait', 'futures.wait', (['not_done'], {'return_when': 'futures.FIRST_COMPLETED'}), '(not_done, return_when=futures.FIRST_COMPLETED)\n', (7271, 7318), True, 'import concurrent.futures as futures\n'), ((14111, 14130), 'pickle.dumps', 'pickle.dumps', (['value'], {}), '(value)\n', (14123, 14130), False, 'import pickle\n'), ((1547, 1563), 'attr.fields', 'attr.fields', (['cls'], {}), '(cls)\n', (1558, 1563), False, 'import attr\n'), ((4918, 4947), 'inspect.getfullargspec', 'inspect.getfullargspec', (['check'], {}), '(check)\n', (4940, 4947), False, 'import inspect\n'), ((5496, 5532), 'traceback.format_tb', 'traceback.format_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (5515, 5532), False, 'import traceback\n')] |
# encoding: utf-8
# pylint: disable=invalid-name,wrong-import-position
"""
Extensions setup
================
Extensions provide access to common resources of the application.
Please, put new extension instantiations and initializations here.
"""
from flask_cors import CORS
cross_origin_resource_sharing = CORS()
from sqlalchemy_utils import force_auto_coercion, force_instant_defaults
force_auto_coercion()
force_instant_defaults()
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy(session_options={'autocommit': True})
from flask_marshmallow import Marshmallow
marshmallow = Marshmallow()
from . import api
import logging.config
import sys
import os
import settings
import modules
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from flask import Flask
from flask_cors import CORS
from extensions.api import api
from extensions import db
log_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'logging.conf')
logging.config.fileConfig(log_file_path)
log = logging.getLogger(__name__)
logging.getLogger('flask_cors').level = logging.DEBUG
# methods
def configure_app(flask_app):
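    """Copy Flask, SQLAlchemy and Flask-RESTPlus options from the settings module onto the app config."""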
flask_app.config['DEBUG'] = settings.FLASK_DEBUG
# flask_app.config['SERVER_NAME'] = settings.FLASK_SERVER_NAME
flask_app.config['SECRET_KEY'] = settings.FLASK_SECRET_KEY
flask_app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.SQLALCHEMY_TRACK_MODIFICATIONS
flask_app.config['SWAGGER_UI_DOC_EXPANSION'] = settings.RESTPLUS_SWAGGER_UI_DOC_EXPANSION
flask_app.config['RESTPLUS_VALIDATE'] = settings.RESTPLUS_VALIDATE
flask_app.config['RESTPLUS_MASK_SWAGGER'] = settings.RESTPLUS_MASK_SWAGGER
flask_app.config['ERROR_404_HELP'] = settings.RESTPLUS_ERROR_404_HELP
def initialize_app(flask_app):
configure_app(flask_app)
for extension in (
cross_origin_resource_sharing,
db,
marshmallow,
api,
):
extension.init_app(flask_app)
flask_app.extensions['migrate'] = AlembicDatabaseMigrationConfig(db, compare_type=True)
def create_app():
"""
Create app instance
"""
app = Flask(__name__)
initialize_app(app)
CORS(app, resources={r"/api/*": {"origins": "http://hotmaps.hevs.ch"}})
modules.init_app(app)
return app
class AlembicDatabaseMigrationConfig(object):
"""
Helper config holder that provides missing functions of Flask-Alembic
package since we use custom invoke tasks instead.
"""
def __init__(self, database, directory='migrations', **kwargs):
self.db = database
self.directory = directory
        self.configure_args = kwargs
| [
"flask_cors.CORS",
"flask.Flask",
"flask_marshmallow.Marshmallow",
"modules.init_app",
"os.path.dirname",
"sqlalchemy_utils.force_instant_defaults",
"sqlalchemy_utils.force_auto_coercion",
"os.path.abspath",
"flask_sqlalchemy.SQLAlchemy"
] | [((309, 315), 'flask_cors.CORS', 'CORS', ([], {}), '()\n', (313, 315), False, 'from flask_cors import CORS\n'), ((390, 411), 'sqlalchemy_utils.force_auto_coercion', 'force_auto_coercion', ([], {}), '()\n', (409, 411), False, 'from sqlalchemy_utils import force_auto_coercion, force_instant_defaults\n'), ((412, 436), 'sqlalchemy_utils.force_instant_defaults', 'force_instant_defaults', ([], {}), '()\n', (434, 436), False, 'from sqlalchemy_utils import force_auto_coercion, force_instant_defaults\n'), ((483, 531), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {'session_options': "{'autocommit': True}"}), "(session_options={'autocommit': True})\n", (493, 531), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((589, 602), 'flask_marshmallow.Marshmallow', 'Marshmallow', ([], {}), '()\n', (600, 602), False, 'from flask_marshmallow import Marshmallow\n'), ((2215, 2230), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (2220, 2230), False, 'from flask import Flask\n'), ((2261, 2331), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/api/*': {'origins': 'http://hotmaps.hevs.ch'}}"}), "(app, resources={'/api/*': {'origins': 'http://hotmaps.hevs.ch'}})\n", (2265, 2331), False, 'from flask_cors import CORS\n'), ((2338, 2359), 'modules.init_app', 'modules.init_app', (['app'], {}), '(app)\n', (2354, 2359), False, 'import modules\n'), ((727, 752), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (742, 752), False, 'import os\n'), ((917, 942), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (932, 942), False, 'import os\n')] |
import pandas as pd
from pandas import DataFrame
import os,sys,inspect,pickle
import numpy as np
import itertools
from sklearn.metrics import mean_squared_error
from sklearn.externals import joblib
from math import sqrt
from statsmodels.tsa.api import VAR
from statsmodels.tsa.stattools import adfuller,acf
from statsmodels.tools.eval_measures import rmse, aic
from statsmodels.tsa.statespace.varmax import VARMAX
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import properties as pr
def VAR_Model(modeldata):
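    """Fit VAR models of increasing lag order until the AIC falls inside the configured limit;
    return the fitted model together with its residual mean and lag order."""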
model = VAR(modeldata)
res = {}
AIC=[]
for i in range(100):
result = model.fit(i)
aic = result.aic
AIC.append(aic)
if (aic <= pr.AICvalue_limit) and (aic >= -pr.AICvalue_limit):
break
lag_order = i-1
varmodel = model.fit(lag_order)
residuals = DataFrame(varmodel.resid)
rmean = abs(residuals.mean())
#print("Residual Error = {}".format(rmean[0]))
res.update({'Residual Mean':rmean,'Lag Order':lag_order})
return varmodel,res
def VARMAXgridsearch(modeldata,cfg_list):
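    """Fit a VARMAX model for every (p, q) order in cfg_list and record each model's mean residual error."""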
results = []
for index in range(len(cfg_list)):
order = cfg_list[index]
# define model
temp_dict = {}
varmaxmodel = VARMAX(modeldata, order = order).fit()
residuals = DataFrame(varmaxmodel.resid)
mean_error = abs(residuals.mean())
temp_dict.update({'order':order,'model':varmaxmodel,'meanError':mean_error[0]})
#print("\n {}".format(temp_dict))
results.append(temp_dict)
return results
def config_param(parameter):
"""Find THe various configuration of models p and q for VARMAX"""
p = parameter['p']
q = parameter['q']
    pq = list(itertools.product(p,q)) #generate all (p, q) order combinations
params = [(x[0], x[1]) for x in pq]
return params
def VAR_forecasting(Data,Time_Column,Target_Column):
#For VAR
finalVARparam = {}
fmodel = {}
VARModel,fresult = VAR_Model(Data)
fmodel.update({'VAR Model':VARModel})
finalVARparam.update({'VAR':fresult})
joblib.dump(fmodel['VAR Model'],pr.path_var)
#For VARMAX
config = config_param(pr.param)
scores = VARMAXgridsearch(Data,config)
min_err_param = min(scores, key=lambda x:x['meanError'])
fmodel.update({'VARMAX Model':min_err_param.pop('model')})
finalVARparam.update({'VARMAX':min_err_param})
joblib.dump(fmodel['VARMAX Model'],pr.path_model)
    return finalVARparam
| [
"sys.path.insert",
"statsmodels.tsa.api.VAR",
"inspect.currentframe",
"statsmodels.tsa.statespace.varmax.VARMAX",
"itertools.product",
"os.path.dirname",
"sklearn.externals.joblib.dump",
"pandas.DataFrame"
] | [((514, 541), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (529, 541), False, 'import os, sys, inspect, pickle\n'), ((542, 571), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (557, 571), False, 'import os, sys, inspect, pickle\n'), ((635, 649), 'statsmodels.tsa.api.VAR', 'VAR', (['modeldata'], {}), '(modeldata)\n', (638, 649), False, 'from statsmodels.tsa.api import VAR\n'), ((940, 965), 'pandas.DataFrame', 'DataFrame', (['varmodel.resid'], {}), '(varmodel.resid)\n', (949, 965), False, 'from pandas import DataFrame\n'), ((2190, 2235), 'sklearn.externals.joblib.dump', 'joblib.dump', (["fmodel['VAR Model']", 'pr.path_var'], {}), "(fmodel['VAR Model'], pr.path_var)\n", (2201, 2235), False, 'from sklearn.externals import joblib\n'), ((2515, 2565), 'sklearn.externals.joblib.dump', 'joblib.dump', (["fmodel['VARMAX Model']", 'pr.path_model'], {}), "(fmodel['VARMAX Model'], pr.path_model)\n", (2526, 2565), False, 'from sklearn.externals import joblib\n'), ((1388, 1416), 'pandas.DataFrame', 'DataFrame', (['varmaxmodel.resid'], {}), '(varmaxmodel.resid)\n', (1397, 1416), False, 'from pandas import DataFrame\n'), ((1809, 1832), 'itertools.product', 'itertools.product', (['p', 'q'], {}), '(p, q)\n', (1826, 1832), False, 'import itertools\n'), ((476, 498), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (496, 498), False, 'import os, sys, inspect, pickle\n'), ((1329, 1359), 'statsmodels.tsa.statespace.varmax.VARMAX', 'VARMAX', (['modeldata'], {'order': 'order'}), '(modeldata, order=order)\n', (1335, 1359), False, 'from statsmodels.tsa.statespace.varmax import VARMAX\n')] |
import numpy as np
EMPTY = 0
RIGHT = 1
DOWN = 2
def read_world(fname: str) -> np.ndarray:
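    """Parse the input file into a 2-D grid of EMPTY/RIGHT/DOWN cells."""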
r: list[list[int]] = []
with open(fname) as f:
for line in f:
options={'>': RIGHT, 'v': DOWN, '.': EMPTY}
v = [options[x] for x in line.strip()]
r.append(v)
return np.array(r, dtype=np.uint8)
def simulate_1_tick(world: np.ndarray) -> tuple[np.ndarray, bool]:
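    """Advance the grid one step: east-moving (RIGHT) cells move first, then south-moving (DOWN)
    cells, both wrapping around the edges. Returns the new grid and whether nothing moved."""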
Y,X = world.shape
# we don't need to copy if we go from left to right...
right_result = np.zeros(world.shape, dtype=np.uint8) # 0=EMPTY
end_of_the_world = True
for y in range(Y):
for x in range(X):
w = world[y, x]
if w == RIGHT and world[y, (x+1) % X] == EMPTY:
right_result[y, (x+1) % X] = w
end_of_the_world = False
elif w != EMPTY:
right_result[y, x] = w
down_result = np.zeros(world.shape, dtype=np.uint8)
for y in range(Y):
for x in range(X):
w = right_result[y, x]
if w == DOWN and right_result[(y+1) % Y, x] == EMPTY:
down_result[(y+1) % Y, x] = w
end_of_the_world = False
elif w != EMPTY:
down_result[y,x] = w
return down_result, end_of_the_world
#
# MAIN
#
world = read_world('data.txt')
for i in range(10000):
world, end_of_the_world = simulate_1_tick(world)
if end_of_the_world:
print('Iterations:', i+1)
break
else:
print("FAILED!")
| [
"numpy.array",
"numpy.zeros"
] | [((312, 339), 'numpy.array', 'np.array', (['r'], {'dtype': 'np.uint8'}), '(r, dtype=np.uint8)\n', (320, 339), True, 'import numpy as np\n'), ((508, 545), 'numpy.zeros', 'np.zeros', (['world.shape'], {'dtype': 'np.uint8'}), '(world.shape, dtype=np.uint8)\n', (516, 545), True, 'import numpy as np\n'), ((896, 933), 'numpy.zeros', 'np.zeros', (['world.shape'], {'dtype': 'np.uint8'}), '(world.shape, dtype=np.uint8)\n', (904, 933), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Tests for :module:`pagure.lib.encoding_utils`.
"""
from __future__ import unicode_literals, absolute_import
import os
import unittest
import sys
cchardet = None
try:
import cchardet
except ImportError:
pass
import chardet
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
from pagure.lib import encoding_utils
class TestGuessEncoding(unittest.TestCase):
def test_guess_encoding_ascii(self):
"""
Assert when ascii-only data is provided ascii is the guessed encoding.
"""
data = "Twas bryllyg, and the slythy toves did gyre and gymble"
result = encoding_utils.guess_encoding(data.encode("ascii"))
if cchardet is not None:
self.assertEqual(result, "ASCII")
else:
self.assertEqual(result, "ascii")
def test_guess_encoding_favor_utf_8(self):
"""
Test that strings that could be UTF-8 or ISO-8859-* result in UTF-8.
python-chardet-3.0.4-2.fc27.noarch detects it as ISO-8859-9
python-chardet-2.2.1-1.el7_1.noarch detects it as ISO-8859-2
"""
data = "Šabata".encode("utf-8")
result = encoding_utils.guess_encoding(data)
chardet_result = chardet.detect(data)
if cchardet:
self.assertEqual(result, "WINDOWS-1250")
else:
self.assertEqual(result, "utf-8")
if chardet.__version__[0] in ("3", "4"):
self.assertEqual(chardet_result["encoding"], "ISO-8859-9")
else:
self.assertEqual(chardet_result["encoding"], "ISO-8859-2")
def test_guess_encoding_no_data(self):
""" Test encoding_utils.guess_encoding() with an empty string """
result = encoding_utils.guess_encoding("".encode("utf-8"))
self.assertEqual(result, "ascii")
class TestGuessEncodings(unittest.TestCase):
def test_guess_encodings(self):
""" Test the encoding_utils.guess_encodings() method. """
data = "Šabata".encode("utf-8")
result = encoding_utils.guess_encodings(data)
chardet_result = chardet.detect(data)
if cchardet is not None:
# The last one in the list (which apparently has only one)
self.assertEqual(result[-1].encoding, "WINDOWS-1250")
else:
if chardet.__version__[0] in ("3", "4"):
# The first three have different confidence values
                expected_list = ["utf-8", "ISO-8859-9", "ISO-8859-1"]
# This is the one with the least confidence
self.assertEqual(result[-1].encoding, "windows-1255")
self.assertListEqual(
[encoding.encoding for encoding in result][:3],
                    expected_list,
)
# The values in the middle of the list all have the same confidence
# value and can't be sorted reliably: use sets.
expected_list = sorted(
[
"utf-8",
"ISO-8859-9",
"ISO-8859-1",
"MacCyrillic",
"IBM866",
"TIS-620",
"EUC-JP",
"EUC-KR",
"GB2312",
"KOI8-R",
"Big5",
"IBM855",
"ISO-8859-7",
"SHIFT_JIS",
"windows-1253",
"CP949",
"EUC-TW",
"ISO-8859-5",
"windows-1251",
"windows-1255",
]
)
self.assertListEqual(
sorted(set([encoding.encoding for encoding in result])),
expected_list,
)
self.assertEqual(chardet_result["encoding"], "ISO-8859-9")
else:
self.assertListEqual(
[encoding.encoding for encoding in result],
["utf-8", "ISO-8859-2", "windows-1252"],
)
self.assertEqual(chardet_result["encoding"], "ISO-8859-2")
def test_guess_encodings_no_data(self):
""" Test encoding_utils.guess_encodings() with an emtpy string """
result = encoding_utils.guess_encodings("".encode("utf-8"))
self.assertEqual([encoding.encoding for encoding in result], ["ascii"])
class TestDecode(unittest.TestCase):
def test_decode(self):
""" Test encoding_utils.decode() """
data = (
"This is a little longer text for testing Šabata's encoding. "
"With more characters, let's see if it become more clear as to what "
"encoding should be used for this. We'll include from french words "
"in there for non-ascii: français, gagné!"
)
self.assertEqual(data, encoding_utils.decode(data.encode("utf-8")))
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"pagure.lib.encoding_utils.guess_encodings",
"pagure.lib.encoding_utils.guess_encoding",
"chardet.detect",
"unittest.main",
"os.path.abspath"
] | [((5116, 5142), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (5129, 5142), False, 'import unittest\n'), ((1205, 1240), 'pagure.lib.encoding_utils.guess_encoding', 'encoding_utils.guess_encoding', (['data'], {}), '(data)\n', (1234, 1240), False, 'from pagure.lib import encoding_utils\n'), ((1266, 1286), 'chardet.detect', 'chardet.detect', (['data'], {}), '(data)\n', (1280, 1286), False, 'import chardet\n'), ((2075, 2111), 'pagure.lib.encoding_utils.guess_encodings', 'encoding_utils.guess_encodings', (['data'], {}), '(data)\n', (2105, 2111), False, 'from pagure.lib import encoding_utils\n'), ((2137, 2157), 'chardet.detect', 'chardet.detect', (['data'], {}), '(data)\n', (2151, 2157), False, 'import chardet\n'), ((316, 341), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (331, 341), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 16:38:27 2019
@author: <NAME>
"""
#v1.01
import cv2
import numpy as np
from xml.dom import minidom
import glob
import os
import pandas as pd
from pathlib import Path
import sys
import av
class ROI:
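    """Georeferenced region of interest: map origin, rotation and pixel size, plus the rectified output extent in map units and pixels."""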
xori = -112000
yori = -77571
rotation = 50
pixel_size = 0.11
dx_map = 600
dy_map = 300
def __init__(self, *initial_data, **kwargs):
for dictionary in initial_data:
for key in dictionary:
setattr(self, key, dictionary[key])
for key, value in kwargs.items():
setattr(self, key, value)
self.dx = int(self.dx_map / self.pixel_size)
self.dy = int(self.dy_map / self.pixel_size)
def xyz2XYZ(self, xyz): #from local xyz to global coordinates XYZ
z = np.c_[xyz[:,2]]
ones = np.ones(shape=(len(xyz), 1))
xy1 = np.hstack([xyz[:,:2], ones])
atm = self.affine_trans_mat()
XYZ = atm.dot(xy1.T).T
XYZ = np.hstack([XYZ, z * self.pixel_size])
XYZ[:,0] = XYZ[:,0] + self.xori
XYZ[:,1] = XYZ[:,1] + self.yori
return XYZ
def XYZ2xyz(self, points): #from global XYZ to local coordinates xyz
XYZ = points.copy()
XYZ[:,0] = XYZ[:,0] - self.xori
XYZ[:,1] = XYZ[:,1] - self.yori
Z = np.c_[XYZ[:,2]] / self.pixel_size
ones = np.ones(shape=(len(XYZ), 1))
XY1 = np.hstack([XYZ[:,:2], ones])
rot_mat = cv2.getRotationMatrix2D((0, 0), self.rotation, 1 / self.pixel_size)
xyz = rot_mat.dot(XY1.T).T
xyz = np.hstack([xyz, Z])
return xyz
    def affine_trans_mat(self): #from local xyz to global coordinates XYZ (inverse of the mapping used in XYZ2xyz)
return cv2.invertAffineTransform(cv2.getRotationMatrix2D((0, 0), self.rotation, 1 / self.pixel_size))
def save_world_file(self, filename):
atm = self.affine_trans_mat()
atm[0,2] = self.xori
atm[1,2] = self.yori
if Path(filename).suffix =='.jpg':
file = filename.replace('.jpg', '.jpw')
elif Path(filename).suffix =='.jpeg':
file = filename.replace('.jpeg', '.jpw')
elif Path(filename).suffix =='.tif':
file = filename.replace('.tif', '.tfw')
elif Path(filename).suffix =='.png':
file = filename.replace('.png', '.pngw')
else:
sys.exit("Unknow file type")
np.savetxt(file, atm.T.flatten())
class VideoImage:
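    """Undistorts and rectifies camera imagery (images or videos) using intrinsics read from a camera XML file and ground control points."""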
camera_file = 'D:/Documentos/Modelos/Rectify/tutorial/Exercise/Cameras.xml'
camera = 'Mobo10_4_225_136'
corrected_images = True
camera_position = []
camera_rot = []
gcp_excel_file = ''
gcp_XYZ = []
gcp_uv = []
H = np.array([])
z_plane = 0
original_images_dir = 'D:/Documentos/Modelos/Rectify/tutorial/Exercise/ExtParDefinition/GCP_08Out2010/'
images_filename_extension = 'jpg'
images = ''
image_extensions = ('png', 'jpg', 'jpeg', 'tif')
video_extensions = ('3gp', 'avi')
image_display = False
rectification_method = 'from_gcp'
undistort_images_dir = 'D:/Documentos/Modelos/Rectify/'
undistort = True
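    # note: this flag is shadowed by the undistort() method defined further down the class body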
write_echo = True
write_world_file = True
#interpolation = cv2.INTER_LINEAR
interpolation_method = cv2.INTER_NEAREST
rectified_images_dir = 'D:/Documentos/Modelos/Rectify/rectified/'
rectify = False
RANSAC_scheme = True
roi = ROI()
alpha = 0;
mtx = np.zeros((3,3))
dist = np.zeros((1,5))
newcameramtx = np.zeros((3,3))
mtx = np.zeros((3,3))
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def undistort_images(self):
self.read_camera()
self.read_images(self.original_images_dir)
Path(self.undistort_images_dir).mkdir(parents=True, exist_ok=True)
for fname in self.images:
# Undistort images
if fname.lower().endswith(self.image_extensions):
img = cv2.imread(fname)
dst = self.undistort(img)
basename = os.path.basename(fname)
img_name = self.undistort_images_dir + basename
writeStatus = cv2.imwrite(img_name, dst)
if self.image_display:
cv2.imshow('Undistorted image', dst)
if self.write_echo:
if writeStatus is True:
print("Undistort image " + img_name + " written")
else:
print("Problem written " + img_name) #
# Undistort videos
elif fname.lower().endswith(self.video_extensions):
print(fname)
container = av.open(fname)
for frame in container.decode(video=0):
if frame.index % 100 == 0:
print("processed frame index {}".format(frame.index))
img = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)
dst = self.undistort(img)
if frame.index == 0:
h, w = dst.shape[:2]
basename = os.path.splitext(os.path.basename(fname))[0] + '.avi'
vid_name = self.undistort_images_dir + basename
out = cv2.VideoWriter(vid_name, cv2.VideoWriter_fourcc('M','J','P','G'), 2, (w,h))
out.write(dst)
if self.image_display:
cv2.imshow('Frame', dst)
# Press Q on keyboard to exit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
out.release()
else:
print('unknown file extension')
def rectify_images(self, method = 'from_gcp'):
if self.rectification_method == 'from_gcp':
self.read_camera()
self.dist = np.zeros((1,5))
self.compute_camera_matrices()
elif self.rectification_method == 'from_parameter_file':
parameters = np.load(self.rectification_parameter_file, allow_pickle=True)
self.H = parameters['H']
self.z_plane = parameters['z_plane']
self.roi = ROI(parameters['roi'].all())
if not self.H.size:
sys.exit('Empty Homography Matriz - cannot rectify images')
self.read_images(self.undistort_images_dir)
Path(self.rectified_images_dir).mkdir(parents=True, exist_ok=True)
for fname in self.images:
if fname.lower().endswith(self.image_extensions):
img = cv2.imread(fname)
dst = self.warp_image(img)
basename = os.path.basename(fname)
img_name = self.rectified_images_dir + basename
writeStatus = cv2.imwrite(img_name, dst)
if self.write_world_file:
self.roi.save_world_file(img_name)
if self.write_echo:
if writeStatus is True:
print("Rectified image " + img_name + " written")
else:
print("Problem written " + img_name) #
elif fname.lower().endswith('avi'): #only rectifies avi files
print(fname)
container = av.open(fname)
for frame in container.decode(video=0):
img = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)
dst = self.warp_image(img)
if frame.index % 100 == 0:
print("processed frame index {}".format(frame.index))
if self.image_display:
cv2.imshow('Frame', dst)
# Press Q on keyboard to exit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if frame.index == 0:
h, w = dst.shape[:2]
basename = os.path.splitext(os.path.basename(fname))[0] + '.avi'
vid_name = self.rectified_images_dir + basename
out = cv2.VideoWriter(vid_name, cv2.VideoWriter_fourcc('M','J','P','G'), 2, (w,h))
out.write(dst)
out.release()
else:
print('unknown file extension')
def warp_image_points(self, img):
h, w = img.shape[:2]
u, v = np.meshgrid(np.arange(w),np.arange(h))
uv = np.vstack((u.flatten(), v.flatten())).T
        xy = self.uv2XYZ(uv.astype(np.float32))
x = xy[:,0]
y = xy[:,1]
ind = (x > self.roi.xori) & (x < self.roi.xori + self.roi.dx) & (y > self.roi.yori) & (y < self.roi.yori + self.roi.dy)
x = x[ind]
y = y[ind]
ptsRGB=img.reshape(-1,3)
ptsRGB = ptsRGB[ind,:]
return x,y, ptsRGB
def warp_image(self, img):
warp = cv2.warpPerspective(img, self.H, (self.roi.dx, self.roi.dy), flags=cv2.WARP_INVERSE_MAP + self.interpolation_method)
return warp
def uv2XYZ(self, points):
Hinv = np.linalg.inv(self.H)
xy = cv2.perspectiveTransform(np.array([points]), Hinv)[0]
z = self.z_plane * np.ones(shape=(len(xy), 1))
xyz = np.hstack([xy[:,:2], z])
XYZ = self.roi.xyz2XYZ(xyz)
return XYZ
def XY2uv(self, points):
points = self.roi.XYZ2xyz(points)
points = points[:,:2]
uv = cv2.perspectiveTransform(np.array([points]), self.H)[0]
return uv
def XYZ2uv(self, points):
points = self.roi.XYZ2xyz(points)
(uv, jacobian) = cv2.projectPoints(points, self.rvec, self.tvec, self.mtx, self.dist)
npoints = points.shape[0]
uv = uv.reshape(npoints, 2)
return uv
def undistort(self, img):
h, w = img.shape[:2]
self.newcameramtx, roi = cv2.getOptimalNewCameraMatrix(self.mtx, self.dist, (w,h), self.alpha,(w,h))
dst = cv2.undistort(img, self.mtx, self.dist, None, self.newcameramtx)
return dst
def read_images(self, images_dir):
self.images = glob.glob(images_dir + '*.' + self.images_filename_extension)
def compute_camera_matrices(self):
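        """Solve the camera pose from the ground control points (PnP) and build the
        homography H that maps the plane at z_plane onto image pixels."""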
self.read_gcp()
xyz = self.roi.XYZ2xyz(self.gcp_XYZ)
if self.RANSAC_scheme:
retval, self.rvec, self.tvec, _ = cv2.solvePnPRansac(xyz, self.gcp_uv, self.mtx, self.dist)
else:
retval, self.rvec, self.tvec = cv2.solvePnP(xyz, self.gcp_uv, self.mtx, self.dist)
self.camera_rot = cv2.Rodrigues(self.rvec)[0]
camera_position = -self.camera_rot.T @ self.tvec
self.camera_position = self.roi.xyz2XYZ(camera_position.T)
Rt = self.camera_rot
Rt[:,2] = Rt[:,2] * self.z_plane / self.roi.pixel_size + self.tvec.flatten()
self.H = self.mtx @ Rt
self.H = self.H / self.H[2,2]
def save_rectification_parameter_file(self, filename = 'rectification_parameters'):
np.savez(filename, H = self.H, z_plane = self.z_plane, roi = self.roi.__dict__)
def gcp_reprojection_error(self):
#error in pixels
uv = self.XYZ2uv(self.gcp_XYZ)
dif = self.gcp_uv -uv
return dif
def read_gcp(self):
gcp = pd.read_excel(self.gcp_excel_file)
self.gcp_XYZ=gcp[['X', 'Y', 'Z']].values.astype('float64')
self.gcp_uv=gcp[['u', 'v']].values.astype('float64')
def read_camera(self):
self.mtx = np.zeros((3,3))
xmldoc = minidom.parse(self.camera_file)
itemlist = xmldoc.getElementsByTagName('Cameras')
camera_par = itemlist[0].getElementsByTagName(self.camera)
fx = camera_par[0].getElementsByTagName('fx')
self.mtx[0, 0] = fx[0].firstChild.data
fy = camera_par[0].getElementsByTagName('fy')
self.mtx[1, 1] = fy[0].firstChild.data
cx = camera_par[0].getElementsByTagName('cx')
self.mtx[0, 2] = cx[0].firstChild.data
cy = camera_par[0].getElementsByTagName('cy')
self.mtx[1, 2] = cy[0].firstChild.data
self.mtx[2, 2] = 1
k1 = camera_par[0].getElementsByTagName('k1')
self.dist[0, 0] = k1[0].firstChild.data
k2 = camera_par[0].getElementsByTagName('k2')
self.dist[0, 1] = k2[0].firstChild.data
k3 = camera_par[0].getElementsByTagName('k3')
self.dist[0, 2] = k3[0].firstChild.data
k4 = camera_par[0].getElementsByTagName('k4')
self.dist[0, 3] = k4[0].firstChild.data
k5 = camera_par[0].getElementsByTagName('k5')
self.dist[0, 4] = k5[0].firstChild.data
| [
"numpy.hstack",
"cv2.projectPoints",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"av.open",
"sys.exit",
"pandas.read_excel",
"numpy.arange",
"numpy.savez",
"xml.dom.minidom.parse",
"pathlib.Path",
"cv2.solvePnPRansac",
"cv2.undistort",
"cv2.solvePnP",
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"glob.glob",
"cv2.getOptimalNewCameraMatrix",
"cv2.getRotationMatrix2D",
"cv2.imread",
"cv2.imwrite",
"numpy.zeros",
"numpy.linalg.inv",
"cv2.Rodrigues",
"os.path.basename",
"numpy.load"
] | [((2831, 2843), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2839, 2843), True, 'import numpy as np\n'), ((3623, 3639), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3631, 3639), True, 'import numpy as np\n'), ((3651, 3667), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {}), '((1, 5))\n', (3659, 3667), True, 'import numpy as np\n'), ((3687, 3703), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3695, 3703), True, 'import numpy as np\n'), ((3714, 3730), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3722, 3730), True, 'import numpy as np\n'), ((938, 967), 'numpy.hstack', 'np.hstack', (['[xyz[:, :2], ones]'], {}), '([xyz[:, :2], ones])\n', (947, 967), True, 'import numpy as np\n'), ((1053, 1090), 'numpy.hstack', 'np.hstack', (['[XYZ, z * self.pixel_size]'], {}), '([XYZ, z * self.pixel_size])\n', (1062, 1090), True, 'import numpy as np\n'), ((1493, 1522), 'numpy.hstack', 'np.hstack', (['[XYZ[:, :2], ones]'], {}), '([XYZ[:, :2], ones])\n', (1502, 1522), True, 'import numpy as np\n'), ((1541, 1608), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(0, 0)', 'self.rotation', '(1 / self.pixel_size)'], {}), '((0, 0), self.rotation, 1 / self.pixel_size)\n', (1564, 1608), False, 'import cv2\n'), ((1662, 1681), 'numpy.hstack', 'np.hstack', (['[xyz, Z]'], {}), '([xyz, Z])\n', (1671, 1681), True, 'import numpy as np\n'), ((9743, 9864), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'self.H', '(self.roi.dx, self.roi.dy)'], {'flags': '(cv2.WARP_INVERSE_MAP + self.interpolation_method)'}), '(img, self.H, (self.roi.dx, self.roi.dy), flags=cv2.\n WARP_INVERSE_MAP + self.interpolation_method)\n', (9762, 9864), False, 'import cv2\n'), ((9934, 9955), 'numpy.linalg.inv', 'np.linalg.inv', (['self.H'], {}), '(self.H)\n', (9947, 9955), True, 'import numpy as np\n'), ((10095, 10120), 'numpy.hstack', 'np.hstack', (['[xy[:, :2], z]'], {}), '([xy[:, :2], z])\n', (10104, 10120), True, 'import numpy as np\n'), ((10481, 10549), 'cv2.projectPoints', 'cv2.projectPoints', (['points', 'self.rvec', 'self.tvec', 'self.mtx', 'self.dist'], {}), '(points, self.rvec, self.tvec, self.mtx, self.dist)\n', (10498, 10549), False, 'import cv2\n'), ((10751, 10829), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['self.mtx', 'self.dist', '(w, h)', 'self.alpha', '(w, h)'], {}), '(self.mtx, self.dist, (w, h), self.alpha, (w, h))\n', (10780, 10829), False, 'import cv2\n'), ((10842, 10906), 'cv2.undistort', 'cv2.undistort', (['img', 'self.mtx', 'self.dist', 'None', 'self.newcameramtx'], {}), '(img, self.mtx, self.dist, None, self.newcameramtx)\n', (10855, 10906), False, 'import cv2\n'), ((10996, 11057), 'glob.glob', 'glob.glob', (["(images_dir + '*.' + self.images_filename_extension)"], {}), "(images_dir + '*.' 
+ self.images_filename_extension)\n", (11005, 11057), False, 'import glob\n'), ((11915, 11988), 'numpy.savez', 'np.savez', (['filename'], {'H': 'self.H', 'z_plane': 'self.z_plane', 'roi': 'self.roi.__dict__'}), '(filename, H=self.H, z_plane=self.z_plane, roi=self.roi.__dict__)\n', (11923, 11988), True, 'import numpy as np\n'), ((12203, 12237), 'pandas.read_excel', 'pd.read_excel', (['self.gcp_excel_file'], {}), '(self.gcp_excel_file)\n', (12216, 12237), True, 'import pandas as pd\n'), ((12418, 12434), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (12426, 12434), True, 'import numpy as np\n'), ((12452, 12483), 'xml.dom.minidom.parse', 'minidom.parse', (['self.camera_file'], {}), '(self.camera_file)\n', (12465, 12483), False, 'from xml.dom import minidom\n'), ((1827, 1894), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(0, 0)', 'self.rotation', '(1 / self.pixel_size)'], {}), '((0, 0), self.rotation, 1 / self.pixel_size)\n', (1850, 1894), False, 'import cv2\n'), ((6328, 6344), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {}), '((1, 5))\n', (6336, 6344), True, 'import numpy as np\n'), ((6748, 6807), 'sys.exit', 'sys.exit', (['"""Empty Homography Matriz - cannot rectify images"""'], {}), "('Empty Homography Matriz - cannot rectify images')\n", (6756, 6807), False, 'import sys\n'), ((9192, 9204), 'numpy.arange', 'np.arange', (['w'], {}), '(w)\n', (9201, 9204), True, 'import numpy as np\n'), ((9205, 9217), 'numpy.arange', 'np.arange', (['h'], {}), '(h)\n', (9214, 9217), True, 'import numpy as np\n'), ((11258, 11315), 'cv2.solvePnPRansac', 'cv2.solvePnPRansac', (['xyz', 'self.gcp_uv', 'self.mtx', 'self.dist'], {}), '(xyz, self.gcp_uv, self.mtx, self.dist)\n', (11276, 11315), False, 'import cv2\n'), ((11376, 11427), 'cv2.solvePnP', 'cv2.solvePnP', (['xyz', 'self.gcp_uv', 'self.mtx', 'self.dist'], {}), '(xyz, self.gcp_uv, self.mtx, self.dist)\n', (11388, 11427), False, 'import cv2\n'), ((11456, 11480), 'cv2.Rodrigues', 'cv2.Rodrigues', (['self.rvec'], {}), '(self.rvec)\n', (11469, 11480), False, 'import cv2\n'), ((2055, 2069), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (2059, 2069), False, 'from pathlib import Path\n'), ((3993, 4024), 'pathlib.Path', 'Path', (['self.undistort_images_dir'], {}), '(self.undistort_images_dir)\n', (3997, 4024), False, 'from pathlib import Path\n'), ((4244, 4261), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (4254, 4261), False, 'import cv2\n'), ((4333, 4356), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (4349, 4356), False, 'import os\n'), ((4453, 4479), 'cv2.imwrite', 'cv2.imwrite', (['img_name', 'dst'], {}), '(img_name, dst)\n', (4464, 4479), False, 'import cv2\n'), ((6483, 6544), 'numpy.load', 'np.load', (['self.rectification_parameter_file'], {'allow_pickle': '(True)'}), '(self.rectification_parameter_file, allow_pickle=True)\n', (6490, 6544), True, 'import numpy as np\n'), ((6872, 6903), 'pathlib.Path', 'Path', (['self.rectified_images_dir'], {}), '(self.rectified_images_dir)\n', (6876, 6903), False, 'from pathlib import Path\n'), ((7062, 7079), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (7072, 7079), False, 'import cv2\n'), ((7152, 7175), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (7168, 7175), False, 'import os\n'), ((7272, 7298), 'cv2.imwrite', 'cv2.imwrite', (['img_name', 'dst'], {}), '(img_name, dst)\n', (7283, 7298), False, 'import cv2\n'), ((9995, 10013), 'numpy.array', 'np.array', (['[points]'], {}), '([points])\n', (10003, 10013), True, 
'import numpy as np\n'), ((10325, 10343), 'numpy.array', 'np.array', (['[points]'], {}), '([points])\n', (10333, 10343), True, 'import numpy as np\n'), ((2154, 2168), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (2158, 2168), False, 'from pathlib import Path\n'), ((4559, 4595), 'cv2.imshow', 'cv2.imshow', (['"""Undistorted image"""', 'dst'], {}), "('Undistorted image', dst)\n", (4569, 4595), False, 'import cv2\n'), ((5014, 5028), 'av.open', 'av.open', (['fname'], {}), '(fname)\n', (5021, 5028), False, 'import av\n'), ((7795, 7809), 'av.open', 'av.open', (['fname'], {}), '(fname)\n', (7802, 7809), False, 'import av\n'), ((2256, 2270), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (2260, 2270), False, 'from pathlib import Path\n'), ((2471, 2499), 'sys.exit', 'sys.exit', (['"""Unknow file type"""'], {}), "('Unknow file type')\n", (2479, 2499), False, 'import sys\n'), ((2356, 2370), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (2360, 2370), False, 'from pathlib import Path\n'), ((5842, 5866), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'dst'], {}), "('Frame', dst)\n", (5852, 5866), False, 'import cv2\n'), ((5660, 5702), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (5682, 5702), False, 'import cv2\n'), ((8298, 8322), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'dst'], {}), "('Frame', dst)\n", (8308, 8322), False, 'import cv2\n'), ((8859, 8901), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (8881, 8901), False, 'import cv2\n'), ((5953, 5967), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5964, 5967), False, 'import cv2\n'), ((5493, 5516), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (5509, 5516), False, 'import os\n'), ((8425, 8439), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8436, 8439), False, 'import cv2\n'), ((8684, 8707), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (8700, 8707), False, 'import os\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
"""
Dataset and dataloader functions
"""
import os
import json
import random
random.seed(1234)
from random import choice
import pickle
from PIL import Image
import torch
from torch.utils.data import Dataset
from utils.utils import get_token_ids, list2Tensors
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import multiprocessing
class Recipe1M(Dataset):
"""Dataset class for Recipe1M
Parameters
----------
root : string
Path to Recipe1M dataset.
transform : (callable, optional)
A function/transform that takes in a PIL image and returns a transformed version.
split : string
Dataset split (train, val, or test).
max_ingrs : int
Maximum number of ingredients to use.
max_instrs : int
Maximum number of instructions to use.
max_length_ingrs : int
Maximum length of ingredient sentences.
max_length_instrs : int
Maximum length of instruction sentences.
text_only_data : bool
Whether to load paired or text-only samples.
"""
def __init__(self, root, transform=None, split='train',
max_ingrs=20,
max_instrs=20,
max_length_ingrs=15,
max_length_instrs=15,
text_only_data=False):
#load vocabulary
self.vocab_inv = pickle.load(open('../data/vocab.pkl', 'rb'))
self.vocab = {}
for k, v in self.vocab_inv.items():
if type(v) != str:
v = v[0]
self.vocab[v] = k
# suffix to load text only samples or paired samples
suf = '_noimages' if text_only_data else ''
self.data = pickle.load(open(os.path.join(root, 'traindata', split + suf + '.pkl'),
'rb'))
self.root = root
self.ids = list(self.data.keys())
self.split = split
self.transform = transform
self.max_ingrs = max_ingrs
self.max_instrs = max_instrs
self.max_length_ingrs = max_length_ingrs
self.max_length_instrs = max_length_instrs
self.text_only_data = text_only_data
def __getitem__(self, idx):
entry = self.data[self.ids[idx]]
if not self.text_only_data:
# loading images
if self.split == 'train':
# if training, pick an image randomly
img_name = choice(entry['images'])
else:
# if test or val we pick the first image
img_name = entry['images'][0]
img_name = '/'.join(img_name[:4])+'/'+img_name
img = Image.open(os.path.join(self.root, self.split, img_name))
if self.transform is not None:
img = self.transform(img)
else:
img = None
title = entry['title']
ingrs = entry['ingredients']
instrs = entry['instructions']
# turn text into indexes
title = torch.Tensor(get_token_ids(title, self.vocab)[:self.max_length_instrs])
instrs = list2Tensors([get_token_ids(instr, self.vocab)[:self.max_length_instrs] for instr in instrs[:self.max_instrs]])
ingrs = list2Tensors([get_token_ids(ingr, self.vocab)[:self.max_length_ingrs] for ingr in ingrs[:self.max_ingrs]])
return img, title, ingrs, instrs, self.ids[idx]
def __len__(self):
return len(self.ids)
def get_ids(self):
return self.ids
def get_vocab(self):
try:
return self.vocab_inv
        except AttributeError:
return None
def pad_input(input):
"""
creates a padded tensor to fit the longest sequence in the batch
"""
if len(input[0].size()) == 1:
l = [len(elem) for elem in input]
targets = torch.zeros(len(input), max(l)).long()
for i, elem in enumerate(input):
end = l[i]
targets[i, :end] = elem[:end]
else:
n, l = [], []
for elem in input:
n.append(elem.size(0))
l.append(elem.size(1))
targets = torch.zeros(len(input), max(n), max(l)).long()
for i, elem in enumerate(input):
targets[i, :n[i], :l[i]] = elem
return targets
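# Illustrative sketch (not in the original module): for a batch of 1-D token tensors
# of lengths 3, 2 and 1, pad_input returns a (3, 3) LongTensor whose shorter rows are
# zero-padded on the right; 2-D inputs are padded along both dimensions instead.
# The token values below are made up.
#
#   >>> pad_input([torch.tensor([4, 8, 15]), torch.tensor([16, 23]), torch.tensor([42])])
#   tensor([[ 4,  8, 15],
#           [16, 23,  0],
#           [42,  0,  0]])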
def collate_fn(data):
""" collate to consume and batchify recipe data
"""
# Sort a data list by caption length (descending order).
image, titles, ingrs, instrs, ids = zip(*data)
if image[0] is not None:
# Merge images (from tuple of 3D tensor to 4D tensor).
image = torch.stack(image, 0)
else:
image = None
title_targets = pad_input(titles)
ingredient_targets = pad_input(ingrs)
instruction_targets = pad_input(instrs)
return image, title_targets, ingredient_targets, instruction_targets, ids
def get_loader(root, batch_size, resize, im_size, augment=True,
split='train', mode='train',
drop_last=True,
text_only_data=False):
"""Function to get dataset and dataloader for a data split
Parameters
----------
root : string
Path to Recipe1M dataset.
batch_size : int
Batch size.
resize : int
Image size for resizing (keeps aspect ratio)
im_size : int
Image size for cropping.
augment : bool
        Whether to apply training-time image augmentation (random horizontal flip and random crop).
split : string
Dataset split (train, val, or test)
mode : string
Loading mode (impacts augmentations & random sampling)
drop_last : bool
Whether to drop the last batch of data.
text_only_data : type
Whether to load text-only or paired samples.
Returns
-------
loader : a pytorch DataLoader
ds : a pytorch Dataset
"""
transforms_list = [transforms.Resize((resize))]
if mode == 'train' and augment:
# Image preprocessing, normalization for pretrained resnet
transforms_list.append(transforms.RandomHorizontalFlip())
transforms_list.append(transforms.RandomCrop(im_size))
else:
transforms_list.append(transforms.CenterCrop(im_size))
transforms_list.append(transforms.ToTensor())
transforms_list.append(transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)))
transforms_ = transforms.Compose(transforms_list)
ds = Recipe1M(root, transform=transforms_, split=split,
text_only_data=text_only_data)
loader = DataLoader(ds, batch_size=batch_size, shuffle=False,
num_workers=multiprocessing.cpu_count(),
collate_fn=collate_fn, drop_last=drop_last)
return loader, ds
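# Hedged usage sketch (not part of the original module): the dataset root below is a
# placeholder and assumes the Recipe1M pickles and image folders are organised as the
# Recipe1M class above expects.
if __name__ == '__main__':
    loader, dataset = get_loader('/path/to/recipe1m', batch_size=64, resize=256,
                                im_size=224, augment=True, split='train', mode='train')
    for img, titles, ingrs, instrs, ids in loader:
        # img: (B, 3, 224, 224) float tensor; the text targets are zero-padded LongTensors
        print(img.shape, titles.shape, ingrs.shape, instrs.shape)
        break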
| [
"torchvision.transforms.CenterCrop",
"random.choice",
"torch.stack",
"torchvision.transforms.RandomHorizontalFlip",
"os.path.join",
"random.seed",
"multiprocessing.cpu_count",
"torchvision.transforms.RandomCrop",
"utils.utils.get_token_ids",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Compose"
] | [((210, 227), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (221, 227), False, 'import random\n'), ((6431, 6466), 'torchvision.transforms.Compose', 'transforms.Compose', (['transforms_list'], {}), '(transforms_list)\n', (6449, 6466), True, 'import torchvision.transforms as transforms\n'), ((4665, 4686), 'torch.stack', 'torch.stack', (['image', '(0)'], {}), '(image, 0)\n', (4676, 4686), False, 'import torch\n'), ((5883, 5908), 'torchvision.transforms.Resize', 'transforms.Resize', (['resize'], {}), '(resize)\n', (5900, 5908), True, 'import torchvision.transforms as transforms\n'), ((6246, 6267), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6265, 6267), True, 'import torchvision.transforms as transforms\n'), ((6296, 6362), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (6316, 6362), True, 'import torchvision.transforms as transforms\n'), ((6047, 6080), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (6078, 6080), True, 'import torchvision.transforms as transforms\n'), ((6113, 6143), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['im_size'], {}), '(im_size)\n', (6134, 6143), True, 'import torchvision.transforms as transforms\n'), ((6187, 6217), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['im_size'], {}), '(im_size)\n', (6208, 6217), True, 'import torchvision.transforms as transforms\n'), ((6680, 6707), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (6705, 6707), False, 'import multiprocessing\n'), ((1848, 1901), 'os.path.join', 'os.path.join', (['root', '"""traindata"""', "(split + suf + '.pkl')"], {}), "(root, 'traindata', split + suf + '.pkl')\n", (1860, 1901), False, 'import os\n'), ((2556, 2579), 'random.choice', 'choice', (["entry['images']"], {}), "(entry['images'])\n", (2562, 2579), False, 'from random import choice\n'), ((2791, 2836), 'os.path.join', 'os.path.join', (['self.root', 'self.split', 'img_name'], {}), '(self.root, self.split, img_name)\n', (2803, 2836), False, 'import os\n'), ((3131, 3163), 'utils.utils.get_token_ids', 'get_token_ids', (['title', 'self.vocab'], {}), '(title, self.vocab)\n', (3144, 3163), False, 'from utils.utils import get_token_ids, list2Tensors\n'), ((3221, 3253), 'utils.utils.get_token_ids', 'get_token_ids', (['instr', 'self.vocab'], {}), '(instr, self.vocab)\n', (3234, 3253), False, 'from utils.utils import get_token_ids, list2Tensors\n'), ((3349, 3380), 'utils.utils.get_token_ids', 'get_token_ids', (['ingr', 'self.vocab'], {}), '(ingr, self.vocab)\n', (3362, 3380), False, 'from utils.utils import get_token_ids, list2Tensors\n')] |
import os
import yaml
from core.topology import Topology
def create_topology(filename, **kwargs):
_kwargs = kwargs
topo = Topology()
topo.set_properties(_kwargs)
topo.validate()
    stream = open(filename, 'w')
yaml.dump(topo.__dict__, stream, default_flow_style=False)
def read_topology(filename):
    stream = open(filename, 'r')
    topo_props = yaml.load(stream, Loader=yaml.SafeLoader)
topo = Topology()
topo.set_properties(topo_props)
topo.validate()
return topo
def delete_topology(filename):
os.remove(filename)
| [
"os.remove",
"core.topology.Topology",
"yaml.load",
"yaml.dump"
] | [((132, 142), 'core.topology.Topology', 'Topology', ([], {}), '()\n', (140, 142), False, 'from core.topology import Topology\n'), ((233, 291), 'yaml.dump', 'yaml.dump', (['topo.__dict__', 'stream'], {'default_flow_style': '(False)'}), '(topo.__dict__, stream, default_flow_style=False)\n', (242, 291), False, 'import yaml\n'), ((373, 390), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (382, 390), False, 'import yaml\n'), ((402, 412), 'core.topology.Topology', 'Topology', ([], {}), '()\n', (410, 412), False, 'from core.topology import Topology\n'), ((522, 541), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (531, 541), False, 'import os\n')] |
import numpy as np
class Naive_Bayes(object):
def __init__(self, type = "Gaussian", prior = []):
self.type = type
self.prior = prior
def fit(self, X, y):
if((self.type).lower() == "multinomial"):
count_sample = X.shape[0]
separated = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
if len(self.prior)==0:
self.class_log_prior_ = [np.log(len(i) / count_sample) for i in separated]
else:
self.class_log_prior_ = self.prior
count = np.array([np.array(i).sum(axis=0) for i in separated]) + 1.0
self.feature_log_prob_ = np.log(count / count.sum(axis=1)[np.newaxis].T)
return self
if((self.type).lower() == "gaussian"):
separated = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
self.model = np.array([np.c_[np.mean(i, axis=0), np.std(i, axis=0)]
for i in separated])
return self
def _prob(self, x, mean, std):
if((self.type).lower() == "gaussian"):
exponent = np.exp(- ((x - mean)**2 / (2 * std**2)))
return np.log(exponent / (np.sqrt(2 * np.pi) * std))
def predict_log_proba(self, X):
if((self.type).lower() == "multinomial"):
return [(self.feature_log_prob_ * x).sum(axis=1) + self.class_log_prior_
for x in X]
if((self.type).lower() == "gaussian"):
return [[sum(self._prob(i, *s) for s, i in zip(summaries, x))
for summaries in self.model] for x in X]
def predict(self, X):
return np.argmax(self.predict_log_proba(X), axis=1)
def score(self, X, y):
return sum(self.predict(X) == y) / len(y) | [
"numpy.mean",
"numpy.sqrt",
"numpy.unique",
"numpy.exp",
"numpy.array",
"numpy.std"
] | [((942, 985), 'numpy.exp', 'np.exp', (['(-((x - mean) ** 2 / (2 * std ** 2)))'], {}), '(-((x - mean) ** 2 / (2 * std ** 2)))\n', (948, 985), True, 'import numpy as np\n'), ((297, 309), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (306, 309), True, 'import numpy as np\n'), ((729, 741), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (738, 741), True, 'import numpy as np\n'), ((1012, 1030), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1019, 1030), True, 'import numpy as np\n'), ((775, 793), 'numpy.mean', 'np.mean', (['i'], {'axis': '(0)'}), '(i, axis=0)\n', (782, 793), True, 'import numpy as np\n'), ((795, 812), 'numpy.std', 'np.std', (['i'], {'axis': '(0)'}), '(i, axis=0)\n', (801, 812), True, 'import numpy as np\n'), ((485, 496), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (493, 496), True, 'import numpy as np\n')] |
"""microservices URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from rebu import views as rebu_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('api/v1/users/<int:id>/', rebu_views.users, name='user'),
path('api/v1/users/create/', rebu_views.create_user, name='user_create'),
path('api/v1/cooks/<int:id>/', rebu_views.cooks, name='cook'),
path('api/v1/cooks/create/', rebu_views.create_cook, name='cook_create'),
path('api/v1/eaters/<int:id>/', rebu_views.eaters, name='eater'),
path('api/v1/eaters/create/', rebu_views.create_eater, name='eater_create'),
path('api/v1/meals/<int:id>/', rebu_views.meals, name='meal'),
path('api/v1/meals/create/', rebu_views.create_meal, name='meal_create'),
path('api/v1/plates/<int:id>/', rebu_views.plates, name='plate'),
path('api/v1/plates/create/', rebu_views.create_plate, name='plate_create'),
path('api/v1/eater_ratings/<int:id>/', rebu_views.eater_ratings, name='eater_rating'),
path('api/v1/eater_ratings/create/', rebu_views.create_eater_rating, name='eater_rating_create'),
path('api/v1/reviews/<int:id>/', rebu_views.reviews, name='review'),
path('api/v1/reviews/create/', rebu_views.create_review, name='review_create'),
path('api/v1/meals/all/', rebu_views.all_meals, name='all'),
path('api/v1/meals/newest/', rebu_views.newest_meals, name='newest'),
path('api/v1/login/', rebu_views.login, name='login'),
path('api/v1/authenticate/', rebu_views.authenticate, name='authenticate'),
path('api/v1/recommendations/<int:id>/', rebu_views.recommendations, name='recommendation'),
path('api/v1/recommendations/create/', rebu_views.create_recommendation, name='recommendation_create'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"django.conf.urls.static.static",
"django.urls.path"
] | [((2441, 2504), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (2447, 2504), False, 'from django.conf.urls.static import static\n'), ((834, 865), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (838, 865), False, 'from django.urls import path\n'), ((871, 932), 'django.urls.path', 'path', (['"""api/v1/users/<int:id>/"""', 'rebu_views.users'], {'name': '"""user"""'}), "('api/v1/users/<int:id>/', rebu_views.users, name='user')\n", (875, 932), False, 'from django.urls import path\n'), ((938, 1010), 'django.urls.path', 'path', (['"""api/v1/users/create/"""', 'rebu_views.create_user'], {'name': '"""user_create"""'}), "('api/v1/users/create/', rebu_views.create_user, name='user_create')\n", (942, 1010), False, 'from django.urls import path\n'), ((1016, 1077), 'django.urls.path', 'path', (['"""api/v1/cooks/<int:id>/"""', 'rebu_views.cooks'], {'name': '"""cook"""'}), "('api/v1/cooks/<int:id>/', rebu_views.cooks, name='cook')\n", (1020, 1077), False, 'from django.urls import path\n'), ((1083, 1155), 'django.urls.path', 'path', (['"""api/v1/cooks/create/"""', 'rebu_views.create_cook'], {'name': '"""cook_create"""'}), "('api/v1/cooks/create/', rebu_views.create_cook, name='cook_create')\n", (1087, 1155), False, 'from django.urls import path\n'), ((1161, 1225), 'django.urls.path', 'path', (['"""api/v1/eaters/<int:id>/"""', 'rebu_views.eaters'], {'name': '"""eater"""'}), "('api/v1/eaters/<int:id>/', rebu_views.eaters, name='eater')\n", (1165, 1225), False, 'from django.urls import path\n'), ((1231, 1306), 'django.urls.path', 'path', (['"""api/v1/eaters/create/"""', 'rebu_views.create_eater'], {'name': '"""eater_create"""'}), "('api/v1/eaters/create/', rebu_views.create_eater, name='eater_create')\n", (1235, 1306), False, 'from django.urls import path\n'), ((1312, 1373), 'django.urls.path', 'path', (['"""api/v1/meals/<int:id>/"""', 'rebu_views.meals'], {'name': '"""meal"""'}), "('api/v1/meals/<int:id>/', rebu_views.meals, name='meal')\n", (1316, 1373), False, 'from django.urls import path\n'), ((1379, 1451), 'django.urls.path', 'path', (['"""api/v1/meals/create/"""', 'rebu_views.create_meal'], {'name': '"""meal_create"""'}), "('api/v1/meals/create/', rebu_views.create_meal, name='meal_create')\n", (1383, 1451), False, 'from django.urls import path\n'), ((1457, 1521), 'django.urls.path', 'path', (['"""api/v1/plates/<int:id>/"""', 'rebu_views.plates'], {'name': '"""plate"""'}), "('api/v1/plates/<int:id>/', rebu_views.plates, name='plate')\n", (1461, 1521), False, 'from django.urls import path\n'), ((1527, 1602), 'django.urls.path', 'path', (['"""api/v1/plates/create/"""', 'rebu_views.create_plate'], {'name': '"""plate_create"""'}), "('api/v1/plates/create/', rebu_views.create_plate, name='plate_create')\n", (1531, 1602), False, 'from django.urls import path\n'), ((1608, 1698), 'django.urls.path', 'path', (['"""api/v1/eater_ratings/<int:id>/"""', 'rebu_views.eater_ratings'], {'name': '"""eater_rating"""'}), "('api/v1/eater_ratings/<int:id>/', rebu_views.eater_ratings, name=\n 'eater_rating')\n", (1612, 1698), False, 'from django.urls import path\n'), ((1699, 1800), 'django.urls.path', 'path', (['"""api/v1/eater_ratings/create/"""', 'rebu_views.create_eater_rating'], {'name': '"""eater_rating_create"""'}), "('api/v1/eater_ratings/create/', rebu_views.create_eater_rating, name=\n 'eater_rating_create')\n", (1703, 1800), 
False, 'from django.urls import path\n'), ((1801, 1868), 'django.urls.path', 'path', (['"""api/v1/reviews/<int:id>/"""', 'rebu_views.reviews'], {'name': '"""review"""'}), "('api/v1/reviews/<int:id>/', rebu_views.reviews, name='review')\n", (1805, 1868), False, 'from django.urls import path\n'), ((1874, 1952), 'django.urls.path', 'path', (['"""api/v1/reviews/create/"""', 'rebu_views.create_review'], {'name': '"""review_create"""'}), "('api/v1/reviews/create/', rebu_views.create_review, name='review_create')\n", (1878, 1952), False, 'from django.urls import path\n'), ((1958, 2017), 'django.urls.path', 'path', (['"""api/v1/meals/all/"""', 'rebu_views.all_meals'], {'name': '"""all"""'}), "('api/v1/meals/all/', rebu_views.all_meals, name='all')\n", (1962, 2017), False, 'from django.urls import path\n'), ((2023, 2091), 'django.urls.path', 'path', (['"""api/v1/meals/newest/"""', 'rebu_views.newest_meals'], {'name': '"""newest"""'}), "('api/v1/meals/newest/', rebu_views.newest_meals, name='newest')\n", (2027, 2091), False, 'from django.urls import path\n'), ((2097, 2150), 'django.urls.path', 'path', (['"""api/v1/login/"""', 'rebu_views.login'], {'name': '"""login"""'}), "('api/v1/login/', rebu_views.login, name='login')\n", (2101, 2150), False, 'from django.urls import path\n'), ((2156, 2230), 'django.urls.path', 'path', (['"""api/v1/authenticate/"""', 'rebu_views.authenticate'], {'name': '"""authenticate"""'}), "('api/v1/authenticate/', rebu_views.authenticate, name='authenticate')\n", (2160, 2230), False, 'from django.urls import path\n'), ((2236, 2332), 'django.urls.path', 'path', (['"""api/v1/recommendations/<int:id>/"""', 'rebu_views.recommendations'], {'name': '"""recommendation"""'}), "('api/v1/recommendations/<int:id>/', rebu_views.recommendations, name=\n 'recommendation')\n", (2240, 2332), False, 'from django.urls import path\n'), ((2333, 2439), 'django.urls.path', 'path', (['"""api/v1/recommendations/create/"""', 'rebu_views.create_recommendation'], {'name': '"""recommendation_create"""'}), "('api/v1/recommendations/create/', rebu_views.create_recommendation,\n name='recommendation_create')\n", (2337, 2439), False, 'from django.urls import path\n')] |
from datalad.tests.utils_pytest import eq_
from ..renderer import (
NA_STRING,
nadict,
nagen,
)
def test_nagen():
na = nagen()
eq_(str(na), NA_STRING)
eq_(repr(na), 'nagen()')
assert na.unknown is na
assert na['unknown'] is na
eq_(str(nagen('-')), '-')
def test_nadict():
d = nadict({1: 2})
eq_(d[1], 2)
eq_(str(d[2]), NA_STRING)
| [
"datalad.tests.utils_pytest.eq_"
] | [((341, 353), 'datalad.tests.utils_pytest.eq_', 'eq_', (['d[1]', '(2)'], {}), '(d[1], 2)\n', (344, 353), False, 'from datalad.tests.utils_pytest import eq_\n')] |
def patch():
import session_csrf
session_csrf.monkeypatch()
| [
"session_csrf.monkeypatch"
] | [((43, 69), 'session_csrf.monkeypatch', 'session_csrf.monkeypatch', ([], {}), '()\n', (67, 69), False, 'import session_csrf\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from .base.exchange import *
import time
import requests
from datetime import datetime
import calendar
import jwt
QUOINE_REST_URL = 'api.quoine.com'
class Quoine(Exchange):
def __init__(self, apikey, secretkey):
def httpGet(url, resource, params, apikey, sign):
payload = {'nonce': str(int("{:.6f}".format(
time.time()).replace('.', ''))), 'path': resource, 'token_id': self._apikey}
headers = {
'Accept': 'application/json',
'X-Quoine-API-Version': '2',
"X-Quoine-Auth": apikey,
"Sign": jwt.encode(payload, self._secretkey, 'HS256'),
}
return self.session.get('https://' + url + resource,
headers=headers, data=params).json()
def httpPost(url, resource, params, apikey):
payload = {'nonce': str(int("{:.6f}".format(
time.time()).replace('.', ''))), 'path': resource, 'token_id': self._apikey}
headers = {
'Accept': 'application/json',
'X-Quoine-API-Version': '2',
"X-Quoine-Auth": apikey,
"Sign": jwt.encode(payload, self._secretkey, 'HS256'),
}
return self.session.post('https://' + url + resource,
headers=headers, data=params).json()
def httpPut(url, resource, params, apikey):
payload = {'nonce': str(int("{:.6f}".format(
time.time()).replace('.', ''))), 'path': resource, 'token_id': self._apikey}
headers = {
'Accept': 'application/json',
'X-Quoine-API-Version': '2',
"X-Quoine-Auth": apikey,
"Sign": jwt.encode(payload, self._secretkey, 'HS256'),
}
return self.session.put('https://' + url + resource, headers=headers, data=params).json()
super(Quoine, self).__init__(apikey, secretkey)
self.session = requests.session()
self.httpPost = httpPost
self.httpGet = httpGet
self.httpPut = httpPut
def __del__(self):
self.session.close()
def markets(self):
MARKETS_RESOURCE = "/products"
json = self.session.get('https://' + QUOINE_REST_URL +
MARKETS_RESOURCE).json()
li = [[j['id'], j['currency_pair_code']] for j in json]
self.market_dict = dict(li)
return tuple([CurrencyPair(trading=j['base_currency'], settlement=j["quoted_currency"]) for j in json])
def settlements(self):
MARKETS_RESOURCE = "/products"
json = self.session.get('https://' + QUOINE_REST_URL +
MARKETS_RESOURCE).json()
li = [j['quoted_currency'] for j in json]
return tuple(set(li))
def ticker(self, trading, settlement):
TICKER_RESOURCE = "/products/code/CASH/%s" % (trading + settlement)
json = self.session.get('https://' + QUOINE_REST_URL +
TICKER_RESOURCE).json()
utc = datetime.utcfromtimestamp(time.time())
return Ticker(
timestamp=calendar.timegm(utc.timetuple()),
last=float(json["last_traded_price"]),
ask=float(json["market_ask"]),
bid=float(json["market_bid"]),
high=float(json["high_market_ask"]),
low=float(json["low_market_bid"]),
volume=float(json["volume_24h"]),
)
def board(self, item='BTCUSD'):
if not self.market_dict:
self.markets()
product_id = tuple(self.market_dict.keys())[
tuple(self.market_dict.values()).index(item)]
BOARD_RESOURCE = "/products/%s/price_levels" % product_id
json = self.session.get('https://' + QUOINE_REST_URL +
BOARD_RESOURCE).json()
return Board(
asks=[Ask(price=float(ask[0]), size=float(ask[1]))
for ask in json["buy_price_levels"]],
bids=[Bid(price=float(bid[0]), size=float(bid[1]))
for bid in json["sell_price_levels"]],
mid_price=(float(json["buy_price_levels"][0][0]) +
float(json["sell_price_levels"][0][0])) / 2
)
def balance(self):
BALANCE_RESOURCE = "/accounts/balance"
params = {
}
json = self.httpGet(QUOINE_REST_URL,
BALANCE_RESOURCE, params, self._apikey, self._secretkey)
balances = {}
for j in json:
balances[j['currency']] = [
float(j["balance"]), float(j["balance"])]
return balances
def order(self, trading, settlement, order_type, side, price, size):
PRODUCTS_RESOURCE = "/products"
product_json = self.httpGet(QUOINE_REST_URL,
PRODUCTS_RESOURCE, {}, self._apikey, self._secretkey)
products = {}
for j in product_json:
products[j['currency_pair_code']] = j['id']
ORDER_RESOURCE = "/orders"
params = {
"order_type": order_type.lower(),
"product_id": products[trading.upper() + settlement.upper()],
"side": side.lower(),
"price": price,
"quantity": size
}
json = self.httpPost(QUOINE_REST_URL,
ORDER_RESOURCE, params, self._apikey)
return json["id"]
def get_open_orders(self, symbol="BTC_JPY"):
OPEN_ORDERS_RESOURCE = "/orders"
params = {"status": "live"}
json = self.httpGet(QUOINE_REST_URL,
OPEN_ORDERS_RESOURCE, params, self._apikey, self._secretkey)
return json
def cancel_order(self, symbol, order_id):
CANCEL_ORDERS_RESOURCE = "/orders/{0}/cancel".format(order_id)
self.httpPut(QUOINE_REST_URL, CANCEL_ORDERS_RESOURCE, {}, self._apikey, self._secretkey)
def get_fee(self, symbol="BTC_JPY"):
GET_FEE_RESOURCE = "/products"
json = self.httpGet(QUOINE_REST_URL, GET_FEE_RESOURCE, {}, self._apikey, self._secretkey)
res = []
for j in json:
if j["currency_pair_code"] == symbol:
res = [j["taker_fee"], j["maker_fee"]]
break
return res
| [
"requests.session",
"time.time",
"jwt.encode"
] | [((2058, 2076), 'requests.session', 'requests.session', ([], {}), '()\n', (2074, 2076), False, 'import requests\n'), ((3167, 3178), 'time.time', 'time.time', ([], {}), '()\n', (3176, 3178), False, 'import time\n'), ((649, 694), 'jwt.encode', 'jwt.encode', (['payload', 'self._secretkey', '"""HS256"""'], {}), "(payload, self._secretkey, 'HS256')\n", (659, 694), False, 'import jwt\n'), ((1232, 1277), 'jwt.encode', 'jwt.encode', (['payload', 'self._secretkey', '"""HS256"""'], {}), "(payload, self._secretkey, 'HS256')\n", (1242, 1277), False, 'import jwt\n'), ((1816, 1861), 'jwt.encode', 'jwt.encode', (['payload', 'self._secretkey', '"""HS256"""'], {}), "(payload, self._secretkey, 'HS256')\n", (1826, 1861), False, 'import jwt\n'), ((392, 403), 'time.time', 'time.time', ([], {}), '()\n', (401, 403), False, 'import time\n'), ((975, 986), 'time.time', 'time.time', ([], {}), '()\n', (984, 986), False, 'import time\n'), ((1559, 1570), 'time.time', 'time.time', ([], {}), '()\n', (1568, 1570), False, 'import time\n')] |
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
import requests
import os
import pixiedust
import pytz
from dateutil.parser import parse
import datetime
from pixiedust_flightpredict import Configuration
myLogger=pixiedust.getLogger(__name__)
flexBaseUrl = "https://api.flightstats.com/flex/"
def buildUrl(path, **kwargs):
appId=os.environ.get("appId") or Configuration["appId"]
appKey=os.environ.get("appKey") or Configuration["appKey"]
if appId is None or appKey is None:
raise ValueError("appId or appKey is not defined")
return (flexBaseUrl+path).format(**kwargs) + "?appId={0}&appKey={1}".format(appId, appKey)
def toUTC(sDateTime, timeZoneRegionName):
return pytz.timezone( timeZoneRegionName ).localize(parse(sDateTime)).astimezone (pytz.utc)
airportCache={}
def getAirportInfoFromFlightStats(code):
if code in airportCache:
return airportCache[code]
airportPath = "airports/rest/v1/json/{code}/today"
url = buildUrl(airportPath, code=code)
myLogger.debug("Calling airport api with url: " + url)
response = requests.get(url)
if response.status_code != 200:
msg = "Error while trying to get airport info for {0}. Error is {1}".format(code, str(response.reason))
myLogger.error(msg)
raise requests.HTTPError(msg, response=response)
airportCache[code] = response.json()["airport"];
return airportCache[code]
def parseDate(date, departureAirport = None):
dt = pytz.utc.localize(datetime.datetime.utcfromtimestamp(date/1000))
if departureAirport:
info = getAirportInfoFromFlightStats( departureAirport )
dt = dt.astimezone(pytz.timezone(info['timeZoneRegionName']))
return (dt.year,dt.month,dt.day,dt.hour)
def getFlightSchedule(flight, date, departureAirport):
myLogger.debug("getFlightSchedule with args: {0}, {1}".format( flight, date ) )
#parse the flight and date
index = flight.rfind(" ")
carrier = flight[:index]
flightnumber = flight[index+1:]
(year,month,day,hour) = parseDate(date, departureAirport)
(yearDT,monthDT,dayDT,hourDT) = parseDate(date)
schedulesPath = "schedules/rest/v1/json/flight/{carrier}/{flightnumber}/departing/{year}/{month}/{day}"
url = buildUrl(schedulesPath, carrier=carrier, flightnumber=flightnumber, year=year, month=month, day=day)
myLogger.debug("Calling flight stats with url: " + url)
response = requests.get( url )
if response.status_code != 200:
msg = "Error while trying to get schedule for flight {0}. Error is {1}".format(flight, str(response.reason))
myLogger.error(msg)
raise requests.HTTPError(msg, response=response)
payload = response.json()
def findAirport(airportCode):
for airport in payload['appendix']['airports']:
if airport['fs'] == airportCode:
return airport
myLogger.error("error find airport {0} from getFlightSchedule".format(airportCode))
def findEquipment(code):
for equipment in payload['appendix']['equipments']:
if equipment['iata'] == code:
return equipment
myLogger.error("error find equipment {0} from getFlightSchedule".format(code))
return {}
def findAirline(code):
for airline in payload['appendix']['airlines']:
if airline['fs'] == code:
return airline
myLogger.error("error find airline {0} from getFlightSchedule".format(code))
return {}
#find the right flight as there may be more than one
scheduledFlights = payload.pop("scheduledFlights")
thisFlight = None
if len(scheduledFlights)>1:
for scheduledFlight in scheduledFlights:
airport = findAirport(scheduledFlight['departureAirportFsCode'])
if airport is not None:
utcDT = toUTC( scheduledFlight["departureTime"], airport['timeZoneRegionName'])
myLogger.info("Comparing time for airport {0} between {1} and {2}".format( airport['name'], utcDT.hour, hourDT))
if utcDT.hour == hourDT:
thisFlight = scheduledFlight
thisFlight['departureTimeUTC'] = str(utcDT)
arrAirport = findAirport( scheduledFlight['arrivalAirportFsCode'])
thisFlight['arrivalTimeUTC'] = str( toUTC(scheduledFlight['arrivalTime'], arrAirport['timeZoneRegionName']))
break
else:
thisFlight = scheduledFlights[0]
if thisFlight:
airport = findAirport(thisFlight['departureAirportFsCode'])
thisFlight['departureTimeUTC'] = str(toUTC( thisFlight["departureTime"], airport['timeZoneRegionName']))
arrAirport = findAirport( thisFlight['arrivalAirportFsCode'])
thisFlight['arrivalTimeUTC'] = str( toUTC(thisFlight['arrivalTime'], arrAirport['timeZoneRegionName']))
myLogger.info("Found flight corresponding to flight {0}: {1}".format(flight, thisFlight))
payload['scheduledFlight']=thisFlight
#find equipment and airline info for this flight
thisFlight['equipmentInfo']=findEquipment(thisFlight['flightEquipmentIataCode'])
thisFlight['airlineInfo']=findAirline(thisFlight['carrierFsCode'])
else:
raise Exception("Unable to find flight corresponding to flight {0}".format(flight))
payload.pop('request')
return payload
flightsCache={}
def getFlights(airport, date, hour):
parts=date.split("-")
if len(parts)!=3:
raise ValueError("Invalid date {0}".format(date))
(year,month,day) = (parts[0], parts[1], parts[2])
#check the cache first
if len(hour)>2:
hour = hour[:2]
key = airport+date+str(hour)
if key in flightsCache:
return flightsCache[key]
path = "schedules/rest/v1/json/from/{departureAirportCode}/departing/{year}/{month}/{day}/{hourOfDay}"
url = buildUrl(path, departureAirportCode=airport, year=year,month=month,day=day,hourOfDay=hour)
myLogger.debug("Calling getFlights with url: " + url)
response = requests.get( url )
if response.status_code != 200:
msg = "Error while trying to get flights for airport {0} at hour {1}. Error is {2}".format(airport, hour, str(response.reason))
myLogger.error(msg)
raise requests.HTTPError(msg, response=response)
payload = response.json()
if "error" in payload:
msg = "Error while trying to get flights for airport {0} at hour {1}. Error is {2}".format(airport, hour, payload['error']['errorMessage'])
myLogger.error(msg)
raise requests.HTTPError(msg, response = response)
#convert departuretimes from local to UTC
def findAirport(airportCode):
for airport in payload['appendix']['airports']:
if airport['fs'] == airportCode:
return airport
for flight in payload['scheduledFlights']:
airport = findAirport( flight["departureAirportFsCode"] )
if airport is not None:
dt = parse(flight["departureTime"])
flight["departureTimeUTC"] = str(pytz.timezone(airport['timeZoneRegionName']).localize(dt).astimezone (pytz.utc))
else:
myLogger.error("Unable to resolve airport code {0} because it is not in the appendix returned by flightstats".format(flight["departureAirportFsCode"]))
ret = {
"scheduledFlights": payload['scheduledFlights'],
"airports": payload['appendix']['airports'] if 'airports' in payload['appendix'] else [],
"airlines": payload['appendix']['airlines'] if 'airlines' in payload['appendix'] else []
}
flightsCache[key]=ret
return ret
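# Hedged usage sketch (not part of the original module): assumes valid FlightStats
# credentials are available through the appId/appKey environment variables or the
# pixiedust_flightpredict Configuration; the flight, airports and dates below are
# placeholders for illustration only.
if __name__ == '__main__':
    # getFlightSchedule expects the departure date as epoch milliseconds
    schedule = getFlightSchedule('AA 100', 1474329600000, 'JFK')
    # getFlights expects a YYYY-MM-DD date string and an hour of day
    flights = getFlights('BOS', '2016-09-20', '18')
    print(len(flights['scheduledFlights']), 'departures found')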
| [
"datetime.datetime.utcfromtimestamp",
"pytz.timezone",
"dateutil.parser.parse",
"os.environ.get",
"requests.get",
"requests.HTTPError",
"pixiedust.getLogger"
] | [((902, 931), 'pixiedust.getLogger', 'pixiedust.getLogger', (['__name__'], {}), '(__name__)\n', (921, 931), False, 'import pixiedust\n'), ((1765, 1782), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1777, 1782), False, 'import requests\n'), ((3106, 3123), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3118, 3123), False, 'import requests\n'), ((6750, 6767), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6762, 6767), False, 'import requests\n'), ((1024, 1047), 'os.environ.get', 'os.environ.get', (['"""appId"""'], {}), "('appId')\n", (1038, 1047), False, 'import os\n'), ((1085, 1109), 'os.environ.get', 'os.environ.get', (['"""appKey"""'], {}), "('appKey')\n", (1099, 1109), False, 'import os\n'), ((1973, 2015), 'requests.HTTPError', 'requests.HTTPError', (['msg'], {'response': 'response'}), '(msg, response=response)\n', (1991, 2015), False, 'import requests\n'), ((2178, 2225), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(date / 1000)'], {}), '(date / 1000)\n', (2212, 2225), False, 'import datetime\n'), ((3321, 3363), 'requests.HTTPError', 'requests.HTTPError', (['msg'], {'response': 'response'}), '(msg, response=response)\n', (3339, 3363), False, 'import requests\n'), ((6984, 7026), 'requests.HTTPError', 'requests.HTTPError', (['msg'], {'response': 'response'}), '(msg, response=response)\n', (7002, 7026), False, 'import requests\n'), ((7275, 7317), 'requests.HTTPError', 'requests.HTTPError', (['msg'], {'response': 'response'}), '(msg, response=response)\n', (7293, 7317), False, 'import requests\n'), ((2342, 2383), 'pytz.timezone', 'pytz.timezone', (["info['timeZoneRegionName']"], {}), "(info['timeZoneRegionName'])\n", (2355, 2383), False, 'import pytz\n'), ((7695, 7725), 'dateutil.parser.parse', 'parse', (["flight['departureTime']"], {}), "(flight['departureTime'])\n", (7700, 7725), False, 'from dateutil.parser import parse\n'), ((1431, 1447), 'dateutil.parser.parse', 'parse', (['sDateTime'], {}), '(sDateTime)\n', (1436, 1447), False, 'from dateutil.parser import parse\n'), ((1386, 1419), 'pytz.timezone', 'pytz.timezone', (['timeZoneRegionName'], {}), '(timeZoneRegionName)\n', (1399, 1419), False, 'import pytz\n'), ((7771, 7815), 'pytz.timezone', 'pytz.timezone', (["airport['timeZoneRegionName']"], {}), "(airport['timeZoneRegionName'])\n", (7784, 7815), False, 'import pytz\n')] |
from ..models import Teacher, Student, ClassRoom, User
import datetime
'''
Create classrooms that teach two subjects for all teachers.
Each student will study 4 different classes.
Each class will hold 15 students for now.
'''
#
#
# def create_classes_and_class_rooms():
# teachers = Teacher.objects.filter(school_id=1).prefetch_related('subjects')
# total_classes = 60
# # class_room = ClassRoom.objects.create(name='class 1', school_id=1)
# classes_added = 0
# class_index = 1
# classes = []
# for teacher in teachers:
# for subject in teacher.subjects.all():
# cls = Class(name='Class '+str(class_index), school_id=1)
# cls.subject = subject
# classes.append(cls)
# class_index +=1
# classes_added += 1
# Class.manager.bulk_create(classes)
def create_students(standard, classes, student_index):
student_count = 15
iter_index = 1
current_index = student_index
students = []
all_mappings = []
for i in range(student_count):
student = Student()
student.doj = datetime.date(1970, 1, 1)
# user = User()
# user.name = 'Student '+str(current_index)
# user.save()
student.name = 'Student '+str(current_index)
student.ranking = i
student.roll_num = i
student.standard = standard
student.cls = classes
all_mappings.append(student)
current_index += 1
Student.manager.bulk_create(all_mappings)
return current_index
def create_class_mappings():
classes = ClassRoom.objects.all()
class_index = 0
standard = 1
student_index=1
class_count = ClassRoom.objects.all()
for cls in classes:
student_index = create_students(standard, cls, student_index)
# class_index +=4
standard += 1
def run():
# create_classes_and_class_rooms()
create_class_mappings() | [
"datetime.date"
] | [((1103, 1128), 'datetime.date', 'datetime.date', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (1116, 1128), False, 'import datetime\n')] |
# projected by egoriwe999
# my telegram - https://t.me/egoriwe999
# my telegram channel - https://t.me/whitehacker999
# Данный вирус написан и опубликован с целью изучения компьютерной безопасности.
# Не запускать на своем устройсве , рекомендую запускать на виртуальной машине\песочнице
import random
import os
import threading
from multiprocessing import Process
import pyAesCrypt # Данный модуль необходимо скачать (pip3 install pyAesCrypt)
# Диски которые будут шифроваться , вы их можете зашифровать сколько угодно , искользуем dir_(Диск)
dir_e = 'E:/'
dir_f = 'F:/'
# Символы, цифры и буквы для ввода пароля
chars = '+-/*!@#$&=<>abcdifghijklmnopqrstuvwxyzABCDIFGHIJKLMNOPQRSTUVWXYZ1234567890'
# Генерируем наш пароль
def generate_pass():
password = '<PASSWORD>'
for i in range(128):
password += random.choice(chars)
return password
# Теперь переходим к шифрованию
def crypt_file(file, password):
try:
bufferSize = 512 * 1024
pyAesCrypt.encryptFile(str(file), str(file) + ".zvp",
password, bufferSize)
os.remove(file)
except:
pass
# Проходимся по директориям дисков
def crypt_disk(dir, password):
try:
for file in os.listdir(dir):
if os.path.isdir(dir + '\\' + file):
crypt_disk(dir + '\\' + file, password)
if os.path.isfile(dir + '\\' + file):
try:
crypt_file(dir + '\\' + file, password)
except:
pass
except OSError:
pass
# Потоки
def crypting(dir, password):
pycrypt = threading.Thread(target=crypt_disk, args=(dir, password))
pycrypt.start()
# Используем процессы
def crypting(dir, password):
pycrypt = Process(target=crypt_disk, args=(dir, password))
pycrypt.start()
try:
crypting(dir_e, password=generate_pass())
#crypting(dir_f, password=generate_pass())
crypt_disk(dir_e, password=generate_pass()) # запускаем без потоков и процессов
#crypt_disk(dir_f, password=generate_pass()) # диски будут шифроваться по очереди
except Exception as e:
pass | [
"random.choice",
"os.listdir",
"multiprocessing.Process",
"os.path.isfile",
"os.path.isdir",
"threading.Thread",
"os.remove"
] | [((1673, 1730), 'threading.Thread', 'threading.Thread', ([], {'target': 'crypt_disk', 'args': '(dir, password)'}), '(target=crypt_disk, args=(dir, password))\n', (1689, 1730), False, 'import threading\n'), ((1822, 1870), 'multiprocessing.Process', 'Process', ([], {'target': 'crypt_disk', 'args': '(dir, password)'}), '(target=crypt_disk, args=(dir, password))\n', (1829, 1870), False, 'from multiprocessing import Process\n'), ((849, 869), 'random.choice', 'random.choice', (['chars'], {}), '(chars)\n', (862, 869), False, 'import random\n'), ((1128, 1143), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1137, 1143), False, 'import os\n'), ((1272, 1287), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (1282, 1287), False, 'import os\n'), ((1305, 1337), 'os.path.isdir', 'os.path.isdir', (["(dir + '\\\\' + file)"], {}), "(dir + '\\\\' + file)\n", (1318, 1337), False, 'import os\n'), ((1412, 1445), 'os.path.isfile', 'os.path.isfile', (["(dir + '\\\\' + file)"], {}), "(dir + '\\\\' + file)\n", (1426, 1445), False, 'import os\n')] |
from django.utils.translation import ugettext_lazy as _
from rest_framework import response, status
from rest_framework.decorators import action
from core import models as core
from core.api.mixins import (
CompanyReadOnlyViewSet, CompanyViewSet, NestedReadOnlyViewset
)
from core.constants import LEVEL_SUCCESS
from panel.api import serializers, filters
class EventViewSet(CompanyViewSet):
filter_class = filters.EventFilterSet
model = core.Event
permissions_required = 'core:view_event'
queryset = model.objects.all()
serializer_class = serializers.EventSerializer
class LinkViewSet(NestedReadOnlyViewset):
model = core.Link
parent_model = core.Message
parent_relation_field = 'message'
permission_required = 'core:view_link'
queryset = model.objects.all()
serializer_class = serializers.LinkSerializer
class MessageViewSet(CompanyReadOnlyViewSet):
model = core.Message
permission_required = 'core:view_message'
queryset = model.objects.all()
class NotificationViewSet(CompanyViewSet):
filter_class = filters.NotificationFilterSet
model = core.Notification
queryset = model.objects.all()
bypass_permissions = True
serializer_class = serializers.NotificationSerializer
def delete(self, *args, **kwargs):
return response.Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(user=self.request.user)
def update(self, *args, **kwargs):
return response.Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
@action(methods=['post'], detail=False, url_path='set-all-read')
def set_all_read(self, request, *args, **kwargs):
self.model.objects.set_all_read(
company=request.company,
user=request.user
)
return response.Response(
dict(detail=_("All notifications were marked as read.")),
status=status.HTTP_202_ACCEPTED
)
@action(methods=['post'], detail=True, url_path='set-read')
def set_read(self, request, *args, **kwargs):
obj = self.get_object()
level, msg = obj.set_read()
return response.Response(
dict(detail=msg),
status=(
status.HTTP_202_ACCEPTED
if level is LEVEL_SUCCESS else status.HTTP_400_BAD_REQUEST
)
)
class VisitViewSet(NestedReadOnlyViewset):
company_field = 'link__company'
model = core.Visit
parent_model = core.Link
parent_relation_field = 'link'
permission_required = 'core:view_visit'
queryset = model.objects.all()
serializer_class = serializers.VisitSerializer
| [
"rest_framework.response.Response",
"rest_framework.decorators.action",
"django.utils.translation.ugettext_lazy"
] | [((1612, 1675), 'rest_framework.decorators.action', 'action', ([], {'methods': "['post']", 'detail': '(False)', 'url_path': '"""set-all-read"""'}), "(methods=['post'], detail=False, url_path='set-all-read')\n", (1618, 1675), False, 'from rest_framework.decorators import action\n'), ((2013, 2071), 'rest_framework.decorators.action', 'action', ([], {'methods': "['post']", 'detail': '(True)', 'url_path': '"""set-read"""'}), "(methods=['post'], detail=True, url_path='set-read')\n", (2019, 2071), False, 'from rest_framework.decorators import action\n'), ((1315, 1375), 'rest_framework.response.Response', 'response.Response', ([], {'status': 'status.HTTP_405_METHOD_NOT_ALLOWED'}), '(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n', (1332, 1375), False, 'from rest_framework import response, status\n'), ((1545, 1605), 'rest_framework.response.Response', 'response.Response', ([], {'status': 'status.HTTP_405_METHOD_NOT_ALLOWED'}), '(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n', (1562, 1605), False, 'from rest_framework import response, status\n'), ((1907, 1950), 'django.utils.translation.ugettext_lazy', '_', (['"""All notifications were marked as read."""'], {}), "('All notifications were marked as read.')\n", (1908, 1950), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import logging
from voluptuous import UNDEFINED, Schema
from configpp.tree.custom_item_factories import DateTimeLeafFactory, Enum, EnumLeafFactory, LeafBaseFactory, datetime
from configpp.tree.exceptions import ConfigTreeBuilderException
from configpp.tree.item_factory import AttrNodeFactory, DictNodeFactory, LeafFactory, LeafFactoryRegistry, ListNodeFactory, NodeFactory
from configpp.tree.items import LeafBase
from configpp.tree.settings import Settings
logger = logging.getLogger(__name__)
class Tree():
def __init__(self, settings: Settings = None):
self._settings = settings or Settings()
self._root = None # type: NodeFactory
self._extra_items = {}
self._leaf_factory_registry = {
datetime: DateTimeLeafFactory,
Enum: EnumLeafFactory,
LeafBase: LeafBaseFactory,
} # type: LeafFactoryRegistry
def set_root(self, value: NodeFactory):
self._root = value
def register_leaf_factory(self, type_: type, factory: LeafFactory):
self._leaf_factory_registry[type_] = factory
def build_schema(self) -> Schema:
if self._root is None:
raise ConfigTreeBuilderException("There is no root!")
# TODO: resolve this problem somehow else (AttrNodeFactory gives back Schema but the DictNodeFactory and the ListNodeFactory dont)
schema = self._root.create_schema()
if not callable(schema):
schema = Schema(schema)
return schema
def load(self, raw_data: dict):
schema = self.build_schema()
data = schema(raw_data)
return self._root.process_value(data)
def dump(self, data) -> dict:
return self._root.dump(data)
def root(self, excluded_attributes: list = None):
if self._root is not None:
logger.warning("Root node has been set already to: %s", self._root)
def decor(cls):
self._root = AttrNodeFactory(cls, self._settings, self._leaf_factory_registry, excluded_attributes,
external_item_registry = self._extra_items)
return cls
return decor
def dict_root(self, key_type, value_type, default = UNDEFINED):
if self._root is not None:
logger.warning("Root node has been set already to: %s", self._root)
def decor(cls):
self._root = DictNodeFactory(key_type, value_type, self._settings, self._leaf_factory_registry, default)
return cls
return decor
def list_root(self, value_types, default = UNDEFINED):
if self._root is not None:
logger.warning("Root node has been set already to: %s", self._root)
def decor(cls):
self._root = ListNodeFactory(value_types, self._settings, self._leaf_factory_registry, default)
return cls
return decor
def node(self, excluded_attributes: list = None, default = UNDEFINED):
def wrapper(cls: type):
node = AttrNodeFactory(cls, self._settings, self._leaf_factory_registry, excluded_attributes, default,
external_item_registry = self._extra_items)
cls._configpp_tree_item = node
return cls
return wrapper
def dict_node(self, key_type, value_type, default = UNDEFINED):
return DictNodeFactory(key_type, value_type, self._settings, self._leaf_factory_registry, default)
def list_node(self, value_types: list, default = UNDEFINED):
"""TODO"""
return ListNodeFactory(value_types, self._settings, self._leaf_factory_registry, default)
def leaf(self, validator = None, default = UNDEFINED):
return LeafFactory(validator, default)
| [
"logging.getLogger",
"configpp.tree.item_factory.DictNodeFactory",
"configpp.tree.exceptions.ConfigTreeBuilderException",
"voluptuous.Schema",
"configpp.tree.item_factory.ListNodeFactory",
"configpp.tree.settings.Settings",
"configpp.tree.item_factory.LeafFactory",
"configpp.tree.item_factory.AttrNodeFactory"
] | [((471, 498), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (488, 498), False, 'import logging\n'), ((3350, 3446), 'configpp.tree.item_factory.DictNodeFactory', 'DictNodeFactory', (['key_type', 'value_type', 'self._settings', 'self._leaf_factory_registry', 'default'], {}), '(key_type, value_type, self._settings, self.\n _leaf_factory_registry, default)\n', (3365, 3446), False, 'from configpp.tree.item_factory import AttrNodeFactory, DictNodeFactory, LeafFactory, LeafFactoryRegistry, ListNodeFactory, NodeFactory\n'), ((3542, 3628), 'configpp.tree.item_factory.ListNodeFactory', 'ListNodeFactory', (['value_types', 'self._settings', 'self._leaf_factory_registry', 'default'], {}), '(value_types, self._settings, self._leaf_factory_registry,\n default)\n', (3557, 3628), False, 'from configpp.tree.item_factory import AttrNodeFactory, DictNodeFactory, LeafFactory, LeafFactoryRegistry, ListNodeFactory, NodeFactory\n'), ((3700, 3731), 'configpp.tree.item_factory.LeafFactory', 'LeafFactory', (['validator', 'default'], {}), '(validator, default)\n', (3711, 3731), False, 'from configpp.tree.item_factory import AttrNodeFactory, DictNodeFactory, LeafFactory, LeafFactoryRegistry, ListNodeFactory, NodeFactory\n'), ((603, 613), 'configpp.tree.settings.Settings', 'Settings', ([], {}), '()\n', (611, 613), False, 'from configpp.tree.settings import Settings\n'), ((1174, 1221), 'configpp.tree.exceptions.ConfigTreeBuilderException', 'ConfigTreeBuilderException', (['"""There is no root!"""'], {}), "('There is no root!')\n", (1200, 1221), False, 'from configpp.tree.exceptions import ConfigTreeBuilderException\n'), ((1459, 1473), 'voluptuous.Schema', 'Schema', (['schema'], {}), '(schema)\n', (1465, 1473), False, 'from voluptuous import UNDEFINED, Schema\n'), ((1939, 2071), 'configpp.tree.item_factory.AttrNodeFactory', 'AttrNodeFactory', (['cls', 'self._settings', 'self._leaf_factory_registry', 'excluded_attributes'], {'external_item_registry': 'self._extra_items'}), '(cls, self._settings, self._leaf_factory_registry,\n excluded_attributes, external_item_registry=self._extra_items)\n', (1954, 2071), False, 'from configpp.tree.item_factory import AttrNodeFactory, DictNodeFactory, LeafFactory, LeafFactoryRegistry, ListNodeFactory, NodeFactory\n'), ((2388, 2484), 'configpp.tree.item_factory.DictNodeFactory', 'DictNodeFactory', (['key_type', 'value_type', 'self._settings', 'self._leaf_factory_registry', 'default'], {}), '(key_type, value_type, self._settings, self.\n _leaf_factory_registry, default)\n', (2403, 2484), False, 'from configpp.tree.item_factory import AttrNodeFactory, DictNodeFactory, LeafFactory, LeafFactoryRegistry, ListNodeFactory, NodeFactory\n'), ((2748, 2834), 'configpp.tree.item_factory.ListNodeFactory', 'ListNodeFactory', (['value_types', 'self._settings', 'self._leaf_factory_registry', 'default'], {}), '(value_types, self._settings, self._leaf_factory_registry,\n default)\n', (2763, 2834), False, 'from configpp.tree.item_factory import AttrNodeFactory, DictNodeFactory, LeafFactory, LeafFactoryRegistry, ListNodeFactory, NodeFactory\n'), ((3002, 3143), 'configpp.tree.item_factory.AttrNodeFactory', 'AttrNodeFactory', (['cls', 'self._settings', 'self._leaf_factory_registry', 'excluded_attributes', 'default'], {'external_item_registry': 'self._extra_items'}), '(cls, self._settings, self._leaf_factory_registry,\n excluded_attributes, default, external_item_registry=self._extra_items)\n', (3017, 3143), False, 'from configpp.tree.item_factory import AttrNodeFactory, 
DictNodeFactory, LeafFactory, LeafFactoryRegistry, ListNodeFactory, NodeFactory\n')] |
import requests
import os
class ApiBase(object):
"""
This class provides the basis for every remote entity. The resource simply
specifies its ``attributes`` and path, as well as provides setters for the
attributes.
It might also provide custom methods as well as a custom initiator. It is
mandatory that the inheriting entity calls this class's init method as
well::
ApiBase.__init__(self, api_base, _id=_id)
"""
_id = None
api_base = None
def __init__(self, api_base, _id=None):
"""
Creates a new entitiy. If ``_id`` is provided, an already existing
remote entity is fetched, otherwise a new one is created.
:param api_base: URI to the Pythia API instance
:param _id: if given, attempts to fetch an already existing model.
"""
self.api_base = api_base
# init attributes to prevent "no such attribute" errors.
for a in self._attributes:
setattr(self, a, None)
self.check_api_compatibility()
if not _id:
self._create_resource()
else:
self._id = _id
self._fetch_self()
def _fetch_self(self):
"""
Fetches the resource from the remote and sets all fields which are
returned to synch the object with the remote.
"""
res = requests.get(self._resource_path())
        res.raise_for_status()
data = res.json()
assert '_id' in data, 'Invalid _id'
for attribute in self._attributes:
if attribute.startswith('_'):
_attribute = attribute[1:]
else:
_attribute = attribute
if _attribute in data:
setattr(self, attribute, data[_attribute])
def _update_self(self, payload):
"""
Sends all current attribute values to the remote.
"""
res = requests.put(self._resource_path(), data=payload)
def _create_resource(self):
"""
Creates the resource on the remote. The remote returns a dict with
the initial/sent fields, which, if in ``_attributes`` listed, are set
for the current object, thus synched with the remote.
"""
res = requests.post(self._resource_path())
        res.raise_for_status()
data = res.json()
assert '_id' in data, 'Remote entity could not be registered'
self._id = data['_id']
for attribute in self._attributes:
if attribute.startswith('_'):
_attribute = attribute[1:]
else:
_attribute = attribute
if _attribute in data:
setattr(self, attribute, data[_attribute])
def _join_url(self, *args):
"""
Joins an arbritary number of URI fragments
:param: One or more URI fragments that ought to be joined with a single
slash in between
:return: Joined URI
"""
return os.path.join(*args)
def check_api_compatibility(self):
"""
Checks the API version to guarantee compatibility.
"""
res = requests.get(self.api_base)
assert "v1" in res.text, "incompatible or missing API version"
def _resource_path(self):
"""
Returns the full path where the resource can be accessed via the API.
"""
raise NotImplementedError()
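# Hedged sketch (hypothetical, not part of the Pythia client): a concrete entity only
# declares its attribute names and resource path and otherwise defers to ApiBase, as
# the class docstring above requires. The "experiments" endpoint, its fields and the
# demo URL are invented for illustration.
if __name__ == '__main__':
    class Experiment(ApiBase):
        _attributes = ['name', 'status']

        def _resource_path(self):
            # collection path while no _id exists yet, item path afterwards
            base = self._join_url(self.api_base, 'experiments')
            return self._join_url(base, str(self._id)) if self._id else base

    # Registers a new remote entity (requires a running Pythia API at this URL).
    exp = Experiment('http://localhost:5000/api/v1/')
    print(exp._id, exp.name)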
| [
"os.path.join",
"requests.get"
] | [((2987, 3006), 'os.path.join', 'os.path.join', (['*args'], {}), '(*args)\n', (2999, 3006), False, 'import os\n'), ((3144, 3171), 'requests.get', 'requests.get', (['self.api_base'], {}), '(self.api_base)\n', (3156, 3171), False, 'import requests\n')] |
# ---------------------------------------------------------------------------
# Created By: The ArcGIS Data Reviewer Team
# Copyright 2020 Esri
# Licensed under the Apache License, Version 2.0 (the "License"); You
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# A copy of the license is available in the repository's
# LICENSE file.
# Description:
# Copies the records from the selected Reviewer Sessions that meet the optional
# SQL Expression into the chosen output Reviewer Workspace. Output workspace
# can be the same as the input workspace. You have the option to create a
# logfile that records information about the imported records. You also have
# the option to delete the copied records from the input Reviewer Workspace.
# Disclaimer:
# Due to the complex relationship of tables within the Reviewer Workspace,
# modifying the content of this script is not recommended. If you would like
# changes or enhancements to this script, please comment on the template
# through the Resource Center.
# Minimum ArcGIS Version: 10.6
# Last Modified: 11/27/2019
# ---------------------------------------------------------------------------
# Import necessary modules
import arcpy
import os
import datetime
import time
import sys
import uuid
from arcpy import env
#-------------------------------------
# get full path to tables - including qualified table name
# -----------------------------------
def getFullPath(in_workspace, table_name, no_exist_error=False):
full_path = ''
"""In 10.6, the walk function does not return any tables if
connecting to a SQL Express database as a database server. However,
it works when using a .sde connection. This is a workaround"""
if arcpy.Describe(in_workspace).workspaceType == 'RemoteDatabase' and not str(in_workspace).upper().endswith('.SDE'):
## arcpy.AddMessage("list")
arcpy.env.workspace = in_workspace
# table_name will either be a stand-alone table
# list the tables
tables = arcpy.ListTables()
for table in tables:
## arcpy.AddMessage(table)
#find the table that ends with the table name
# this ignores table qualification and GDB_ name changes
if table.upper().endswith(table_name.upper()):
full_path = os.path.join(in_workspace, table)
break
# if table_name does not exist, check to see if it is one of the
# reviewer geometries in the REVDATASET
if full_path == '':
fds = arcpy.ListDatasets("*REVDATASET", "Feature")
for fd in fds:
fcs = arcpy.ListFeatureClasses("", "", fd)
for fc in fcs:
if fc.endswith(table_name):
full_path = os.path.join(in_workspace, fd, fc)
break
else:
## arcpy.AddMessage('walk')
walk = arcpy.da.Walk(in_workspace)
for dirpath, dirnames, filenames in walk:
for name in filenames:
if name.upper().endswith(table_name.upper()) :
full_path = (os.path.join(dirpath, name))
break
# if the table cannot be found in the workspace
if no_exist_error and (full_path == '' or not arcpy.Exists(full_path)):
arcpy.AddError("Cannot find table {} in workspace {}. Please ensure workspace is a valid Reviewer workspace.".format(table_name, in_workspace))
sys.exit(0)
## arcpy.AddMessage(full_path)
return full_path
# ---------------------------------------------------------------------------
# This function determines if the version of the Reviewer Workspace
# ---------------------------------------------------------------------------
def DetermineVersion(RevWorkspace):
version = 'Pre10.6'
VERSIONTABLE = getFullPath(RevWorkspace, "REVWORKSPACEVERSION")
# if the version table exists, the database is at least a 10.6 database
if VERSIONTABLE != '' :
schema_version = [row[0] for row in arcpy.da.SearchCursor(VERSIONTABLE, ['SCHEMAHASH'])]
schema_version = set(schema_version)
if len(schema_version) != 1:
arcpy.AddWarning('Reviewer Version is inconsistent')
if '{DDC860BD-4C40-302F-B5BE-3D0EDA623B6B}' in schema_version:
version = '10.6'
else:
version = 'Unsupported'
else:
main_table = getFullPath(RevWorkspace, "REVTABLEMAIN", True)
fields =[x.name.upper() for x in arcpy.ListFields(main_table)]
if "LIFECYCLEPHASE" not in fields:
version = 'Pre10.3'
## arcpy.AddMessage('database {} is version {}'.format(RevWorkspace, version))
return version
# ---------------------------------------------------------------------------
# This function determines if the Spatial Reference of Input and Output match
# ---------------------------------------------------------------------------
def CompareSR(InFeatures, OutFeatures):
# Get the spatial reference name from the first feature class
InDesc = arcpy.Describe(InFeatures)
InSR = InDesc.spatialReference.name
# Get the spatial reference name from the second feature class
OutDesc = arcpy.Describe(OutFeatures)
OutSR = OutDesc.spatialReference.name
# Do the feature class names match?
if InSR == OutSR:
match = True
else:
match = False
arcpy.AddWarning("Spatial reference of input and output Reveiwer workspaces do not match. Reviewer geometries will be projected")
arcpy.AddWarning("Input Spatial Reference: {}".format(InSR))
arcpy.AddWarning("Output Spatial Reference: {}".format(OutSR))
return match
# -----------------------------------------------------------
# This function is for writing lists of values to the logfile
# also gathers summary information about each dictionary
# -----------------------------------------------------------
def SummarizeDictionaries(logfile, matches, summarydict):
if 'tableName' in matches:
name = matches.pop('tableName')
in_field_name = 'Input ID'
out_field_name = 'Output ID'
if "InIDField" in matches:
in_field_name = matches.pop("InIDField")
if "OutIDField" in matches:
out_field_name = matches.pop("OutIDField")
if len(matches) > 0:
if logfile != '':
logfile.write("\n{}...\n".format(name))
logfile.write(" {} - {} \n".format(in_field_name, out_field_name))
for InItem, OutItem in matches.items():
logfile.write(" {} - {}\n".format(InItem, OutItem))
summarydict[name] = str(len(matches))
return summarydict
# ------------------------------------------------------------------------------
# Copies reviewer geometry features to the output reviewer workspace and session
# ------------------------------------------------------------------------------
def CopyGeometryFeatures(inFeatures, outFeatures, sessionWhereClause, idMap, outSessionID, matchDict):
# determine fields from input feature class
in_names =[x.name for x in arcpy.ListFields(inFeatures)]
if "LINKID" in in_names:
in_link_name = "LINKID"
else:
in_link_name = "LINKGUID"
if 'BITMAP' in in_names:
in_fields = ("OID@", in_link_name, 'BITMAP')
else:
in_fields = ("OID@", in_link_name, "SHAPE@")
## arcpy.AddMessage(in_fields)
# determine fields from output feature class
out_names =[x.name for x in arcpy.ListFields(outFeatures)]
if "LINKID" in out_names:
out_link_name = "LINKID"
else:
out_link_name = "LINKGUID"
if 'BITMAP' in out_names:
out_fields = (out_link_name, "SESSIONID", 'BITMAP')
else:
out_fields = (out_link_name, "SESSIONID", "SHAPE@")
## arcpy.AddMessage(out_fields)
matchDict["InIDField"] = in_link_name
matchDict["OutIDField"] = out_link_name
# open insert cursor
insert = arcpy.da.InsertCursor(outFeatures, out_fields)
try:
with arcpy.da.SearchCursor(inFeatures, in_fields, sessionWhereClause) as cursor:
for row in cursor:
# get linkID value for record
linkID = row[1]
## arcpy.AddMessage(linkID)
# if the link ID is in the idMap, then the record for this geometry
# was ported to the target reviewer workspace
if linkID in idMap:
outLinkID = idMap[linkID]
## arcpy.AddMessage(outLinkID)
# add new row to output feature class
new_row = [outLinkID, outSessionID, row[2]]
outID = insert.insertRow(new_row)
matchDict[linkID] = outLinkID
## inIDs.append(row[0])
## outIDs.append(outID)
finally:
del insert
# ---------------------------------------------------
# Makes a SQL IN clause from a list of values
# ---------------------------------------------------
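# For example (illustrative, with fc standing for any table that has an integer
# SESSIONID field): MakeInClause(fc, "SESSIONID", [1, 2, 3]) is expected to return
# something like "SESSIONID" IN (1,2,3), with the field name wrapped by
# arcpy.AddFieldDelimiters according to the underlying workspace type.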
def MakeInClause(inFC, intFieldName, inList):
whereClause = None
try:
if len(inList) >= 1:
# determine field type
fields = arcpy.ListFields(inFC)
field_type = None
for field in fields:
if field.name.upper() == intFieldName.upper():
field_type = field.type
if field_type:
csv = ""
if field_type in ('Double', 'Integer', 'OID', 'Single', 'SmallInteger'):
for i in inList:
csv += "{0},".format(i)
# Remove trailing comma
csv = csv[:-1]
elif field_type in ('Date', 'GlobalID', 'OID', 'Guid', 'String'):
for i in inList:
csv += "'{0}',".format(i)
# Remove trailing comma
csv = csv[:-1]
else:
arcpy.AddWarning('Query field {} has an unsupported field type {}. Query will not be created.'.format(intFieldName, field_type))
if not csv == "":
whereClause = '{0} IN ({1})'.format(arcpy.AddFieldDelimiters(inFC, intFieldName), csv)
else:
arcpy.AddMessage("Cannot find field {} in {}. Unable to create query.".format(intFieldName, inFC))
else:
arcpy.AddWarning("Unable to create query for field {}. No values to query.".format(intFieldName))
finally:
## arcpy.AddMessage(whereClause)
return whereClause
# ------------------------------------------------------------------
# Deletes rows from an input table/feature class given a list of IDs
# ------------------------------------------------------------------
def DeleteRows(inWorkspace, dictionary):
del_count = 0
    dict_cnt = 0
table = None
edit = arcpy.da.Editor(inWorkspace)
#In Python 3, dict.keys() returns a dict_keys object (a view of the dictionary) which does not have remove method;
# unlike Python 2, where dict.keys() returns a list object.
if arcpy.GetInstallInfo()['ProductName'] == 'Desktop':
idList = dictionary.keys()
else:
idList = list(dictionary)
try:
if 'tableName' in dictionary:
table = dictionary['tableName']
if "InIDField" in dictionary:
field = dictionary['InIDField']
else:
field = 'OID@'
if 'tableName' in idList:
idList.remove('tableName')
if "InIDField" in idList:
idList.remove("InIDField")
if "OutIDField" in idList:
idList.remove("OutIDField")
dict_cnt = len(idList)
if table and len(idList) >= 1:
table_path = getFullPath(inWorkspace, table)
if table_path != '':
# Start an edit session
desc = arcpy.Describe(table_path)
if desc.canVersion == 1 and desc.isVersioned == 1:
edit.startEditing(False, True)
edit.startOperation()
else:
edit.startEditing(False, False)
edit.startOperation()
arcpy.AddMessage("Deleting records from {}".format(table_path))
with arcpy.da.UpdateCursor(table_path, field) as cursor:
for row in cursor:
if row[0] in idList:
idList.remove(row[0])
del_count += 1
cursor.deleteRow()
except Exception as e:
if edit.isEditing:
edit.stopEditing(False)
arcpy.AddError('{}'.format(e))
tb = sys.exc_info()[2]
arcpy.AddError("Failed at Line %i" % tb.tb_lineno)
finally:
if del_count != dict_cnt:
arcpy.AddWarning("Copied {} records from {} but deleted {} records".format(dict_cnt, table, del_count))
if edit.isEditing:
edit.stopEditing(True)
# -----------------------------
# Update REVCHECKRUNTABLE and REVBATCHRUNTABLE records
# -----------------------------
def CopyRunTables(Reviewer_Workspace, Out_Reviewer_Workspace, SessionClause, OutSessionID, CheckRunMap, BatchRunMatches, CheckRunMatches):
try:
REVCHECKRUN = getFullPath(Reviewer_Workspace, "REVCHECKRUNTABLE")
REVBATCHRUN = getFullPath(Reviewer_Workspace, "REVBATCHRUNTABLE")
Out_REVCHECKRUN = getFullPath(Out_Reviewer_Workspace, "REVCHECKRUNTABLE")
Out_REVBATCHRUN = getFullPath(Out_Reviewer_Workspace, "REVBATCHRUNTABLE")
if REVCHECKRUN != '' and REVBATCHRUN != '' and Out_REVCHECKRUN != '' and Out_REVBATCHRUN != '':
CheckRunIDsSelected = CheckRunMap.keys()
# See if there are CHECKRUNIDs that did not return errors
with arcpy.da.SearchCursor(REVCHECKRUN, ["CHECKRUNID"], SessionClause) as cursor:
for row in cursor:
# if the check run ID is in the sessions but not copied, skip the id
if not row[0] in CheckRunIDsSelected:
check_guid = '{' + str(uuid.uuid4()).upper() + '}'
CheckRunMap[row[0]] = check_guid
# Get a list of the batch run IDs for the chosen sessions
BatchRunIDs = []
CheckRunIDs = CheckRunMap.keys()
with arcpy.da.SearchCursor(REVCHECKRUN, ['CHECKRUNID', 'BATCHRUNID'], SessionClause) as cursor:
for row in cursor:
if row[0] in CheckRunIDs:
BatchRunIDs.append(row[1])
BatchRunIDs = list(set(BatchRunIDs))
# ------------------------
# Copy REVBATCHRUN records
# ------------------------
if len(BatchRunIDs) > 0:
# Get the fields from the input and output databases
batchrun_fieldnames = [x.name for x in arcpy.ListFields(REVBATCHRUN)]
out_batchrun_fieldnames = [x.name for x in arcpy.ListFields(Out_REVBATCHRUN)]
REVBATCHRUN_FIELDS = sorted(batchrun_fieldnames)
OUT_REVBATCHRUN_FIELDS = sorted(batchrun_fieldnames)
REVBATCHRUN_RECORDID_INDEX = REVBATCHRUN_FIELDS.index("RECORDID")
in_id_field = 'GLOBALID'
out_id_field = 'GLOBALID'
if in_id_field not in batchrun_fieldnames:
in_id_field = 'ID'
REVBATCHRUN_UID_INDEX = REVBATCHRUN_FIELDS.index(in_id_field)
# at 10.6 the field named GlobalID changed to be ID
if out_id_field not in out_batchrun_fieldnames:
out_id_field = 'ID'
OUT_REVBATCHRUN_FIELDS.remove(in_id_field)
OUT_REVBATCHRUN_FIELDS.insert(REVBATCHRUN_UID_INDEX, out_id_field)
# Find the batch run records that related to the copied check run records
whereClause = MakeInClause(REVBATCHRUN, in_id_field, BatchRunIDs)
# Used to track the new GlobalIDs
batchRunOrigGlobalIDsByNewRecordID = {}
BatchRunMatches["InIDField"] = "RECORDID"
BatchRunMatches["OutIDField"] = "RECORDID"
insert = arcpy.da.InsertCursor(Out_REVBATCHRUN, OUT_REVBATCHRUN_FIELDS)
newGlobalIDsByOrigGlobalID = {}
try:
with arcpy.da.SearchCursor(REVBATCHRUN, REVBATCHRUN_FIELDS, whereClause) as cursor:
for row in cursor:
rowValues = list(row)
# get the original values
batchRunRecordID = row[REVBATCHRUN_RECORDID_INDEX]
origGlobalID = row[REVBATCHRUN_UID_INDEX]
# if the output field is named ID, will not auto populate
# new guid. Create a new guid
if out_id_field == 'ID':
newGlobalID = '{' + str(uuid.uuid4()).upper() + '}'
rowValues[REVBATCHRUN_UID_INDEX] = newGlobalID
newGlobalIDsByOrigGlobalID[origGlobalID] = newGlobalID
# insert a new row
newRecordID = insert.insertRow((rowValues))
# create lists and dict to make old and new values
BatchRunMatches[batchRunRecordID] = newRecordID
# if the field is GlobalID, a new guid was autogenerated
# need to do extra steps to map to new GUID. Get list of record ID
if out_id_field == 'GLOBALID':
batchRunOrigGlobalIDsByNewRecordID[newRecordID] = origGlobalID
finally:
del insert
if out_id_field == 'GLOBALID' and len(batchRunOrigGlobalIDsByNewRecordID) >= 1:
outBatchRunRecordIDs = batchRunOrigGlobalIDsByNewRecordID.keys()
# Get a map of original GlobalIDs to new GlobalIDs
whereClause = MakeInClause(Out_REVBATCHRUN, "RECORDID", outBatchRunRecordIDs)
with arcpy.da.SearchCursor(Out_REVBATCHRUN, ['RECORDID',out_id_field], whereClause) as cursor:
for row in cursor:
recID = row[0]
if recID in batchRunOrigGlobalIDsByNewRecordID:
origGlobalID = batchRunOrigGlobalIDsByNewRecordID[recID]
newGlobalID = row[1]
newGlobalIDsByOrigGlobalID[origGlobalID] = newGlobalID
else:
arcpy.AddWarning("Unable to find original GLOBALID for RECORDID {0}".format(recID))
# ------------------------
# Copy REVCHECKRUN records and update BatchRunID
# ------------------------
if len(CheckRunMap) >= 1:
REVCHECKRUN_FIELDS = [x.name for x in arcpy.ListFields(REVCHECKRUN)]
REVCHECKRUN_RECORDID_INDEX = REVCHECKRUN_FIELDS.index("RECORDID")
REVCHECKRUN_CHECKRUNID_INDEX = REVCHECKRUN_FIELDS.index("CHECKRUNID")
REVCHECKRUN_SESSIONID_INDEX = REVCHECKRUN_FIELDS.index("SESSIONID")
REVCHECKRUN_BATCHRUNID_INDEX = REVCHECKRUN_FIELDS.index("BATCHRUNID")
REVCHECKRUN_CHECKRUNPROPS_INDEX = REVCHECKRUN_FIELDS.index("CHECKRUNPROPERTIES")
insert = arcpy.da.InsertCursor(Out_REVCHECKRUN, REVCHECKRUN_FIELDS)
CheckRunMatches["InIDField"] = "RECORDID"
CheckRunMatches["OutIDField"] = "RECORDID"
try:
with arcpy.da.SearchCursor(REVCHECKRUN, REVCHECKRUN_FIELDS, SessionClause) as cursor:
for row in cursor:
rowValues = list(row)
# get check run ids for records
checkRunID = rowValues[REVCHECKRUN_CHECKRUNID_INDEX]
if checkRunID in CheckRunMap:
newCheckRunID = CheckRunMap[checkRunID]
rowValues[REVCHECKRUN_CHECKRUNID_INDEX] = newCheckRunID
batchRunRecordID = rowValues[REVCHECKRUN_RECORDID_INDEX]
# get batch run ids for records and add to list
batchRunID = rowValues[REVCHECKRUN_BATCHRUNID_INDEX]
if batchRunID in newGlobalIDsByOrigGlobalID:
rowValues[REVCHECKRUN_BATCHRUNID_INDEX] = newGlobalIDsByOrigGlobalID[batchRunID]
# update the session Id
rowValues[REVCHECKRUN_SESSIONID_INDEX] = OutSessionID
# Check BLOB field, BLOB fields cannot be set to None
if rowValues[REVCHECKRUN_CHECKRUNPROPS_INDEX] is None:
rowValues[REVCHECKRUN_CHECKRUNPROPS_INDEX] = bytearray()
# add row
newRecordID = insert.insertRow(rowValues)
CheckRunMatches[batchRunRecordID] = newRecordID
finally:
del insert
else:
arcpy.AddWarning("Unable to identify REVCHECKRUNTABLE or REVBATCHRUNTABLE in Reviewer Workspace "
" No records from these tables will be copied.")
except Exception as e:
arcpy.AddError('{}'.format(e))
tb = sys.exc_info()[2]
arcpy.AddError("Failed at Line %i" % tb.tb_lineno)
finally:
return CheckRunMatches, BatchRunMatches
# ------------------------------------------------------------------
# Main routine: copies the selected Reviewer records into the output workspace
# ------------------------------------------------------------------
def main():
# Script arguments
Reviewer_Workspace = arcpy.GetParameterAsText(0)
Sessions = arcpy.GetParameterAsText(1)
RecordClause = arcpy.GetParameterAsText(3)
Out_Reviewer_Workspace = arcpy.GetParameterAsText(4)
Out_Exist_Session = arcpy.GetParameterAsText(5)
Delete = arcpy.GetParameterAsText(6)
createLog = arcpy.GetParameterAsText(7)
# Input sessions to Python list
SessionsList = Sessions.split(";")
# Strip any trailing/leading ' that might exist
for i,value in enumerate(SessionsList):
SessionsList[i] = value.strip("'")
# ----------------------------------------
    # Check for version compatibility
# ----------------------------------------
in_version = DetermineVersion(Reviewer_Workspace)
out_version = DetermineVersion(Out_Reviewer_Workspace)
    # Check compatibility of databases.
# check to see if either database is pre 10.3
if in_version == 'Pre10.3' or out_version == 'Pre10.3':
if in_version == 'Pre10.3':
db_compatability = 'Incompatable'
arcpy.AddError("Input workspace is out of date."
"Please upgrade the workspace {} to version 10.3 or higher".format(Reviewer_Workspace))
if out_version == 'Pre10.3':
db_compatability = 'Incompatable'
arcpy.AddError("Output workspace is out of date."
"Please upgrade the workspace {} to version 10.3 or higher".format(Out_Reviewer_Workspace))
# if one or more of the reviewer workspaces has a schema newer than 10.6 we
# do not know what has changed so we will not support it
elif in_version == 'Unsupported' or out_version == 'Unsupported':
if in_version == 'Unsupported':
db_compatability = 'Incompatable'
arcpy.AddError("The version of the reviewer workspace {} is not supported."
"The tool is designed for earlier version of the Reviewer Workspace Schema".format(Reviewer_Workspace))
if out_version == 'Unsupported':
db_compatability = 'Incompatable'
arcpy.AddError("The version of the reviewer workspace {} is not supported."
" The tool is designed for earlier version of the Reviewer Workspace Schema".format(Out_Reviewer_Workspace))
# if the output version is newer than the input version, will require upgrade
elif in_version == 'Pre10.6' and out_version != 'Pre10.6':
db_compatability = '10.6Upgrade'
# if the output version is before 10.6 and the input version is newer, cannot migrate records
elif in_version != 'Pre10.6' and out_version == 'Pre10.6':
db_compatability = 'Incompatable'
arcpy.AddError("Input workspace is newer than the output workspace."
"Please upgrade the output workspace {} to the latest version or select a different output workspace".format(Out_Reviewer_Workspace, in_version))
# if both versions are Pre 10.6
elif in_version == 'Pre10.6' and out_version == 'Pre10.6':
db_compatability = 'Old'
# if both versions are Post 10.6
else:
db_compatability = 'New'
# ----------------------------------------
    # If versions are compatible, copy records
# ----------------------------------------
if db_compatability != 'Incompatable':
# --- Paths to tables in Input Reviewer workspace tables ---
REVTABLEMAIN = getFullPath(Reviewer_Workspace, "REVTABLEMAIN", True)
SessionsTable = getFullPath(Reviewer_Workspace, "REVSESSIONTABLE", True)
REVTABLELOC = getFullPath(Reviewer_Workspace, "REVTABLELOCATION")
REVTABLEPOINT = getFullPath(Reviewer_Workspace, "REVTABLEPOINT")
REVTABLELINE = getFullPath(Reviewer_Workspace, "REVTABLELINE")
REVTABLEPOLY = getFullPath(Reviewer_Workspace, "REVTABLEPOLY")
# --- Paths to tables in Output Reviewer workspace tables ---
Out_REVTABLEMAIN = getFullPath(Out_Reviewer_Workspace, "REVTABLEMAIN", True)
Out_SessionsTable = getFullPath(Out_Reviewer_Workspace, "REVSESSIONTABLE", True)
Out_REVTABLELOC = getFullPath(Out_Reviewer_Workspace, "REVTABLELOCATION")
Out_REVTABLEPOINT = getFullPath(Out_Reviewer_Workspace, "REVTABLEPOINT")
Out_REVTABLELINE = getFullPath(Out_Reviewer_Workspace, "REVTABLELINE")
Out_REVTABLEPOLY = getFullPath(Out_Reviewer_Workspace, "REVTABLEPOLY")
# List of selected session IDs
sessionIDs = []
# The main (REVTABLEMAIN) where clause
WhereClause = ""
# Output session ID
OutSessionID = 0
# Variables used for logging purposes
PointMatches = {}
PointMatches['tableName'] = 'REVTABLEPOINT'
LineMatches = {}
LineMatches['tableName'] = 'REVTABLELINE'
PolyMatches = {}
PolyMatches['tableName'] = 'REVTABLEPOLY'
MisMatches = {}
MisMatches['tableName'] = 'REVTABLELOCATION'
RowMatches = {}
RowMatches['tableName'] = 'REVTABLEMAIN'
BatchRunMatches = {}
BatchRunMatches['tableName'] = 'REVBATCHRUNTABLE'
CheckRunMatches = {}
CheckRunMatches['tableName'] = 'REVCHECKRUNTABLE'
log_dicts = [RowMatches, PointMatches, LineMatches, PolyMatches, MisMatches, BatchRunMatches, CheckRunMatches]
ErrorCount = 0
# Get editor for editing
edit = arcpy.da.Editor(Out_Reviewer_Workspace)
try:
# Start an edit session
desc = arcpy.Describe(Out_REVTABLEMAIN)
if desc.canVersion == 1 and desc.isVersioned == 1:
edit.startEditing(False, True)
edit.startOperation()
else:
edit.startEditing(False, False)
edit.startOperation()
# ----------------------------------------
# Build Where Clause for selecting records
# ----------------------------------------
# Get the IDs for the input session(s)
rowcount = int(arcpy.GetCount_management(SessionsTable).getOutput(0))
inSession_dict = {}
with arcpy.da.SearchCursor(SessionsTable, ["SESSIONID", "SESSIONNAME"]) as rows:
for row in rows:
# I am interested in value in column SessionName
if row[1] in SessionsList:
sessionIDs.append(row[0])
inSession_dict[row[0]] = row[1]
sessioncount = len(sessionIDs)
            # If you did not select all the sessions, make a where clause to select
# only features from the desired sessions
WhereClause = ''
if sessioncount != rowcount:
WhereClause = MakeInClause(Out_SessionsTable, "SESSIONID", sessionIDs)
SessionClause = WhereClause
# Append any information from the entered expression to the where clause
if RecordClause:
if WhereClause != '':
WhereClause = WhereClause + " AND " + RecordClause
else:
WhereClause = RecordClause
wherecount = len(WhereClause)
# Limit the length of the where clause to 1000 characters.
# Certain dbms types limit the length of where clause predicates.
# Predicates that use IN or OR operators may be limited to 1000 candidates.
if wherecount > 1000:
arcpy.AddError("The where clause is too long. There are either too many sessions selected or the Expression parameter (RecordClause) is too long.")
sys.exit(0)
else:
# Get output session id
outSession_dict = {}
with arcpy.da.SearchCursor(Out_SessionsTable, ["SESSIONID", "SESSIONNAME"]) as rows:
for row in rows:
# I am interested in value in column SessionName
if row[1] == Out_Exist_Session:
OutSessionID = row[0]
outSession_dict[row[0]] = row[1]
arcpy.AddMessage("Output Reviewer Session id is {0}".format(OutSessionID))
Match = CompareSR(REVTABLEPOINT, Out_REVTABLEPOINT)
# -------------------------
# Copy RevTableMain records
# -------------------------
arcpy.AddMessage("Copying RevTableMain Records")
in_revtable_fields = [x.name for x in arcpy.ListFields(REVTABLEMAIN)]
out_revtable_fields = [x.name for x in arcpy.ListFields(Out_REVTABLEMAIN)]
UNIQUE_REVTABLEMAIN_FIELDS = (set(in_revtable_fields) & set(out_revtable_fields))
READ_REVTABLEMAIN_FIELDS = sorted(list(UNIQUE_REVTABLEMAIN_FIELDS))
WRITE_REVTABLEMAIN_FIELDS = sorted(list(UNIQUE_REVTABLEMAIN_FIELDS))
REVTABLEMAIN_SESSIONID_INDEX = READ_REVTABLEMAIN_FIELDS.index("SESSIONID")
REVTABLEMAIN_CHECKRUNID_INDEX = READ_REVTABLEMAIN_FIELDS.index("CHECKRUNID")
REVTABLEMAIN_GEOMETRYTYPE_INDEX = READ_REVTABLEMAIN_FIELDS.index("GEOMETRYTYPE")
in_id_field = 'RECORDID'
if in_version != 'Pre10.6':
in_id_field = 'ID'
REVTABLEMAIN_ID_INDEX = READ_REVTABLEMAIN_FIELDS.index(in_id_field)
out_id_field = 'RECORDID'
if out_version != 'Pre10.6':
RECORD_GUID_FIELD = 'ID'
if 'ID' not in WRITE_REVTABLEMAIN_FIELDS:
idx = WRITE_REVTABLEMAIN_FIELDS.index("RECORDID")
WRITE_REVTABLEMAIN_FIELDS.remove("RECORDID")
WRITE_REVTABLEMAIN_FIELDS.insert(idx, u'ID')
out_id_field = "ID"
REVTABLEMAIN_ID_INDEX = READ_REVTABLEMAIN_FIELDS.index(in_id_field)
CheckRunMap = {}
RowMatches["InIDField"] = in_id_field
inID_index = READ_REVTABLEMAIN_FIELDS.index(in_id_field)
RowMatches["OutIDField"] = out_id_field
outID_index = WRITE_REVTABLEMAIN_FIELDS.index(out_id_field)
insert = arcpy.da.InsertCursor(Out_REVTABLEMAIN, WRITE_REVTABLEMAIN_FIELDS)
try:
with arcpy.da.SearchCursor(REVTABLEMAIN, READ_REVTABLEMAIN_FIELDS, where_clause=WhereClause) as scursor:
for row in scursor:
ErrorCount += 1
# Data Access SearchCursor's return a tuple which are immutable. We need to create a mutable type so
# we can update the SESSIONID value before inserting the record into the output table.
rowValues = list(row)
sessionID = rowValues[REVTABLEMAIN_SESSIONID_INDEX]
checkRunID = rowValues[REVTABLEMAIN_CHECKRUNID_INDEX]
inRecordID = rowValues[REVTABLEMAIN_ID_INDEX]
# Get CHECKRUNID value
checkRunID = rowValues[REVTABLEMAIN_CHECKRUNID_INDEX]
if checkRunID :
# Create new check run IDs
if checkRunID in CheckRunMap:
check_guid = CheckRunMap[checkRunID]
else:
check_guid = '{' + str(uuid.uuid4()).upper() + '}'
CheckRunMap[checkRunID] = check_guid
rowValues[REVTABLEMAIN_CHECKRUNID_INDEX] = check_guid
# Update the record id map
geomType = rowValues[REVTABLEMAIN_GEOMETRYTYPE_INDEX]
rowValues[REVTABLEMAIN_SESSIONID_INDEX] = OutSessionID
if db_compatability != 'Old':
record_guid = '{' + str(uuid.uuid4()).upper() + '}'
rowValues[REVTABLEMAIN_ID_INDEX] = record_guid
outRecordID = insert.insertRow(rowValues)
if db_compatability == 'Old':
outID = outRecordID
else:
outID = record_guid
RowMatches[inRecordID] = outID
finally:
del insert
# ---------------------------
# Copy REVTABLEPOINT features
# ---------------------------
arcpy.AddMessage("Copying Point Geometries")
CopyGeometryFeatures(REVTABLEPOINT, Out_REVTABLEPOINT, SessionClause, RowMatches, OutSessionID, PointMatches)
# --------------------------
# Copy REVTABLELINE features
# --------------------------
arcpy.AddMessage("Copying Line Geometries")
CopyGeometryFeatures(REVTABLELINE, Out_REVTABLELINE, SessionClause, RowMatches, OutSessionID, LineMatches)
# --------------------------
# Copy REVTABLEPOLY features
# --------------------------
arcpy.AddMessage("Copying Polygon Geometries")
CopyGeometryFeatures(REVTABLEPOLY, Out_REVTABLEPOLY, SessionClause, RowMatches, OutSessionID, PolyMatches)
# ------------------------
# Copy REVTABLELOC records
# ------------------------
arcpy.AddMessage("Copying Location Records")
CopyGeometryFeatures(REVTABLELOC, Out_REVTABLELOC, SessionClause, RowMatches, OutSessionID, MisMatches)
# ------------------------
# Copy Batch Job info records
# ------------------------
CopyRunTables(Reviewer_Workspace, Out_Reviewer_Workspace, SessionClause, OutSessionID, CheckRunMap, BatchRunMatches, CheckRunMatches)
# Save edits
if edit.isEditing:
edit.stopEditing(True)
# If successfully make it to the end of the script and delete is set to
# true - delete the records
if Delete == "true":
for dictionary in log_dicts:
DeleteRows(Reviewer_Workspace, dictionary)
# --------------
# Create logfile
# --------------
if createLog == "true":
# Determine output folder
(filepath, filename) = os.path.split(Out_Reviewer_Workspace)
# Does user have write-access to the output folder?
if not os.access(filepath, os.W_OK):
# Determine where this user has access to write
scratch = arcpy.env.scratchWorkspace
try:
if os.access(scratch, os.W_OK):
(filepath, fileName) = os.path.split(scratch)
else:
createLog = 'false'
except Exception as e:
arcpy.AddWarning("Cannot write logfile. An error occurred while trying to access the geoprocessing scratch workspace: " + e.message)
createLog = "false"
# if we will be able to write output log
if createLog == "true":
now = datetime.datetime.now()
time_str = now.strftime("%Y%m%dT%H%M%S")
logfile = filepath + "\\CopyDataReviewerRecordsLog_" + time_str \
+ ".txt"
log = open(logfile, "w")
# Write Header information
log.write("Source Workspace: " + Reviewer_Workspace + "\n")
log.write("Input Session(s): \n")
for sessionId, sessionName in inSession_dict.items():
log.write(" {}: {}\n".format(sessionId, sessionName))
log.write("\nTarget Workspace: " + Out_Reviewer_Workspace + "\n")
log.write("Output Session: \n")
for sessionId, sessionName in outSession_dict.items():
log.write(" {}: {}\n".format(sessionId, sessionName))
del sessionId
del sessionName
else:
log = ''
# if there is at least one error
#loop through logging dictionaries and get counts
log_dicts = [PointMatches, LineMatches, PolyMatches, MisMatches, RowMatches]
summarydict = {}
for matches in log_dicts:
summarydict = SummarizeDictionaries(log, matches, summarydict)
arcpy.AddMessage("\n")
for dict_name, cnt in summarydict.items():
msg = "Total Records from {}: {}".format(dict_name, cnt)
arcpy.AddMessage(msg)
if createLog == "true":
log.write(msg + "\n")
if createLog == "true":
log.close()
arcpy.AddMessage("\n")
arcpy.AddMessage("Logfile created at: " + logfile)
except Exception as e:
if edit.isEditing:
arcpy.AddMessage("Rolling back edits made to " + Out_Reviewer_Workspace)
edit.stopEditing(False)
arcpy.AddError('{}'.format(e))
tb = sys.exc_info()[2]
arcpy.AddError("Failed at Line %i" % tb.tb_lineno)
if __name__ == '__main__':
main()
| [
"arcpy.ListTables",
"arcpy.da.SearchCursor",
"arcpy.da.Walk",
"arcpy.ListFields",
"sys.exc_info",
"arcpy.ListDatasets",
"sys.exit",
"arcpy.da.Editor",
"arcpy.da.UpdateCursor",
"arcpy.AddMessage",
"arcpy.AddError",
"arcpy.ListFeatureClasses",
"os.path.split",
"arcpy.GetCount_management",
"arcpy.GetInstallInfo",
"arcpy.Describe",
"os.access",
"uuid.uuid4",
"arcpy.Exists",
"arcpy.AddWarning",
"arcpy.GetParameterAsText",
"arcpy.AddFieldDelimiters",
"arcpy.da.InsertCursor",
"os.path.join",
"datetime.datetime.now"
] | [((5490, 5516), 'arcpy.Describe', 'arcpy.Describe', (['InFeatures'], {}), '(InFeatures)\n', (5504, 5516), False, 'import arcpy\n'), ((5639, 5666), 'arcpy.Describe', 'arcpy.Describe', (['OutFeatures'], {}), '(OutFeatures)\n', (5653, 5666), False, 'import arcpy\n'), ((8394, 8440), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['outFeatures', 'out_fields'], {}), '(outFeatures, out_fields)\n', (8415, 8440), False, 'import arcpy\n'), ((11374, 11402), 'arcpy.da.Editor', 'arcpy.da.Editor', (['inWorkspace'], {}), '(inWorkspace)\n', (11389, 11402), False, 'import arcpy\n'), ((22884, 22911), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(0)'], {}), '(0)\n', (22908, 22911), False, 'import arcpy\n'), ((22927, 22954), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(1)'], {}), '(1)\n', (22951, 22954), False, 'import arcpy\n'), ((22974, 23001), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(3)'], {}), '(3)\n', (22998, 23001), False, 'import arcpy\n'), ((23031, 23058), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(4)'], {}), '(4)\n', (23055, 23058), False, 'import arcpy\n'), ((23083, 23110), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(5)'], {}), '(5)\n', (23107, 23110), False, 'import arcpy\n'), ((23124, 23151), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(6)'], {}), '(6)\n', (23148, 23151), False, 'import arcpy\n'), ((23168, 23195), 'arcpy.GetParameterAsText', 'arcpy.GetParameterAsText', (['(7)'], {}), '(7)\n', (23192, 23195), False, 'import arcpy\n'), ((2422, 2440), 'arcpy.ListTables', 'arcpy.ListTables', ([], {}), '()\n', (2438, 2440), False, 'import arcpy\n'), ((3320, 3347), 'arcpy.da.Walk', 'arcpy.da.Walk', (['in_workspace'], {}), '(in_workspace)\n', (3333, 3347), False, 'import arcpy\n'), ((3883, 3894), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3891, 3894), False, 'import sys\n'), ((5833, 5973), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Spatial reference of input and output Reveiwer workspaces do not match. Reviewer geometries will be projected"""'], {}), "(\n 'Spatial reference of input and output Reveiwer workspaces do not match. 
Reviewer geometries will be projected'\n )\n", (5849, 5973), False, 'import arcpy\n'), ((28182, 28221), 'arcpy.da.Editor', 'arcpy.da.Editor', (['Out_Reviewer_Workspace'], {}), '(Out_Reviewer_Workspace)\n', (28197, 28221), False, 'import arcpy\n'), ((2947, 2991), 'arcpy.ListDatasets', 'arcpy.ListDatasets', (['"""*REVDATASET"""', '"""Feature"""'], {}), "('*REVDATASET', 'Feature')\n", (2965, 2991), False, 'import arcpy\n'), ((4601, 4653), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Reviewer Version is inconsistent"""'], {}), "('Reviewer Version is inconsistent')\n", (4617, 4653), False, 'import arcpy\n'), ((7525, 7553), 'arcpy.ListFields', 'arcpy.ListFields', (['inFeatures'], {}), '(inFeatures)\n', (7541, 7553), False, 'import arcpy\n'), ((7928, 7957), 'arcpy.ListFields', 'arcpy.ListFields', (['outFeatures'], {}), '(outFeatures)\n', (7944, 7957), False, 'import arcpy\n'), ((8464, 8528), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['inFeatures', 'in_fields', 'sessionWhereClause'], {}), '(inFeatures, in_fields, sessionWhereClause)\n', (8485, 8528), False, 'import arcpy\n'), ((9652, 9674), 'arcpy.ListFields', 'arcpy.ListFields', (['inFC'], {}), '(inFC)\n', (9668, 9674), False, 'import arcpy\n'), ((11594, 11616), 'arcpy.GetInstallInfo', 'arcpy.GetInstallInfo', ([], {}), '()\n', (11614, 11616), False, 'import arcpy\n'), ((13373, 13423), 'arcpy.AddError', 'arcpy.AddError', (["('Failed at Line %i' % tb.tb_lineno)"], {}), "('Failed at Line %i' % tb.tb_lineno)\n", (13387, 13423), False, 'import arcpy\n'), ((22237, 22390), 'arcpy.AddWarning', 'arcpy.AddWarning', (['"""Unable to identify REVCHECKRUNTABLE or REVBATCHRUNTABLE in Reviewer Workspace No records from these tables will be copied."""'], {}), "(\n 'Unable to identify REVCHECKRUNTABLE or REVBATCHRUNTABLE in Reviewer Workspace No records from these tables will be copied.'\n )\n", (22253, 22390), False, 'import arcpy\n'), ((22502, 22552), 'arcpy.AddError', 'arcpy.AddError', (["('Failed at Line %i' % tb.tb_lineno)"], {}), "('Failed at Line %i' % tb.tb_lineno)\n", (22516, 22552), False, 'import arcpy\n'), ((28291, 28323), 'arcpy.Describe', 'arcpy.Describe', (['Out_REVTABLEMAIN'], {}), '(Out_REVTABLEMAIN)\n', (28305, 28323), False, 'import arcpy\n'), ((39736, 39758), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""\n"""'], {}), "('\\n')\n", (39752, 39758), False, 'import arcpy\n'), ((2129, 2157), 'arcpy.Describe', 'arcpy.Describe', (['in_workspace'], {}), '(in_workspace)\n', (2143, 2157), False, 'import arcpy\n'), ((2723, 2756), 'os.path.join', 'os.path.join', (['in_workspace', 'table'], {}), '(in_workspace, table)\n', (2735, 2756), False, 'import os\n'), ((3041, 3077), 'arcpy.ListFeatureClasses', 'arcpy.ListFeatureClasses', (['""""""', '""""""', 'fd'], {}), "('', '', fd)\n", (3065, 3077), False, 'import arcpy\n'), ((3688, 3711), 'arcpy.Exists', 'arcpy.Exists', (['full_path'], {}), '(full_path)\n', (3700, 3711), False, 'import arcpy\n'), ((4454, 4505), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['VERSIONTABLE', "['SCHEMAHASH']"], {}), "(VERSIONTABLE, ['SCHEMAHASH'])\n", (4475, 4505), False, 'import arcpy\n'), ((4927, 4955), 'arcpy.ListFields', 'arcpy.ListFields', (['main_table'], {}), '(main_table)\n', (4943, 4955), False, 'import arcpy\n'), ((13347, 13361), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (13359, 13361), False, 'import sys\n'), ((14481, 14546), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['REVCHECKRUN', "['CHECKRUNID']", 'SessionClause'], {}), "(REVCHECKRUN, ['CHECKRUNID'], SessionClause)\n", (14502, 14546), 
False, 'import arcpy\n'), ((15036, 15115), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['REVCHECKRUN', "['CHECKRUNID', 'BATCHRUNID']", 'SessionClause'], {}), "(REVCHECKRUN, ['CHECKRUNID', 'BATCHRUNID'], SessionClause)\n", (15057, 15115), False, 'import arcpy\n'), ((16944, 17006), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['Out_REVBATCHRUN', 'OUT_REVBATCHRUN_FIELDS'], {}), '(Out_REVBATCHRUN, OUT_REVBATCHRUN_FIELDS)\n', (16965, 17006), False, 'import arcpy\n'), ((20337, 20395), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['Out_REVCHECKRUN', 'REVCHECKRUN_FIELDS'], {}), '(Out_REVCHECKRUN, REVCHECKRUN_FIELDS)\n', (20358, 20395), False, 'import arcpy\n'), ((22476, 22490), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (22488, 22490), False, 'import sys\n'), ((28925, 28991), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['SessionsTable', "['SESSIONID', 'SESSIONNAME']"], {}), "(SessionsTable, ['SESSIONID', 'SESSIONNAME'])\n", (28946, 28991), False, 'import arcpy\n'), ((30261, 30418), 'arcpy.AddError', 'arcpy.AddError', (['"""The where clause is too long. There are either too many sessions selected or the Expression parameter (RecordClause) is too long."""'], {}), "(\n 'The where clause is too long. There are either too many sessions selected or the Expression parameter (RecordClause) is too long.'\n )\n", (30275, 30418), False, 'import arcpy\n'), ((30425, 30436), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (30433, 30436), False, 'import sys\n'), ((31220, 31268), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Copying RevTableMain Records"""'], {}), "('Copying RevTableMain Records')\n", (31236, 31268), False, 'import arcpy\n'), ((33062, 33128), 'arcpy.da.InsertCursor', 'arcpy.da.InsertCursor', (['Out_REVTABLEMAIN', 'WRITE_REVTABLEMAIN_FIELDS'], {}), '(Out_REVTABLEMAIN, WRITE_REVTABLEMAIN_FIELDS)\n', (33083, 33128), False, 'import arcpy\n'), ((35562, 35606), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Copying Point Geometries"""'], {}), "('Copying Point Geometries')\n", (35578, 35606), False, 'import arcpy\n'), ((35885, 35928), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Copying Line Geometries"""'], {}), "('Copying Line Geometries')\n", (35901, 35928), False, 'import arcpy\n'), ((36204, 36250), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Copying Polygon Geometries"""'], {}), "('Copying Polygon Geometries')\n", (36220, 36250), False, 'import arcpy\n'), ((36520, 36564), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""Copying Location Records"""'], {}), "('Copying Location Records')\n", (36536, 36564), False, 'import arcpy\n'), ((37568, 37605), 'os.path.split', 'os.path.split', (['Out_Reviewer_Workspace'], {}), '(Out_Reviewer_Workspace)\n', (37581, 37605), False, 'import os\n'), ((38443, 38466), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (38464, 38466), False, 'import datetime\n'), ((39904, 39925), 'arcpy.AddMessage', 'arcpy.AddMessage', (['msg'], {}), '(msg)\n', (39920, 39925), False, 'import arcpy\n'), ((40089, 40111), 'arcpy.AddMessage', 'arcpy.AddMessage', (['"""\n"""'], {}), "('\\n')\n", (40105, 40111), False, 'import arcpy\n'), ((40128, 40178), 'arcpy.AddMessage', 'arcpy.AddMessage', (["('Logfile created at: ' + logfile)"], {}), "('Logfile created at: ' + logfile)\n", (40144, 40178), False, 'import arcpy\n'), ((40465, 40515), 'arcpy.AddError', 'arcpy.AddError', (["('Failed at Line %i' % tb.tb_lineno)"], {}), "('Failed at Line %i' % tb.tb_lineno)\n", (40479, 40515), False, 'import arcpy\n'), ((3530, 3557), 'os.path.join', 
'os.path.join', (['dirpath', 'name'], {}), '(dirpath, name)\n', (3542, 3557), False, 'import os\n'), ((12456, 12482), 'arcpy.Describe', 'arcpy.Describe', (['table_path'], {}), '(table_path)\n', (12470, 12482), False, 'import arcpy\n'), ((30553, 30623), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['Out_SessionsTable', "['SESSIONID', 'SESSIONNAME']"], {}), "(Out_SessionsTable, ['SESSIONID', 'SESSIONNAME'])\n", (30574, 30623), False, 'import arcpy\n'), ((37698, 37726), 'os.access', 'os.access', (['filepath', 'os.W_OK'], {}), '(filepath, os.W_OK)\n', (37707, 37726), False, 'import os\n'), ((40261, 40333), 'arcpy.AddMessage', 'arcpy.AddMessage', (["('Rolling back edits made to ' + Out_Reviewer_Workspace)"], {}), "('Rolling back edits made to ' + Out_Reviewer_Workspace)\n", (40277, 40333), False, 'import arcpy\n'), ((40435, 40449), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (40447, 40449), False, 'import sys\n'), ((3194, 3228), 'os.path.join', 'os.path.join', (['in_workspace', 'fd', 'fc'], {}), '(in_workspace, fd, fc)\n', (3206, 3228), False, 'import os\n'), ((10666, 10710), 'arcpy.AddFieldDelimiters', 'arcpy.AddFieldDelimiters', (['inFC', 'intFieldName'], {}), '(inFC, intFieldName)\n', (10690, 10710), False, 'import arcpy\n'), ((12907, 12947), 'arcpy.da.UpdateCursor', 'arcpy.da.UpdateCursor', (['table_path', 'field'], {}), '(table_path, field)\n', (12928, 12947), False, 'import arcpy\n'), ((15589, 15618), 'arcpy.ListFields', 'arcpy.ListFields', (['REVBATCHRUN'], {}), '(REVBATCHRUN)\n', (15605, 15618), False, 'import arcpy\n'), ((15679, 15712), 'arcpy.ListFields', 'arcpy.ListFields', (['Out_REVBATCHRUN'], {}), '(Out_REVBATCHRUN)\n', (15695, 15712), False, 'import arcpy\n'), ((17102, 17169), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['REVBATCHRUN', 'REVBATCHRUN_FIELDS', 'whereClause'], {}), '(REVBATCHRUN, REVBATCHRUN_FIELDS, whereClause)\n', (17123, 17169), False, 'import arcpy\n'), ((18977, 19056), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['Out_REVBATCHRUN', "['RECORDID', out_id_field]", 'whereClause'], {}), "(Out_REVBATCHRUN, ['RECORDID', out_id_field], whereClause)\n", (18998, 19056), False, 'import arcpy\n'), ((19845, 19874), 'arcpy.ListFields', 'arcpy.ListFields', (['REVCHECKRUN'], {}), '(REVCHECKRUN)\n', (19861, 19874), False, 'import arcpy\n'), ((20561, 20630), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['REVCHECKRUN', 'REVCHECKRUN_FIELDS', 'SessionClause'], {}), '(REVCHECKRUN, REVCHECKRUN_FIELDS, SessionClause)\n', (20582, 20630), False, 'import arcpy\n'), ((28821, 28861), 'arcpy.GetCount_management', 'arcpy.GetCount_management', (['SessionsTable'], {}), '(SessionsTable)\n', (28846, 28861), False, 'import arcpy\n'), ((31324, 31354), 'arcpy.ListFields', 'arcpy.ListFields', (['REVTABLEMAIN'], {}), '(REVTABLEMAIN)\n', (31340, 31354), False, 'import arcpy\n'), ((31411, 31445), 'arcpy.ListFields', 'arcpy.ListFields', (['Out_REVTABLEMAIN'], {}), '(Out_REVTABLEMAIN)\n', (31427, 31445), False, 'import arcpy\n'), ((33176, 33268), 'arcpy.da.SearchCursor', 'arcpy.da.SearchCursor', (['REVTABLEMAIN', 'READ_REVTABLEMAIN_FIELDS'], {'where_clause': 'WhereClause'}), '(REVTABLEMAIN, READ_REVTABLEMAIN_FIELDS, where_clause=\n WhereClause)\n', (33197, 33268), False, 'import arcpy\n'), ((37905, 37932), 'os.access', 'os.access', (['scratch', 'os.W_OK'], {}), '(scratch, os.W_OK)\n', (37914, 37932), False, 'import os\n'), ((37985, 38007), 'os.path.split', 'os.path.split', (['scratch'], {}), '(scratch)\n', (37998, 38007), False, 'import os\n'), ((38153, 38296), 
'arcpy.AddWarning', 'arcpy.AddWarning', (["('Cannot write logfile. An error occurred while trying to access the geoprocessing scratch workspace: '\n + e.message)"], {}), "(\n 'Cannot write logfile. An error occurred while trying to access the geoprocessing scratch workspace: '\n + e.message)\n", (38169, 38296), False, 'import arcpy\n'), ((14787, 14799), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (14797, 14799), False, 'import uuid\n'), ((17734, 17746), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (17744, 17746), False, 'import uuid\n'), ((34916, 34928), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (34926, 34928), False, 'import uuid\n'), ((34390, 34402), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (34400, 34402), False, 'import uuid\n')] |
from typing import List
import heapq
class Solution:
# minheap1 O(klogn)
def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
heap = [(p[0] ** 2 + p[1] ** 2, *p) for p in points]
heapq.heapify(heap)
return [heapq.heappop(heap)[1:] for _ in range(k)]
# minheap2 O(nlogk)
def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
return heapq.nsmallest(k, points, lambda p: p[0] ** 2 + p[1] ** 2)
# sorted O(nlogn)
def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
return sorted(points, key=lambda p: p[0] ** 2 + p[1] ** 2)[:k]
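# Illustrative check (hypothetical values): with points = [[1, 3], [-2, 2]] and k = 1,
# each variant above should pick the point (-2, 2), since (-2)**2 + 2**2 = 8 < 1**2 + 3**2 = 10.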
| [
"heapq.heappop",
"heapq.heapify",
"heapq.nsmallest"
] | [((225, 244), 'heapq.heapify', 'heapq.heapify', (['heap'], {}), '(heap)\n', (238, 244), False, 'import heapq\n'), ((421, 480), 'heapq.nsmallest', 'heapq.nsmallest', (['k', 'points', '(lambda p: p[0] ** 2 + p[1] ** 2)'], {}), '(k, points, lambda p: p[0] ** 2 + p[1] ** 2)\n', (436, 480), False, 'import heapq\n'), ((261, 280), 'heapq.heappop', 'heapq.heappop', (['heap'], {}), '(heap)\n', (274, 280), False, 'import heapq\n')] |
"""
Time-dependent boundary conditions
==================================
This example solves a simple diffusion equation in one dimensions with time-dependent
boundary conditions.
"""
from pde import PDE, CartesianGrid, MemoryStorage, ScalarField, plot_kymograph
grid = CartesianGrid([[0, 10]], [64]) # generate grid
state = ScalarField(grid) # generate initial condition
eq = PDE({"c": "laplace(c)"}, bc={"value_expression": "sin(t)"})
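# The bc above keeps the boundary value equal to sin(t) as the solve advances,
# i.e. a time-dependent Dirichlet condition on the ends of the 1D interval,
# which is what the kymograph below visualizes.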
storage = MemoryStorage()
eq.solve(state, t_range=20, dt=1e-4, tracker=storage.tracker(0.1))
# plot the trajectory as a space-time plot
plot_kymograph(storage)
| [
"pde.MemoryStorage",
"pde.plot_kymograph",
"pde.CartesianGrid",
"pde.PDE",
"pde.ScalarField"
] | [((274, 304), 'pde.CartesianGrid', 'CartesianGrid', (['[[0, 10]]', '[64]'], {}), '([[0, 10]], [64])\n', (287, 304), False, 'from pde import PDE, CartesianGrid, MemoryStorage, ScalarField, plot_kymograph\n'), ((330, 347), 'pde.ScalarField', 'ScalarField', (['grid'], {}), '(grid)\n', (341, 347), False, 'from pde import PDE, CartesianGrid, MemoryStorage, ScalarField, plot_kymograph\n'), ((384, 443), 'pde.PDE', 'PDE', (["{'c': 'laplace(c)'}"], {'bc': "{'value_expression': 'sin(t)'}"}), "({'c': 'laplace(c)'}, bc={'value_expression': 'sin(t)'})\n", (387, 443), False, 'from pde import PDE, CartesianGrid, MemoryStorage, ScalarField, plot_kymograph\n'), ((455, 470), 'pde.MemoryStorage', 'MemoryStorage', ([], {}), '()\n', (468, 470), False, 'from pde import PDE, CartesianGrid, MemoryStorage, ScalarField, plot_kymograph\n'), ((582, 605), 'pde.plot_kymograph', 'plot_kymograph', (['storage'], {}), '(storage)\n', (596, 605), False, 'from pde import PDE, CartesianGrid, MemoryStorage, ScalarField, plot_kymograph\n')] |
from lib.utils.config import cfg
from lib.utils.base_utils import PoseTransformer, read_pose, read_pickle, save_pickle
import os
import numpy as np
from transforms3d.quaternions import mat2quat
import glob
from PIL import Image
from scipy import stats
import OpenEXR
import Imath
from multiprocessing.dummy import Pool
import struct
import scipy.io as sio
class DataStatistics(object):
# world_to_camera_pose = np.array([[-1.19209304e-07, 1.00000000e+00, -2.98023188e-08, 1.19209304e-07],
# [-8.94069672e-08, 2.22044605e-16, -1.00000000e+00, 8.94069672e-08],
# [-1.00000000e+00, -8.94069672e-08, 1.19209304e-07, 1.00000000e+00]])
world_to_camera_pose = np.array([[-1.00000024e+00, -8.74227979e-08, -5.02429621e-15, 8.74227979e-08],
[5.02429621e-15, 1.34358856e-07, -1.00000012e+00, -1.34358856e-07],
[8.74227979e-08, -1.00000012e+00, 1.34358856e-07, 1.00000012e+00]])
def __init__(self, class_type):
self.class_type = class_type
self.mask_path = os.path.join(cfg.LINEMOD,'{}/mask/*.png'.format(class_type))
self.dir_path = os.path.join(cfg.LINEMOD_ORIG,'{}/data'.format(class_type))
dataset_pose_dir_path = os.path.join(cfg.DATA_DIR, 'dataset_poses')
os.system('mkdir -p {}'.format(dataset_pose_dir_path))
self.dataset_poses_path = os.path.join(dataset_pose_dir_path, '{}_poses.npy'.format(class_type))
blender_pose_dir_path = os.path.join(cfg.DATA_DIR, 'blender_poses')
os.system('mkdir -p {}'.format(blender_pose_dir_path))
self.blender_poses_path = os.path.join(blender_pose_dir_path, '{}_poses.npy'.format(class_type))
os.system('mkdir -p {}'.format(blender_pose_dir_path))
self.pose_transformer = PoseTransformer(class_type)
def get_proper_crop_size(self):
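        # Scans every object mask, measures the bounding box of the foreground pixels,
        # and prints the min/max box width and height seen across the dataset.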
mask_paths = glob.glob(self.mask_path)
widths = []
heights = []
for mask_path in mask_paths:
mask = Image.open(mask_path).convert('1')
mask = np.array(mask).astype(np.int32)
row_col = np.argwhere(mask == 1)
min_row, max_row = np.min(row_col[:, 0]), np.max(row_col[:, 0])
min_col, max_col = np.min(row_col[:, 1]), np.max(row_col[:, 1])
width = max_col - min_col
height = max_row - min_row
widths.append(width)
heights.append(height)
widths = np.array(widths)
heights = np.array(heights)
print('min width: {}, max width: {}'.format(np.min(widths), np.max(widths)))
print('min height: {}, max height: {}'.format(np.min(heights), np.max(heights)))
def get_quat_translation(self, object_to_camera_pose):
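        # Sketch of the transform chain below: both poses are lifted to 4x4 homogeneous
        # matrices, object_to_world = inv(world_to_camera) @ object_to_camera, and the
        # rotation block is returned as a quaternion next to the translation vector.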
object_to_camera_pose = np.append(object_to_camera_pose, [[0, 0, 0, 1]], axis=0)
world_to_camera_pose = np.append(self.world_to_camera_pose, [[0, 0, 0, 1]], axis=0)
object_to_world_pose = np.dot(np.linalg.inv(world_to_camera_pose), object_to_camera_pose)
quat = mat2quat(object_to_world_pose[:3, :3])
translation = object_to_world_pose[:3, 3]
return quat, translation
def get_dataset_poses(self):
if os.path.exists(self.dataset_poses_path):
poses = np.load(self.dataset_poses_path)
return poses[:, :3], poses[:, 3:]
eulers = []
translations = []
train_set = np.loadtxt(os.path.join(cfg.LINEMOD, '{}/training_range.txt'.format(self.class_type)),np.int32)
for idx in train_set:
rot_path = os.path.join(self.dir_path, 'rot{}.rot'.format(idx))
tra_path = os.path.join(self.dir_path, 'tra{}.tra'.format(idx))
pose = read_pose(rot_path, tra_path)
euler = self.pose_transformer.orig_pose_to_blender_euler(pose)
eulers.append(euler)
translations.append(pose[:, 3])
eulers = np.array(eulers)
translations = np.array(translations)
np.save(self.dataset_poses_path, np.concatenate([eulers, translations], axis=-1))
return eulers, translations
def sample_sphere(self, num_samples):
""" sample angles from the sphere
reference: https://zhuanlan.zhihu.com/p/25988652?group_id=828963677192491008
"""
flat_objects = ['037_scissors', '051_large_clamp', '052_extra_large_clamp']
if self.class_type in flat_objects:
begin_elevation = 30
else:
begin_elevation = 0
ratio = (begin_elevation + 90) / 180
num_points = int(num_samples // (1 - ratio))
phi = (np.sqrt(5) - 1.0) / 2.
azimuths = []
elevations = []
for n in range(num_points - num_samples, num_points):
z = 2. * n / num_points - 1.
azimuths.append(np.rad2deg(2 * np.pi * n * phi % (2 * np.pi)))
elevations.append(np.rad2deg(np.arcsin(z)))
return np.array(azimuths), np.array(elevations)
def sample_poses(self):
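        # Fits a Gaussian KDE to the training eulers/translations, resamples cfg.NUM_SYN
        # poses, then overwrites azimuth/elevation with the sphere samples before saving
        # the result for the Blender rendering step.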
eulers, translations = self.get_dataset_poses()
num_samples = cfg.NUM_SYN
azimuths, elevations = self.sample_sphere(num_samples)
euler_sampler = stats.gaussian_kde(eulers.T)
eulers = euler_sampler.resample(num_samples).T
eulers[:, 0] = azimuths
eulers[:, 1] = elevations
translation_sampler = stats.gaussian_kde(translations.T)
translations = translation_sampler.resample(num_samples).T
np.save(self.blender_poses_path, np.concatenate([eulers, translations], axis=-1))
class YCBDataStatistics(DataStatistics):
def __init__(self, class_type):
super(YCBDataStatistics, self).__init__(class_type)
self.dir_path = os.path.join(cfg.LINEMOD_ORIG, '{}/data'.format(class_type))
self.class_types = np.loadtxt(os.path.join(cfg.YCB, 'image_sets/classes.txt'), dtype=np.str)
self.class_types = np.insert(self.class_types, 0, 'background')
self.train_set = np.loadtxt(os.path.join(cfg.YCB, 'image_sets/train.txt'), dtype=np.str)
self.meta_pattern = os.path.join(cfg.YCB, 'data/{}-meta.mat')
self.dataset_poses_pattern = os.path.join(cfg.DATA_DIR, 'dataset_poses/{}_poses.npy')
def get_dataset_poses(self):
if os.path.exists(self.dataset_poses_path):
poses = np.load(self.dataset_poses_pattern.format(self.class_type))
return poses[:, :3], poses[:, 3:]
dataset_poses = {}
for i in self.train_set:
meta_path = self.meta_pattern.format(i)
meta = sio.loadmat(meta_path)
classes = meta['cls_indexes'].ravel()
poses = meta['poses']
for idx, cls_idx in enumerate(classes):
cls_poses = dataset_poses.setdefault(self.class_types[cls_idx], [[], []])
pose = poses[..., idx]
euler = self.pose_transformer.blender_pose_to_blender_euler(pose)
cls_poses[0].append(euler)
cls_poses[1].append(pose[:, 3])
for class_type, cls_poses in dataset_poses.items():
np.save(self.dataset_poses_pattern.format(class_type), np.concatenate(cls_poses, axis=-1))
cls_poses = dataset_poses[self.class_type]
eulers = np.array(cls_poses[0])
translations = np.array(cls_poses[1])
return eulers, translations
class Renderer(object):
intrinsic_matrix = {
'linemod': np.array([[572.4114, 0., 325.2611],
[0., 573.57043, 242.04899],
[0., 0., 1.]]),
# 'blender': np.array([[280.0, 0.0, 128.0],
# [0.0, 280.0, 128.0],
# [0.0, 0.0, 1.0]]),
'blender': np.array([[700., 0., 320.],
[0., 700., 240.],
[0., 0., 1.]])
}
def __init__(self, class_type):
self.class_type = class_type
self.bg_imgs_path = os.path.join(cfg.DATA_DIR, 'bg_imgs.npy')
self.poses_path = os.path.join(cfg.DATA_DIR, 'blender_poses', '{}_poses.npy').format(class_type)
self.output_dir_path = os.path.join(cfg.LINEMOD,'renders/{}_new').format(class_type)
self.blender_path = '/home/pengsida/Software/blender-2.79a-linux-glibc219-x86_64/blender'
self.blank_blend = os.path.join(cfg.DATA_DIR, 'blank.blend')
self.py_path = os.path.join(cfg.UTILS_DIR, 'render_backend.py')
self.obj_path = os.path.join(cfg.LINEMOD,'{}/{}.ply').format(class_type, class_type)
self.plane_height_path = os.path.join(cfg.DATA_DIR, 'plane_height.pkl')
def get_bg_imgs(self):
if os.path.exists(self.bg_imgs_path):
return
img_paths = glob.glob(os.path.join('/home/liuyuan/data/SUN2012pascalformat/JPEGImages', '*'))
bg_imgs = []
for img_path in img_paths:
img = Image.open(img_path)
row, col = img.size
if row > 500 and col > 500:
bg_imgs.append(img_path)
np.save(self.bg_imgs_path, bg_imgs)
def project_model(self, model_3d, pose, camera_type):
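        # Standard pinhole projection: x_cam = R @ X + t, then u_hom = K @ x_cam and the
        # final division by depth (z) yields pixel coordinates.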
camera_model_2d = np.dot(model_3d, pose[:, :3].T) + pose[:, 3]
camera_model_2d = np.dot(camera_model_2d, self.intrinsic_matrix[camera_type].T)
return camera_model_2d[:, :2] / camera_model_2d[:, 2:]
@staticmethod
def exr_to_png(exr_path):
depth_path = exr_path.replace('.png0001.exr', '.png')
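        # The rendered EXR stores a depth channel; in this setup pixels that keep the
        # value 1 appear to be background (an assumption read from the code), so the
        # (dmap != 1) test below yields a binary object mask that is saved as a PNG.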
exr_image = OpenEXR.InputFile(exr_path)
dw = exr_image.header()['dataWindow']
(width, height) = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
def read_exr(s, width, height):
mat = np.fromstring(s, dtype=np.float32)
mat = mat.reshape(height, width)
return mat
dmap, _, _ = [read_exr(s, width, height) for s in exr_image.channels('BGR', Imath.PixelType(Imath.PixelType.FLOAT))]
dmap = Image.fromarray((dmap != 1).astype(np.int32))
dmap.save(depth_path)
exr_image.close()
os.system('rm {}'.format(exr_path))
def sample_poses(self):
statistician = DataStatistics(self.class_type)
statistician.sample_poses()
def get_plane_height(self):
if os.path.exists(self.plane_height_path):
plane_height = read_pickle(self.plane_height_path)
else:
plane_height = {}
if self.class_type in plane_height:
return plane_height[self.class_type]
else:
pose_transformer = PoseTransformer(self.class_type)
model = pose_transformer.get_blender_model()
height = np.min(model[:, -1])
plane_height[self.class_type] = height
save_pickle(plane_height, self.plane_height_path)
return height
def run(self):
""" Render images
1. prepare background images
2. sample poses from the pose distribution of training data
3. call the blender to render images
"""
self.get_bg_imgs()
self.sample_poses()
if not os.path.exists(self.output_dir_path):
os.mkdir(self.output_dir_path)
os.system('{} {} --background --python {} -- --input {} --output_dir {} --bg_imgs {} --poses_path {}'.
format(self.blender_path, self.blank_blend, self.py_path, self.obj_path,
self.output_dir_path, self.bg_imgs_path, self.poses_path))
depth_paths = glob.glob(os.path.join(self.output_dir_path, '*.exr'))
for depth_path in depth_paths:
self.exr_to_png(depth_path)
@staticmethod
def multi_thread_render():
# objects = ['ape', 'benchvise', 'bowl', 'can', 'cat', 'cup', 'driller', 'duck',
# 'glue', 'holepuncher', 'iron', 'lamp', 'phone', 'cam', 'eggbox']
objects = ['lamp', 'phone', 'cam', 'benchvise', 'cam']
def render(class_type):
renderer = Renderer(class_type)
renderer.run()
with Pool(processes=2) as pool:
pool.map(render, objects)
class YCBRenderer(Renderer):
def __init__(self, class_type):
super(YCBRenderer, self).__init__(class_type)
self.output_dir_path = os.path.join(cfg.YCB, 'renders/{}').format(class_type)
self.blank_blend = os.path.join(cfg.DATA_DIR, 'blank.blend')
self.obj_path = os.path.join(cfg.YCB, 'models', class_type, 'textured.obj')
self.class_types = np.loadtxt(os.path.join(cfg.YCB, 'image_sets/classes.txt'), dtype=np.str)
self.class_types = np.insert(self.class_types, 0, 'background')
def sample_poses(self):
statistician = YCBDataStatistics(self.class_type)
statistician.sample_poses()
@staticmethod
def multi_thread_render():
objects = ['003_cracker_box', '004_sugar_box', '005_tomato_soup_can', '006_mustard_bottle']
def render(class_type):
renderer = YCBRenderer(class_type)
renderer.run()
with Pool(processes=2) as pool:
pool.map(render, objects)
class MultiRenderer(Renderer):
class_types = ['ape', 'benchvise', 'can', 'cat', 'driller', 'duck', 'glue',
'holepuncher', 'iron', 'lamp', 'phone', 'cam', 'eggbox']
def __init__(self):
super(MultiRenderer, self).__init__('')
self.poses_path = os.path.join(cfg.DATA_DIR, '{}_poses.npy')
self.output_dir_path = '/home/pengsida/Datasets/LINEMOD/renders/all_objects'
def sample_poses(self):
for class_type in self.class_types:
statistician = DataStatistics(class_type)
statistician.sample_poses()
def run(self):
""" Render images
1. prepare background images
2. sample poses from the pose distribution of training data
3. call the blender to render images
"""
self.get_bg_imgs()
self.sample_poses()
os.system('{} {} --background --python {} -- --input {} --output_dir {} --use_cycles True --bg_imgs {} --poses_path {}'.
format(self.blender_path, self.blank_blend, self.py_path, self.obj_path, self.output_dir_path, self.bg_imgs_path, self.poses_path))
depth_paths = glob.glob(os.path.join(self.output_dir_path, '*.exr'))
for depth_path in depth_paths:
self.exr_to_png(depth_path)
class OpenGLRenderer(object):
intrinsic_matrix = {
'linemod': np.array([[572.4114, 0., 325.2611],
[0., 573.57043, 242.04899],
[0., 0., 1.]]),
# 'blender': np.array([[280.0, 0.0, 128.0],
# [0.0, 280.0, 128.0],
# [0.0, 0.0, 1.0]]),
'blender': np.array([[700., 0., 320.],
[0., 700., 240.],
[0., 0., 1.]])
}
models = {}
def __init__(self, dataset='linemod'):
if dataset == 'linemod':
self.ply_pattern = os.path.join(cfg.LINEMOD, '{}/{}.ply')
elif dataset == 'ycb':
self.ply_pattern = os.path.join(cfg.YCB, 'models/{}/{}.ply')
else:
raise NameError('Wrong dataset')
def load_ply(self, class_type, color=None):
""" Loads a 3D mesh model from a PLY file.
:return: The loaded model given by a dictionary with items:
'pts' (nx3 ndarray), 'normals' (nx3 ndarray), 'colors' (nx3 ndarray),
'faces' (mx3 ndarray) - the latter three are optional.
"""
if class_type in self.models:
return self.models[class_type]
path = self.ply_pattern.format(class_type, class_type)
f = open(path, 'r')
n_pts = 0
n_faces = 0
face_n_corners = 3 # Only triangular faces are supported
pt_props = []
face_props = []
is_binary = False
header_vertex_section = False
header_face_section = False
# Read header
while True:
line = f.readline().rstrip('\n').rstrip('\r') # Strip the newline character(s)
if line.startswith('element vertex'):
n_pts = int(line.split()[-1])
header_vertex_section = True
header_face_section = False
elif line.startswith('element face'):
n_faces = int(line.split()[-1])
header_vertex_section = False
header_face_section = True
elif line.startswith('element'): # Some other element
header_vertex_section = False
header_face_section = False
elif line.startswith('property') and header_vertex_section:
# (name of the property, data type)
pt_props.append((line.split()[-1], line.split()[-2]))
elif line.startswith('property list') and header_face_section:
elems = line.split()
if elems[-1] == 'vertex_indices':
# (name of the property, data type)
face_props.append(('n_corners', elems[2]))
for i in range(face_n_corners):
face_props.append(('ind_' + str(i), elems[3]))
else:
print('Warning: Not supported face property: ' + elems[-1])
elif line.startswith('format'):
if 'binary' in line:
is_binary = True
elif line.startswith('end_header'):
break
# Prepare data structures
model = {}
model['pts'] = np.zeros((n_pts, 3), np.float)
if n_faces > 0:
model['faces'] = np.zeros((n_faces, face_n_corners), np.float)
pt_props_names = [p[0] for p in pt_props]
is_normal = False
if {'nx', 'ny', 'nz'}.issubset(set(pt_props_names)):
is_normal = True
model['normals'] = np.zeros((n_pts, 3), np.float)
is_color = False
model['colors'] = np.zeros((n_pts, 3), np.float)
if {'red', 'green', 'blue'}.issubset(set(pt_props_names)):
is_color = True
model['colors'] = np.zeros((n_pts, 3), np.float)
is_texture = False
if {'texture_u', 'texture_v'}.issubset(set(pt_props_names)):
is_texture = True
model['texture_uv'] = np.zeros((n_pts, 2), np.float)
formats = { # For binary format
'float': ('f', 4),
'double': ('d', 8),
'int': ('i', 4),
'uchar': ('B', 1)
}
# Load vertices
for pt_id in range(n_pts):
prop_vals = {}
load_props = ['x', 'y', 'z', 'nx', 'ny', 'nz',
'red', 'green', 'blue', 'texture_u', 'texture_v']
if is_binary:
for prop in pt_props:
format = formats[prop[1]]
val = struct.unpack(format[0], f.read(format[1]))[0]
if prop[0] in load_props:
prop_vals[prop[0]] = val
else:
elems = f.readline().rstrip('\n').rstrip('\r').split()
for prop_id, prop in enumerate(pt_props):
if prop[0] in load_props:
prop_vals[prop[0]] = elems[prop_id]
model['pts'][pt_id, 0] = float(prop_vals['x'])
model['pts'][pt_id, 1] = float(prop_vals['y'])
model['pts'][pt_id, 2] = float(prop_vals['z'])
if is_normal:
model['normals'][pt_id, 0] = float(prop_vals['nx'])
model['normals'][pt_id, 1] = float(prop_vals['ny'])
model['normals'][pt_id, 2] = float(prop_vals['nz'])
if color is not None:
model['colors'][pt_id, 0] = color[0]
model['colors'][pt_id, 1] = color[1]
model['colors'][pt_id, 2] = color[2]
elif is_color:
model['colors'][pt_id, 0] = float(prop_vals['red'])
model['colors'][pt_id, 1] = float(prop_vals['green'])
model['colors'][pt_id, 2] = float(prop_vals['blue'])
if is_texture:
model['texture_uv'][pt_id, 0] = float(prop_vals['texture_u'])
model['texture_uv'][pt_id, 1] = float(prop_vals['texture_v'])
# Load faces
for face_id in range(n_faces):
prop_vals = {}
if is_binary:
for prop in face_props:
format = formats[prop[1]]
val = struct.unpack(format[0], f.read(format[1]))[0]
if prop[0] == 'n_corners':
if val != face_n_corners:
print('Error: Only triangular faces are supported.')
print('Number of face corners: ' + str(val))
exit(-1)
else:
prop_vals[prop[0]] = val
else:
elems = f.readline().rstrip('\n').rstrip('\r').split()
for prop_id, prop in enumerate(face_props):
if prop[0] == 'n_corners':
if int(elems[prop_id]) != face_n_corners:
print('Error: Only triangular faces are supported.')
print('Number of face corners: ' + str(int(elems[prop_id])))
exit(-1)
else:
prop_vals[prop[0]] = elems[prop_id]
model['faces'][face_id, 0] = int(prop_vals['ind_0'])
model['faces'][face_id, 1] = int(prop_vals['ind_1'])
model['faces'][face_id, 2] = int(prop_vals['ind_2'])
f.close()
model['pts'] *= 1000.
self.models[class_type] = model
return model
def render(self, class_type, pose, camera_type=None, intrinsic_matrix=None, render_type='depth', color=None):
from lib.utils.opengl_render_backend import render
model = self.load_ply(class_type, color=color)
if camera_type is not None:
K = self.intrinsic_matrix[camera_type]
elif intrinsic_matrix is not None:
K = intrinsic_matrix
else:
raise ValueError('Not specify the camera matrix')
R = pose[:, :3]
t = pose[:, 3:] * 1000.
if render_type == 'depth':
return render(model, im_size=[640, 480], K=K, R=R, t=t, clip_near=10, clip_far=10000, mode='depth') / 1000.
else:
return render(model, im_size=[640, 480], K=K, R=R, t=t, clip_near=10, clip_far=10000, mode='rgb')
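# Hedged example (assumes `pose` is a 3x4 object-to-camera matrix with translation in
# metres, e.g. a LINEMOD ground-truth pose, and that cfg.LINEMOD holds the 'cat' model
# referenced by ply_pattern).
def _opengl_depth_example(pose):
    """Load the 'cat' PLY (the 'pts'/'normals'/'colors'/'faces' dict described in load_ply)
    and rasterize a 640x480 depth map in metres for the given pose."""
    renderer = OpenGLRenderer(dataset='linemod')
    model = renderer.load_ply('cat')    # cached in OpenGLRenderer.models; vertices are in mm
    n_vertices = model['pts'].shape[0]
    depth = renderer.render('cat', pose, camera_type='linemod', render_type='depth')
    return n_vertices, depth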
| [
"numpy.sqrt",
"scipy.io.loadmat",
"transforms3d.quaternions.mat2quat",
"numpy.array",
"lib.utils.base_utils.read_pickle",
"lib.utils.base_utils.read_pose",
"numpy.save",
"os.path.exists",
"scipy.stats.gaussian_kde",
"Imath.PixelType",
"lib.utils.base_utils.save_pickle",
"numpy.max",
"lib.utils.opengl_render_backend.render",
"numpy.dot",
"os.mkdir",
"numpy.concatenate",
"numpy.min",
"numpy.fromstring",
"numpy.rad2deg",
"glob.glob",
"numpy.insert",
"PIL.Image.open",
"lib.utils.base_utils.PoseTransformer",
"os.path.join",
"numpy.arcsin",
"numpy.append",
"numpy.zeros",
"numpy.argwhere",
"numpy.linalg.inv",
"numpy.load",
"OpenEXR.InputFile",
"multiprocessing.dummy.Pool"
] | [((746, 953), 'numpy.array', 'np.array', (['[[-1.00000024, -8.74227979e-08, -5.02429621e-15, 8.74227979e-08], [\n 5.02429621e-15, 1.34358856e-07, -1.00000012, -1.34358856e-07], [\n 8.74227979e-08, -1.00000012, 1.34358856e-07, 1.00000012]]'], {}), '([[-1.00000024, -8.74227979e-08, -5.02429621e-15, 8.74227979e-08],\n [5.02429621e-15, 1.34358856e-07, -1.00000012, -1.34358856e-07], [\n 8.74227979e-08, -1.00000012, 1.34358856e-07, 1.00000012]])\n', (754, 953), True, 'import numpy as np\n'), ((1320, 1363), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""dataset_poses"""'], {}), "(cfg.DATA_DIR, 'dataset_poses')\n", (1332, 1363), False, 'import os\n'), ((1564, 1607), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""blender_poses"""'], {}), "(cfg.DATA_DIR, 'blender_poses')\n", (1576, 1607), False, 'import os\n'), ((1872, 1899), 'lib.utils.base_utils.PoseTransformer', 'PoseTransformer', (['class_type'], {}), '(class_type)\n', (1887, 1899), False, 'from lib.utils.base_utils import PoseTransformer, read_pose, read_pickle, save_pickle\n'), ((1958, 1983), 'glob.glob', 'glob.glob', (['self.mask_path'], {}), '(self.mask_path)\n', (1967, 1983), False, 'import glob\n'), ((2528, 2544), 'numpy.array', 'np.array', (['widths'], {}), '(widths)\n', (2536, 2544), True, 'import numpy as np\n'), ((2563, 2580), 'numpy.array', 'np.array', (['heights'], {}), '(heights)\n', (2571, 2580), True, 'import numpy as np\n'), ((2847, 2903), 'numpy.append', 'np.append', (['object_to_camera_pose', '[[0, 0, 0, 1]]'], {'axis': '(0)'}), '(object_to_camera_pose, [[0, 0, 0, 1]], axis=0)\n', (2856, 2903), True, 'import numpy as np\n'), ((2935, 2995), 'numpy.append', 'np.append', (['self.world_to_camera_pose', '[[0, 0, 0, 1]]'], {'axis': '(0)'}), '(self.world_to_camera_pose, [[0, 0, 0, 1]], axis=0)\n', (2944, 2995), True, 'import numpy as np\n'), ((3109, 3147), 'transforms3d.quaternions.mat2quat', 'mat2quat', (['object_to_world_pose[:3, :3]'], {}), '(object_to_world_pose[:3, :3])\n', (3117, 3147), False, 'from transforms3d.quaternions import mat2quat\n'), ((3276, 3315), 'os.path.exists', 'os.path.exists', (['self.dataset_poses_path'], {}), '(self.dataset_poses_path)\n', (3290, 3315), False, 'import os\n'), ((3980, 3996), 'numpy.array', 'np.array', (['eulers'], {}), '(eulers)\n', (3988, 3996), True, 'import numpy as np\n'), ((4020, 4042), 'numpy.array', 'np.array', (['translations'], {}), '(translations)\n', (4028, 4042), True, 'import numpy as np\n'), ((5237, 5265), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['eulers.T'], {}), '(eulers.T)\n', (5255, 5265), False, 'from scipy import stats\n'), ((5417, 5451), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['translations.T'], {}), '(translations.T)\n', (5435, 5451), False, 'from scipy import stats\n'), ((5961, 6005), 'numpy.insert', 'np.insert', (['self.class_types', '(0)', '"""background"""'], {}), "(self.class_types, 0, 'background')\n", (5970, 6005), True, 'import numpy as np\n'), ((6131, 6172), 'os.path.join', 'os.path.join', (['cfg.YCB', '"""data/{}-meta.mat"""'], {}), "(cfg.YCB, 'data/{}-meta.mat')\n", (6143, 6172), False, 'import os\n'), ((6210, 6266), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""dataset_poses/{}_poses.npy"""'], {}), "(cfg.DATA_DIR, 'dataset_poses/{}_poses.npy')\n", (6222, 6266), False, 'import os\n'), ((6312, 6351), 'os.path.exists', 'os.path.exists', (['self.dataset_poses_path'], {}), '(self.dataset_poses_path)\n', (6326, 6351), False, 'import os\n'), ((7305, 7327), 'numpy.array', 'np.array', (['cls_poses[0]'], {}), 
'(cls_poses[0])\n', (7313, 7327), True, 'import numpy as np\n'), ((7351, 7373), 'numpy.array', 'np.array', (['cls_poses[1]'], {}), '(cls_poses[1])\n', (7359, 7373), True, 'import numpy as np\n'), ((7481, 7568), 'numpy.array', 'np.array', (['[[572.4114, 0.0, 325.2611], [0.0, 573.57043, 242.04899], [0.0, 0.0, 1.0]]'], {}), '([[572.4114, 0.0, 325.2611], [0.0, 573.57043, 242.04899], [0.0, 0.0,\n 1.0]])\n', (7489, 7568), True, 'import numpy as np\n'), ((7794, 7863), 'numpy.array', 'np.array', (['[[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0.0, 0.0, 1.0]]'], {}), '([[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0.0, 0.0, 1.0]])\n', (7802, 7863), True, 'import numpy as np\n'), ((8033, 8074), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""bg_imgs.npy"""'], {}), "(cfg.DATA_DIR, 'bg_imgs.npy')\n", (8045, 8074), False, 'import os\n'), ((8398, 8439), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""blank.blend"""'], {}), "(cfg.DATA_DIR, 'blank.blend')\n", (8410, 8439), False, 'import os\n'), ((8463, 8511), 'os.path.join', 'os.path.join', (['cfg.UTILS_DIR', '"""render_backend.py"""'], {}), "(cfg.UTILS_DIR, 'render_backend.py')\n", (8475, 8511), False, 'import os\n'), ((8638, 8684), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""plane_height.pkl"""'], {}), "(cfg.DATA_DIR, 'plane_height.pkl')\n", (8650, 8684), False, 'import os\n'), ((8724, 8757), 'os.path.exists', 'os.path.exists', (['self.bg_imgs_path'], {}), '(self.bg_imgs_path)\n', (8738, 8757), False, 'import os\n'), ((9099, 9134), 'numpy.save', 'np.save', (['self.bg_imgs_path', 'bg_imgs'], {}), '(self.bg_imgs_path, bg_imgs)\n', (9106, 9134), True, 'import numpy as np\n'), ((9291, 9352), 'numpy.dot', 'np.dot', (['camera_model_2d', 'self.intrinsic_matrix[camera_type].T'], {}), '(camera_model_2d, self.intrinsic_matrix[camera_type].T)\n', (9297, 9352), True, 'import numpy as np\n'), ((9547, 9574), 'OpenEXR.InputFile', 'OpenEXR.InputFile', (['exr_path'], {}), '(exr_path)\n', (9564, 9574), False, 'import OpenEXR\n'), ((10311, 10349), 'os.path.exists', 'os.path.exists', (['self.plane_height_path'], {}), '(self.plane_height_path)\n', (10325, 10349), False, 'import os\n'), ((12376, 12417), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""blank.blend"""'], {}), "(cfg.DATA_DIR, 'blank.blend')\n", (12388, 12417), False, 'import os\n'), ((12442, 12501), 'os.path.join', 'os.path.join', (['cfg.YCB', '"""models"""', 'class_type', '"""textured.obj"""'], {}), "(cfg.YCB, 'models', class_type, 'textured.obj')\n", (12454, 12501), False, 'import os\n'), ((12630, 12674), 'numpy.insert', 'np.insert', (['self.class_types', '(0)', '"""background"""'], {}), "(self.class_types, 0, 'background')\n", (12639, 12674), True, 'import numpy as np\n'), ((13422, 13464), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""{}_poses.npy"""'], {}), "(cfg.DATA_DIR, '{}_poses.npy')\n", (13434, 13464), False, 'import os\n'), ((14492, 14579), 'numpy.array', 'np.array', (['[[572.4114, 0.0, 325.2611], [0.0, 573.57043, 242.04899], [0.0, 0.0, 1.0]]'], {}), '([[572.4114, 0.0, 325.2611], [0.0, 573.57043, 242.04899], [0.0, 0.0,\n 1.0]])\n', (14500, 14579), True, 'import numpy as np\n'), ((14805, 14874), 'numpy.array', 'np.array', (['[[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0.0, 0.0, 1.0]]'], {}), '([[700.0, 0.0, 320.0], [0.0, 700.0, 240.0], [0.0, 0.0, 1.0]])\n', (14813, 14874), True, 'import numpy as np\n'), ((17639, 17669), 'numpy.zeros', 'np.zeros', (['(n_pts, 3)', 'np.float'], {}), '((n_pts, 3), np.float)\n', (17647, 17669), True, 'import numpy as np\n'), ((18050, 
18080), 'numpy.zeros', 'np.zeros', (['(n_pts, 3)', 'np.float'], {}), '((n_pts, 3), np.float)\n', (18058, 18080), True, 'import numpy as np\n'), ((2190, 2212), 'numpy.argwhere', 'np.argwhere', (['(mask == 1)'], {}), '(mask == 1)\n', (2201, 2212), True, 'import numpy as np\n'), ((3034, 3069), 'numpy.linalg.inv', 'np.linalg.inv', (['world_to_camera_pose'], {}), '(world_to_camera_pose)\n', (3047, 3069), True, 'import numpy as np\n'), ((3337, 3369), 'numpy.load', 'np.load', (['self.dataset_poses_path'], {}), '(self.dataset_poses_path)\n', (3344, 3369), True, 'import numpy as np\n'), ((3780, 3809), 'lib.utils.base_utils.read_pose', 'read_pose', (['rot_path', 'tra_path'], {}), '(rot_path, tra_path)\n', (3789, 3809), False, 'from lib.utils.base_utils import PoseTransformer, read_pose, read_pickle, save_pickle\n'), ((4084, 4131), 'numpy.concatenate', 'np.concatenate', (['[eulers, translations]'], {'axis': '(-1)'}), '([eulers, translations], axis=-1)\n', (4098, 4131), True, 'import numpy as np\n'), ((4990, 5008), 'numpy.array', 'np.array', (['azimuths'], {}), '(azimuths)\n', (4998, 5008), True, 'import numpy as np\n'), ((5010, 5030), 'numpy.array', 'np.array', (['elevations'], {}), '(elevations)\n', (5018, 5030), True, 'import numpy as np\n'), ((5560, 5607), 'numpy.concatenate', 'np.concatenate', (['[eulers, translations]'], {'axis': '(-1)'}), '([eulers, translations], axis=-1)\n', (5574, 5607), True, 'import numpy as np\n'), ((5871, 5918), 'os.path.join', 'os.path.join', (['cfg.YCB', '"""image_sets/classes.txt"""'], {}), "(cfg.YCB, 'image_sets/classes.txt')\n", (5883, 5918), False, 'import os\n'), ((6042, 6087), 'os.path.join', 'os.path.join', (['cfg.YCB', '"""image_sets/train.txt"""'], {}), "(cfg.YCB, 'image_sets/train.txt')\n", (6054, 6087), False, 'import os\n'), ((6611, 6633), 'scipy.io.loadmat', 'sio.loadmat', (['meta_path'], {}), '(meta_path)\n', (6622, 6633), True, 'import scipy.io as sio\n'), ((8809, 8879), 'os.path.join', 'os.path.join', (['"""/home/liuyuan/data/SUN2012pascalformat/JPEGImages"""', '"""*"""'], {}), "('/home/liuyuan/data/SUN2012pascalformat/JPEGImages', '*')\n", (8821, 8879), False, 'import os\n'), ((8956, 8976), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (8966, 8976), False, 'from PIL import Image\n'), ((9220, 9251), 'numpy.dot', 'np.dot', (['model_3d', 'pose[:, :3].T'], {}), '(model_3d, pose[:, :3].T)\n', (9226, 9251), True, 'import numpy as np\n'), ((9757, 9791), 'numpy.fromstring', 'np.fromstring', (['s'], {'dtype': 'np.float32'}), '(s, dtype=np.float32)\n', (9770, 9791), True, 'import numpy as np\n'), ((10378, 10413), 'lib.utils.base_utils.read_pickle', 'read_pickle', (['self.plane_height_path'], {}), '(self.plane_height_path)\n', (10389, 10413), False, 'from lib.utils.base_utils import PoseTransformer, read_pose, read_pickle, save_pickle\n'), ((10597, 10629), 'lib.utils.base_utils.PoseTransformer', 'PoseTransformer', (['self.class_type'], {}), '(self.class_type)\n', (10612, 10629), False, 'from lib.utils.base_utils import PoseTransformer, read_pose, read_pickle, save_pickle\n'), ((10708, 10728), 'numpy.min', 'np.min', (['model[:, -1]'], {}), '(model[:, -1])\n', (10714, 10728), True, 'import numpy as np\n'), ((10792, 10841), 'lib.utils.base_utils.save_pickle', 'save_pickle', (['plane_height', 'self.plane_height_path'], {}), '(plane_height, self.plane_height_path)\n', (10803, 10841), False, 'from lib.utils.base_utils import PoseTransformer, read_pose, read_pickle, save_pickle\n'), ((11147, 11183), 'os.path.exists', 'os.path.exists', 
(['self.output_dir_path'], {}), '(self.output_dir_path)\n', (11161, 11183), False, 'import os\n'), ((11197, 11227), 'os.mkdir', 'os.mkdir', (['self.output_dir_path'], {}), '(self.output_dir_path)\n', (11205, 11227), False, 'import os\n'), ((11547, 11590), 'os.path.join', 'os.path.join', (['self.output_dir_path', '"""*.exr"""'], {}), "(self.output_dir_path, '*.exr')\n", (11559, 11590), False, 'import os\n'), ((12077, 12094), 'multiprocessing.dummy.Pool', 'Pool', ([], {'processes': '(2)'}), '(processes=2)\n', (12081, 12094), False, 'from multiprocessing.dummy import Pool\n'), ((12540, 12587), 'os.path.join', 'os.path.join', (['cfg.YCB', '"""image_sets/classes.txt"""'], {}), "(cfg.YCB, 'image_sets/classes.txt')\n", (12552, 12587), False, 'import os\n'), ((13069, 13086), 'multiprocessing.dummy.Pool', 'Pool', ([], {'processes': '(2)'}), '(processes=2)\n', (13073, 13086), False, 'from multiprocessing.dummy import Pool\n'), ((14292, 14335), 'os.path.join', 'os.path.join', (['self.output_dir_path', '"""*.exr"""'], {}), "(self.output_dir_path, '*.exr')\n", (14304, 14335), False, 'import os\n'), ((15066, 15104), 'os.path.join', 'os.path.join', (['cfg.LINEMOD', '"""{}/{}.ply"""'], {}), "(cfg.LINEMOD, '{}/{}.ply')\n", (15078, 15104), False, 'import os\n'), ((17723, 17768), 'numpy.zeros', 'np.zeros', (['(n_faces, face_n_corners)', 'np.float'], {}), '((n_faces, face_n_corners), np.float)\n', (17731, 17768), True, 'import numpy as np\n'), ((17967, 17997), 'numpy.zeros', 'np.zeros', (['(n_pts, 3)', 'np.float'], {}), '((n_pts, 3), np.float)\n', (17975, 17997), True, 'import numpy as np\n'), ((18206, 18236), 'numpy.zeros', 'np.zeros', (['(n_pts, 3)', 'np.float'], {}), '((n_pts, 3), np.float)\n', (18214, 18236), True, 'import numpy as np\n'), ((18398, 18428), 'numpy.zeros', 'np.zeros', (['(n_pts, 2)', 'np.float'], {}), '((n_pts, 2), np.float)\n', (18406, 18428), True, 'import numpy as np\n'), ((22587, 22682), 'lib.utils.opengl_render_backend.render', 'render', (['model'], {'im_size': '[640, 480]', 'K': 'K', 'R': 'R', 't': 't', 'clip_near': '(10)', 'clip_far': '(10000)', 'mode': '"""rgb"""'}), "(model, im_size=[640, 480], K=K, R=R, t=t, clip_near=10, clip_far=\n 10000, mode='rgb')\n", (22593, 22682), False, 'from lib.utils.opengl_render_backend import render\n'), ((2244, 2265), 'numpy.min', 'np.min', (['row_col[:, 0]'], {}), '(row_col[:, 0])\n', (2250, 2265), True, 'import numpy as np\n'), ((2267, 2288), 'numpy.max', 'np.max', (['row_col[:, 0]'], {}), '(row_col[:, 0])\n', (2273, 2288), True, 'import numpy as np\n'), ((2320, 2341), 'numpy.min', 'np.min', (['row_col[:, 1]'], {}), '(row_col[:, 1])\n', (2326, 2341), True, 'import numpy as np\n'), ((2343, 2364), 'numpy.max', 'np.max', (['row_col[:, 1]'], {}), '(row_col[:, 1])\n', (2349, 2364), True, 'import numpy as np\n'), ((2633, 2647), 'numpy.min', 'np.min', (['widths'], {}), '(widths)\n', (2639, 2647), True, 'import numpy as np\n'), ((2649, 2663), 'numpy.max', 'np.max', (['widths'], {}), '(widths)\n', (2655, 2663), True, 'import numpy as np\n'), ((2720, 2735), 'numpy.min', 'np.min', (['heights'], {}), '(heights)\n', (2726, 2735), True, 'import numpy as np\n'), ((2737, 2752), 'numpy.max', 'np.max', (['heights'], {}), '(heights)\n', (2743, 2752), True, 'import numpy as np\n'), ((4672, 4682), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (4679, 4682), True, 'import numpy as np\n'), ((4872, 4917), 'numpy.rad2deg', 'np.rad2deg', (['(2 * np.pi * n * phi % (2 * np.pi))'], {}), '(2 * np.pi * n * phi % (2 * np.pi))\n', (4882, 4917), True, 'import numpy as np\n'), 
((7200, 7234), 'numpy.concatenate', 'np.concatenate', (['cls_poses'], {'axis': '(-1)'}), '(cls_poses, axis=-1)\n', (7214, 7234), True, 'import numpy as np\n'), ((8101, 8160), 'os.path.join', 'os.path.join', (['cfg.DATA_DIR', '"""blender_poses"""', '"""{}_poses.npy"""'], {}), "(cfg.DATA_DIR, 'blender_poses', '{}_poses.npy')\n", (8113, 8160), False, 'import os\n'), ((8211, 8254), 'os.path.join', 'os.path.join', (['cfg.LINEMOD', '"""renders/{}_new"""'], {}), "(cfg.LINEMOD, 'renders/{}_new')\n", (8223, 8254), False, 'import os\n'), ((8536, 8574), 'os.path.join', 'os.path.join', (['cfg.LINEMOD', '"""{}/{}.ply"""'], {}), "(cfg.LINEMOD, '{}/{}.ply')\n", (8548, 8574), False, 'import os\n'), ((12294, 12329), 'os.path.join', 'os.path.join', (['cfg.YCB', '"""renders/{}"""'], {}), "(cfg.YCB, 'renders/{}')\n", (12306, 12329), False, 'import os\n'), ((15167, 15208), 'os.path.join', 'os.path.join', (['cfg.YCB', '"""models/{}/{}.ply"""'], {}), "(cfg.YCB, 'models/{}/{}.ply')\n", (15179, 15208), False, 'import os\n'), ((22453, 22550), 'lib.utils.opengl_render_backend.render', 'render', (['model'], {'im_size': '[640, 480]', 'K': 'K', 'R': 'R', 't': 't', 'clip_near': '(10)', 'clip_far': '(10000)', 'mode': '"""depth"""'}), "(model, im_size=[640, 480], K=K, R=R, t=t, clip_near=10, clip_far=\n 10000, mode='depth')\n", (22459, 22550), False, 'from lib.utils.opengl_render_backend import render\n'), ((2082, 2103), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (2092, 2103), False, 'from PIL import Image\n'), ((2136, 2150), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (2144, 2150), True, 'import numpy as np\n'), ((4960, 4972), 'numpy.arcsin', 'np.arcsin', (['z'], {}), '(z)\n', (4969, 4972), True, 'import numpy as np\n'), ((9945, 9983), 'Imath.PixelType', 'Imath.PixelType', (['Imath.PixelType.FLOAT'], {}), '(Imath.PixelType.FLOAT)\n', (9960, 9983), False, 'import Imath\n')] |
import os
from socket import gethostbyname
from sys import argv, path
#
# --- INTRODUCTION ---
# BEBASID DOMAIN TO IP
# Based on Python 3
#
# --- HOW TO USE ---
# CLI: python scan.py yourfile.txt or python scan.py
#
# --- NOTE ---
# Domain on yourfile.txt must be above subdomain
#
# Example [Right]:
# domain.com
# n1.domain.com
#
# Example [Wrong]:
# n1.domain.com
# domain.com
#
# ------------------------
#
# Copyright (c) 2021 Haibara - KINI
# Copyright (c) 2020 Komunitas Internet Netral Indonesia (KINI)
#
# The copyright holders grant the freedom to copy, modify, convey, adapt, and/or redistribute this work
# under the terms of the Massachusetts Institute of Technology License.
# A copy of that license is available at https://github.com/bebasid/bebasid/blob/master/LICENSE
#
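#
# --- OUTPUT FORMAT (illustrative only) ---
# The generated hosts-<file> groups each resolved domain and its subdomains under
# the "# [domain]" header written by execute(); the IPs below are placeholders:
#
# # [domain.com]
# 203.0.113.11 n1.domain.com
# 203.0.113.10 domain.com
#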
def execute(file):
hostnameFile = open(os.path.join(path[0],file),'r')
resultFile = open(os.path.join(path[0],"hosts-"+file), 'w')
hostnameList = hostnameFile.read().splitlines()
lineFile = len(hostnameList)
lenLiveHostname = lenDieHostname = lenCantWrite = resultHostname = 0
print("\nMemulai Proses\n")
for hostname in hostnameList:
try:
hasWritten = False
hostnameIp = gethostbyname(hostname)
resultFile = open(os.path.join(path[0],"hosts-"+file), 'r')
resultFileList = resultFile.readlines()
for result in resultFileList:
if "# [" in result:
if result.split('[', 1)[1].split(']')[0] in hostname:
hostnameIndex = resultFileList.index(result)
resultFileList[hostnameIndex+1] = "{0} {1}\n{2}".format(hostnameIp, hostname,resultFileList[hostnameIndex+1])
resultFile.close()
resultFile = open(os.path.join(path[0],"hosts-"+file), 'w')
for line in resultFileList:
if(resultFile.write(line)):
success = True
else:
success = False
if(success):
hasWritten = True
lenLiveHostname += 1
print("Domain: {0}\nIP Address: {1}\n".format(hostname, hostnameIp))
else:
lenCantWrite +=1
print("Gagal Menulis File Result")
break
if not(hasWritten):
resultFile = open(os.path.join(path[0],"hosts-"+file), 'a')
hostsText = "\n\n# [{0}]\n{1} {2}".format(hostname,hostnameIp, hostname)
if(resultFile.write(hostsText)):
lenLiveHostname += 1
print("Domain: {0}\nIP Address: {1}\n".format(hostname, hostnameIp))
else:
lenCantWrite += 1
print("Gagal Menulis File Result")
resultFile.close()
except:
lenDieHostname += 1
print("Domain {0} tidak aktif".format(hostname))
hostnameFile.close()
print("Result:\nFile\t\t: {0}\nDomain Aktif\t: {1}\nDomain Mati\t: {2}\nGagal Ditulis\t: {3}".format("hosts-"+file,lenLiveHostname,lenDieHostname,lenCantWrite))
try:
execute(argv[1])
except:
file = str(input("Masukkan nama file:"))
execute(file)
| [
"socket.gethostbyname",
"os.path.join"
] | [((834, 861), 'os.path.join', 'os.path.join', (['path[0]', 'file'], {}), '(path[0], file)\n', (846, 861), False, 'import os\n'), ((888, 926), 'os.path.join', 'os.path.join', (['path[0]', "('hosts-' + file)"], {}), "(path[0], 'hosts-' + file)\n", (900, 926), False, 'import os\n'), ((1223, 1246), 'socket.gethostbyname', 'gethostbyname', (['hostname'], {}), '(hostname)\n', (1236, 1246), False, 'from socket import gethostbyname\n'), ((1277, 1315), 'os.path.join', 'os.path.join', (['path[0]', "('hosts-' + file)"], {}), "(path[0], 'hosts-' + file)\n", (1289, 1315), False, 'import os\n'), ((2553, 2591), 'os.path.join', 'os.path.join', (['path[0]', "('hosts-' + file)"], {}), "(path[0], 'hosts-' + file)\n", (2565, 2591), False, 'import os\n'), ((1811, 1849), 'os.path.join', 'os.path.join', (['path[0]', "('hosts-' + file)"], {}), "(path[0], 'hosts-' + file)\n", (1823, 1849), False, 'import os\n')] |
#!/usr/bin/python3
#coding: utf8
import argparse
from ChromiumProfile import ChromiumProfile
def command_tsv(args):
if args.file_path: profile.LocalStatePath = args.file_path
print(profile.Tsv)
def command_list(args):
if args.file_path: profile.LocalStatePath = args.file_path
if args.dirname: print(profile.Dirnames)
elif args.username: print(profile.Usernames)
else: parser_list.print_help()
def command_get(args):
if args.file_path: profile.LocalStatePath = args.file_path
if args.dirname: print(profile.GetDirname(args.dirname))
elif args.username: print(profile.GetUsername(args.username))
elif args.last_used: print(profile.LastUsed)
else: parser_get.print_help()
profile = ChromiumProfile()
parser = argparse.ArgumentParser(description='Get Chromium profile information.')
subparsers = parser.add_subparsers()
parser.add_argument('-f', '--file-path', help='Set this if the file is somewhere other than ~/.config/chromium/Local State')
parser_tsv = subparsers.add_parser('tsv', help='')
parser_list = subparsers.add_parser('list', help='')
parser_get = subparsers.add_parser('get', help='')
parser_tsv.set_defaults(handler=command_tsv)
parser_list.add_argument('-d', '--dirname', action='store_true', help='List the profile directory names')
parser_list.add_argument('-u', '--username', action='store_true', help='List the user names')
parser_list.set_defaults(handler=command_list)
parser_get.add_argument('-d', '--dirname', help="Directory name (e.g. 'Profile 1')")
parser_get.add_argument('-u', '--username', help='User name (arbitrary display name)')
parser_get.add_argument('-l', '--last-used', action='store_true', help='User name of the last used profile')
parser_get.set_defaults(handler=command_get)
args = parser.parse_args()
if hasattr(args, 'handler'): args.handler(args)
else: parser.print_help()
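# Hedged CLI examples (the script file name is assumed; flags come from the parsers above,
# and the global -f option must precede the subcommand):
#   python chromium_profile.py list -u                  # list user names
#   python chromium_profile.py get -d 'Profile 1'       # look up by profile directory
#   python chromium_profile.py get -l                   # last used profile
#   python chromium_profile.py -f /path/to/'Local State' tsv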
| [
"ChromiumProfile.ChromiumProfile",
"argparse.ArgumentParser"
] | [((726, 743), 'ChromiumProfile.ChromiumProfile', 'ChromiumProfile', ([], {}), '()\n', (741, 743), False, 'from ChromiumProfile import ChromiumProfile\n'), ((754, 816), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chromiumのプロファイル情報を取得する。"""'}), "(description='Chromiumのプロファイル情報を取得する。')\n", (777, 816), False, 'import argparse\n')] |
#!/usr/bin/env python
"""Tests for `gpplot` package."""
import pytest
import gpplot
import seaborn as sns
import pandas as pd
import numpy as np
@pytest.fixture
def scatter_data():
np.random.seed(7)
nsamps = 2000
data = pd.DataFrame({'x': np.random.normal(size=nsamps)}, index=range(nsamps))
data['y'] = 2 * data['x'] + np.random.normal(size=nsamps)
return data
def test_ridgeplot():
iris = sns.load_dataset('iris')
g = gpplot.ridgeplot(iris, 'sepal_width', 'species')
assert g.row_names == ['setosa', 'versicolor', 'virginica']
def test_point_density_plot(scatter_data):
ax = gpplot.point_densityplot(scatter_data, 'x', 'y')
def test_correlation(scatter_data):
pearson = gpplot.plots.calculate_correlation(scatter_data, 'x', 'y', 'pearson')
assert pearson[1] < 0.01
spearman = gpplot.plots.calculate_correlation(scatter_data, 'x', 'y', 'spearman')
assert spearman[1] < 0.01
assert pearson[0] != spearman[0]
def test_add_correlation(scatter_data):
ax = gpplot.point_densityplot(scatter_data, 'x', 'y')
ax = gpplot.add_correlation(scatter_data, 'x', 'y', size=12, color='blue')
def test_barplot():
mpg = sns.load_dataset('mpg')
mpg_summary = (mpg.groupby(['model_year', 'origin']).agg({'mpg': 'mean'}).reset_index())
ax = gpplot.pandas_barplot(mpg_summary, 'model_year', 'origin', 'mpg')
def test_density_rugplot():
data = sns.load_dataset('iris')
fig, ax = gpplot.density_rugplot(data, 'petal_length', 'species', ['setosa', 'virginica'])
def test_label_points():
mpg = sns.load_dataset('mpg')
ax = sns.scatterplot(data=mpg, x='weight', y='mpg')
label = ['hi 1200d', 'ford f250', 'chevy c20', 'oldsmobile omega']
gpplot.label_points(mpg, 'weight', 'mpg', label, 'name')
def test_dark_boxplot():
tips = sns.load_dataset("tips")
ax = gpplot.dark_boxplot(data=tips, x="size", y="total_bill")
def test_add_regline(scatter_data):
ax = gpplot.point_densityplot(scatter_data, 'x', 'y')
ax = gpplot.add_reg_line(scatter_data, 'x', 'y')
def test_add_xyline(scatter_data):
ax = gpplot.point_densityplot(scatter_data, 'x', 'y')
ax = gpplot.add_xy_line()
| [
"numpy.random.normal",
"gpplot.label_points",
"gpplot.pandas_barplot",
"gpplot.add_xy_line",
"seaborn.load_dataset",
"gpplot.add_correlation",
"gpplot.add_reg_line",
"gpplot.point_densityplot",
"gpplot.ridgeplot",
"gpplot.density_rugplot",
"numpy.random.seed",
"seaborn.scatterplot",
"gpplot.plots.calculate_correlation",
"gpplot.dark_boxplot"
] | [((190, 207), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (204, 207), True, 'import numpy as np\n'), ((422, 446), 'seaborn.load_dataset', 'sns.load_dataset', (['"""iris"""'], {}), "('iris')\n", (438, 446), True, 'import seaborn as sns\n'), ((455, 503), 'gpplot.ridgeplot', 'gpplot.ridgeplot', (['iris', '"""sepal_width"""', '"""species"""'], {}), "(iris, 'sepal_width', 'species')\n", (471, 503), False, 'import gpplot\n'), ((622, 670), 'gpplot.point_densityplot', 'gpplot.point_densityplot', (['scatter_data', '"""x"""', '"""y"""'], {}), "(scatter_data, 'x', 'y')\n", (646, 670), False, 'import gpplot\n'), ((723, 792), 'gpplot.plots.calculate_correlation', 'gpplot.plots.calculate_correlation', (['scatter_data', '"""x"""', '"""y"""', '"""pearson"""'], {}), "(scatter_data, 'x', 'y', 'pearson')\n", (757, 792), False, 'import gpplot\n'), ((837, 907), 'gpplot.plots.calculate_correlation', 'gpplot.plots.calculate_correlation', (['scatter_data', '"""x"""', '"""y"""', '"""spearman"""'], {}), "(scatter_data, 'x', 'y', 'spearman')\n", (871, 907), False, 'import gpplot\n'), ((1026, 1074), 'gpplot.point_densityplot', 'gpplot.point_densityplot', (['scatter_data', '"""x"""', '"""y"""'], {}), "(scatter_data, 'x', 'y')\n", (1050, 1074), False, 'import gpplot\n'), ((1084, 1153), 'gpplot.add_correlation', 'gpplot.add_correlation', (['scatter_data', '"""x"""', '"""y"""'], {'size': '(12)', 'color': '"""blue"""'}), "(scatter_data, 'x', 'y', size=12, color='blue')\n", (1106, 1153), False, 'import gpplot\n'), ((1186, 1209), 'seaborn.load_dataset', 'sns.load_dataset', (['"""mpg"""'], {}), "('mpg')\n", (1202, 1209), True, 'import seaborn as sns\n'), ((1312, 1377), 'gpplot.pandas_barplot', 'gpplot.pandas_barplot', (['mpg_summary', '"""model_year"""', '"""origin"""', '"""mpg"""'], {}), "(mpg_summary, 'model_year', 'origin', 'mpg')\n", (1333, 1377), False, 'import gpplot\n'), ((1419, 1443), 'seaborn.load_dataset', 'sns.load_dataset', (['"""iris"""'], {}), "('iris')\n", (1435, 1443), True, 'import seaborn as sns\n'), ((1458, 1543), 'gpplot.density_rugplot', 'gpplot.density_rugplot', (['data', '"""petal_length"""', '"""species"""', "['setosa', 'virginica']"], {}), "(data, 'petal_length', 'species', ['setosa', 'virginica']\n )\n", (1480, 1543), False, 'import gpplot\n'), ((1576, 1599), 'seaborn.load_dataset', 'sns.load_dataset', (['"""mpg"""'], {}), "('mpg')\n", (1592, 1599), True, 'import seaborn as sns\n'), ((1609, 1655), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'mpg', 'x': '"""weight"""', 'y': '"""mpg"""'}), "(data=mpg, x='weight', y='mpg')\n", (1624, 1655), True, 'import seaborn as sns\n'), ((1731, 1787), 'gpplot.label_points', 'gpplot.label_points', (['mpg', '"""weight"""', '"""mpg"""', 'label', '"""name"""'], {}), "(mpg, 'weight', 'mpg', label, 'name')\n", (1750, 1787), False, 'import gpplot\n'), ((1826, 1850), 'seaborn.load_dataset', 'sns.load_dataset', (['"""tips"""'], {}), "('tips')\n", (1842, 1850), True, 'import seaborn as sns\n'), ((1860, 1916), 'gpplot.dark_boxplot', 'gpplot.dark_boxplot', ([], {'data': 'tips', 'x': '"""size"""', 'y': '"""total_bill"""'}), "(data=tips, x='size', y='total_bill')\n", (1879, 1916), False, 'import gpplot\n'), ((1964, 2012), 'gpplot.point_densityplot', 'gpplot.point_densityplot', (['scatter_data', '"""x"""', '"""y"""'], {}), "(scatter_data, 'x', 'y')\n", (1988, 2012), False, 'import gpplot\n'), ((2022, 2065), 'gpplot.add_reg_line', 'gpplot.add_reg_line', (['scatter_data', '"""x"""', '"""y"""'], {}), "(scatter_data, 'x', 'y')\n", (2041, 2065), 
False, 'import gpplot\n'), ((2112, 2160), 'gpplot.point_densityplot', 'gpplot.point_densityplot', (['scatter_data', '"""x"""', '"""y"""'], {}), "(scatter_data, 'x', 'y')\n", (2136, 2160), False, 'import gpplot\n'), ((2170, 2190), 'gpplot.add_xy_line', 'gpplot.add_xy_line', ([], {}), '()\n', (2188, 2190), False, 'import gpplot\n'), ((341, 370), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'nsamps'}), '(size=nsamps)\n', (357, 370), True, 'import numpy as np\n'), ((256, 285), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'nsamps'}), '(size=nsamps)\n', (272, 285), True, 'import numpy as np\n')] |
# Performs analysis on annotation performance, prints to stdout
import sys
import os
script_loc = os.path.realpath(__file__)
sys.path.append(os.path.join(os.path.dirname(script_loc), '..'))
import django
from collections import defaultdict, Counter
import matplotlib.pyplot as plt
from tqdm import tqdm
os.environ.setdefault('DJANGO_SETTINGS_MODULE','trick.settings')
django.setup()
from core.models import Profile, User, Prompt, EvaluationText, Tag, Annotation
def did_pay_attention(attention_checks):
''' check if the turker failed their attention check '''
for annotation in attention_checks:
if annotation.boundary != annotation.text.boundary:
return False
return True
# Set this to true to filter out mechanical turk workers that fail the attention check
FILTER_ATTENTION_CHECK_FAILS = True
# First get all turkers
turker_profiles = Profile.objects.filter(is_turker=True)
total_annotations = 0
number_that_failed_attention_check = 0
# Create lists (one element per annotation) for easy analysis
trick_length = [] # List of distance from boundary per annotation
points = [] # List of points per annotation
boundaries = [] # List of chosen boundary sentence indices
true_boundaries = [] # List of true boundary sentence indices
# get all annotations done by turkers
progress_bar = tqdm(turker_profiles)
for p in progress_bar:
# Make sure to skip the annotations completed by my test account
if p.user.username == "bitchy_mackerel":
continue
progress_bar.set_description("Getting annotations for " + str(p.user.username))
annotations = Annotation.objects.filter(annotator=p.user, attention_check=False)
attention_checks = Annotation.objects.filter(annotator=p.user, attention_check=True)
total_annotations += len(annotations)
# If option is true, filter out workers who fail the attention check
if FILTER_ATTENTION_CHECK_FAILS:
if not did_pay_attention(attention_checks):
number_that_failed_attention_check += 1
continue
# Populate dictionaries and counters for analysis
for a in annotations:
trick_length.append(a.boundary - a.text.boundary)
points.append(a.points)
boundaries.append(a.boundary)
true_boundaries.append(a.text.boundary)
# Count the amount of annotations for each distance from boundary
# (This is the data for the histogram plot)
c = Counter(trick_length)
# Count total number of times a specific sentence index was chosen as well as
# the true boundary's sentence index (This was unused in the paper submission)
b = Counter(boundaries)
tb = Counter(true_boundaries)
# Print out statistics on the number of turkers who failed attention check
print("---Attention Checks---")
print("Num failed attention checks: " + str(number_that_failed_attention_check))
print("Total number of annotators: " + str(len(turker_profiles)))
print("Number of annotators that failed: " + str(sum(c.values()) - total_annotations))
print("Percentage of annotators that failed: " + str(float(number_that_failed_attention_check)/float(len(turker_profiles))))
# Print statistics for boundary guess accuracy
print("---Total Boundary Guessing Accuracy---")
print("Number of correct guesses: " + str(c[0]))
print("Number of total annotations: " + str(sum(c.values())))
print("% Accuracy of perfect guesses: " + str(float(c[0])/float(sum(c.values()))))
print("Average Distance from boundary: " + str(float(sum(trick_length)) / float(len(trick_length))))
print("Average Boundary Chosen by annotators: " + str(float(sum(boundaries)) / float(len(boundaries))))
# Plot Average Distance from boundary histogram
fig, ax = plt.subplots()
ax.bar(range(-9, 11), [v for k,v in sorted(c.items())])
ax.set_ylabel('Number of annotations')
ax.set_xlabel('Distance from generation boundary')
ax.set_title('Histogram of Annotations')
ax.legend()
plt.show()
fig.savefig("histogram.pdf", bbox_inches='tight')
| [
"os.environ.setdefault",
"core.models.Profile.objects.filter",
"django.setup",
"tqdm.tqdm",
"os.path.realpath",
"collections.Counter",
"os.path.dirname",
"core.models.Annotation.objects.filter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((100, 126), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (116, 126), False, 'import os\n'), ((307, 372), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""trick.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'trick.settings')\n", (328, 372), False, 'import os\n'), ((372, 386), 'django.setup', 'django.setup', ([], {}), '()\n', (384, 386), False, 'import django\n'), ((872, 910), 'core.models.Profile.objects.filter', 'Profile.objects.filter', ([], {'is_turker': '(True)'}), '(is_turker=True)\n', (894, 910), False, 'from core.models import Profile, User, Prompt, EvaluationText, Tag, Annotation\n'), ((1321, 1342), 'tqdm.tqdm', 'tqdm', (['turker_profiles'], {}), '(turker_profiles)\n', (1325, 1342), False, 'from tqdm import tqdm\n'), ((2407, 2428), 'collections.Counter', 'Counter', (['trick_length'], {}), '(trick_length)\n', (2414, 2428), False, 'from collections import defaultdict, Counter\n'), ((2591, 2610), 'collections.Counter', 'Counter', (['boundaries'], {}), '(boundaries)\n', (2598, 2610), False, 'from collections import defaultdict, Counter\n'), ((2616, 2640), 'collections.Counter', 'Counter', (['true_boundaries'], {}), '(true_boundaries)\n', (2623, 2640), False, 'from collections import defaultdict, Counter\n'), ((3662, 3676), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3674, 3676), True, 'import matplotlib.pyplot as plt\n'), ((3876, 3886), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3884, 3886), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1667), 'core.models.Annotation.objects.filter', 'Annotation.objects.filter', ([], {'annotator': 'p.user', 'attention_check': '(False)'}), '(annotator=p.user, attention_check=False)\n', (1626, 1667), False, 'from core.models import Profile, User, Prompt, EvaluationText, Tag, Annotation\n'), ((1691, 1756), 'core.models.Annotation.objects.filter', 'Annotation.objects.filter', ([], {'annotator': 'p.user', 'attention_check': '(True)'}), '(annotator=p.user, attention_check=True)\n', (1716, 1756), False, 'from core.models import Profile, User, Prompt, EvaluationText, Tag, Annotation\n'), ((156, 183), 'os.path.dirname', 'os.path.dirname', (['script_loc'], {}), '(script_loc)\n', (171, 183), False, 'import os\n')] |
#!/usr/bin/env python
""" I/O utilities for dealing with SIU files """
import os
import struct
import numpy as np
__all__ = ["read_line_list", "write_line_list"]
def chunk(l, n):
return [l[i:i + n] for i in range(0, len(l), n)]
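# Illustration: chunk('abcdef', 2) -> ['ab', 'cd', 'ef']; chunk([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]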
def guess_mode(filename):
textchars = ''.join(map(chr, [7,8,9,10,12,13,27] + range(0x20, 0x100)))
is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))
with open(filename) as fp:
is_binary = is_binary_string(fp.read(1024))
return ["w", "b"][is_binary]
def _tuple_to_recarray(transition):
columns = ("wavelength", "excitation_potential", "J_low", "J_up", "xx1",
"log(gf)", "xx2", "log(c4)?", "log(c6)", "xx3", "xx4", "xx5", "xx6",
"xx7", "xx8", "xx9", "xx10", "atomic_number", "xx11", "ionised")
return np.core.records.fromrecords([transition], names=columns, formats=None)
def read_line_list(filename, mode=None, ignore_blanks=True):
"""
Read a line list and return a record array.
:param filename:
The path of the line list.
:type filename:
str
:param mode: [optional]
The mode to open the file in. Specifying mode "w" indicates ASCII format.
Specifying the mode as "b" indicates binary format. If no mode is given,
it will be guessed from the file contents.
:type mode:
str
:returns:
A record array of the line list.
"""
# Read it into tuples, regardless of binary/ascii
if mode is None:
mode = guess_mode(filename)
if "b" in mode.lower():
records = _read_binary_line_list(filename)
else:
records = _read_ascii_line_list(filename)
# Convert to a record array
columns = ("wavelength", "excitation_potential", "J_low", "J_up", "xx1",
"log(gf)", "xx2", "log(c4)?", "log(c6)", "xx3", "xx4", "xx5", "xx6",
"xx7", "xx8", "xx9", "xx10", "atomic_number", "xx11", "ionised")
data = np.core.records.fromrecords(records, names=columns, formats=None)
return data[data["wavelength"] > 0] if ignore_blanks else data
def write_line_list(filename, data, mode="w", clobber=False):
"""
Write the line list to disk.
:param filename:
The path of the line list to write to.
:type filename:
str
:param data:
The line list information as a record array.
:type data:
:class:`np.core.records.recarray`
:param mode: [optional]
The mode to open the file in. The default (w) indicates ASCII format.
Specifying the mode as "b" indicates binary format.
:type mode:
str
:param clobber: [optional]
Clobber the file if it already exists.
:type clobber:
bool
"""
if os.path.exists(filename) and not clobber:
raise IOError("filename {0} exists and not clobbering".format(filename))
# Data should be in record array. Can pass to either binary or ascii writer.
if "b" in mode.lower():
return _write_binary_line_list(filename, data)
else:
return _write_ascii_line_list(filename, data)
def _write_ascii_line_list(filename, data):
"""
Write line list to ASCII format.
"""
# 2010.974 0.0300 2.0 3.0 0.1 -3.3800 0.0000 0.0000 -31.1250 0
#.0000 0.0000 2 uv6 3P 1G KP STD 14 0 1
record = "{0:10.3f}{1:9.4f}{2:5.1f}{3:5.1f}{4:6.1f}{5:9.4f}{6:9.4f}{7:9.4f}"\
"{8:9.4f}{9:9.4f}{10:9.4f}{11:10.0f} {12:>5s} {13:>3s} {14:>3s} {15"\
":>3s} {16:>3s} {17:3.0f}{18:3.0f}{19:3.0f}\n"
scales = np.ones(len(data.dtype.names))
scales[6] = 1e+8
with open(filename, "w+") as fp:
for row in data:
row_data = row * scales
fp.write(record.format(*row_data))
return True
def _write_binary_line_list(filename, data, num=200):
"""
Write line list to binary format.
"""
assert 3.8e5 >= len(data), "SI line list format supports at most 380000 lines"
num_structures = int(np.ceil(len(data)/float(num)))
# Create the header
header_values = np.zeros((1900))
header_values[:num_structures] = data["wavelength"][::num]
header = struct.pack("<1900d", *header_values)
# Create the structures
contents = header
for i in xrange(num_structures):
contents += _pack_binary_structure(data[i*num:(i+1)*num], num)
with open(filename, "wb+") as fp:
fp.write(contents)
return True
def _pack_binary_structure(data, num=200):
assert num >= len(data)
columns = ["wavelength", "excitation_potential", "J_low", "J_up", "xx1",
"log(gf)", "xx2", "log(c4)?", "log(c6)", "xx3", "xx4", "xx5", "xx6",
"xx7", "xx8", "xx9", "xx10", "atomic_number", "xx11", "ionised"]
data = data[columns]
if num > len(data):
# We will need to copy the data and pad it.
data = data.copy()
padded_row = np.array([(0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0,
' ', ' ', ' ', ' ', ' ', 0, 0, 0)], dtype=data.dtype)
for i in xrange(num - len(data)):
data = np.append(data, padded_row)
colnames = data.dtype.names[:12]
scales = np.ones(len(colnames))
scales[6] = 1e+8
formatted_data = struct.pack(
"<{0}d{0}d{0}f{0}f{0}f{0}f{0}f{0}f{0}f{0}f{0}f{0}l".format(num),
*np.array([data[c]*s for c, s in zip(colnames, scales)]).flatten())
sizes = (5, 3, 3, 3, 3)
colnames = data.dtype.names[12:17]
formatted_byte_data = struct.pack("<{0}s{1}s{2}s{3}s{4}s".format(
*[num*s for s in sizes]), *["".join(list(data[c])) for c in colnames])
colnames = data.dtype.names[17:]
formatted_element_data = struct.pack("<{0}b{0}b{0}b".format(num),
*np.array([data[c] for c in colnames]).flatten())
return "".join([formatted_data, formatted_byte_data, formatted_element_data])
def _read_ascii_line_list(filename):
"""
Read an ASCII-formatted line list and return a list containing tuples for
each record.
"""
with open(filename, "r") as fp:
lines = fp.readlines()
return map(_read_ascii_line, lines)
def _read_ascii_line(line):
"""
Read the structure containined in an ASCII line and return the record as a
tuple.
"""
# 2010.974 0.0300 2.0 3.0 0.1 -3.3800 0.0000 0.0000 -31.1250 \
#0.0000 0.0000 2 uv6 3P 1G KP STD 14 0 1
record = []
record.extend(map(float, line.split()[:12]))
record[6] = record[6] * 1e-8
record[-1] = int(record[-1])
# Split the text:
record.extend(map(str.strip, chunk(line[101:126], 5)))
# And the final element information
record.extend(map(int, line[127:].split()))
return tuple(record)
def _read_binary_line_list(filename):
"""
Read a binary-formatted line list and return a list containing tuples for
each record.
"""
with open(filename, "rb") as fp:
# Skip the header (this is also length 15200)
fp.seek(0x3b60)
contents = fp.read()
size = 15200
return sum([_read_binary_structure(contents[i*size:]) \
for i in xrange(len(contents)/size)], [])
def _read_binary_structure(contents, num=200):
"""
Read the structure contained in a binary line list and return the records
of the structure.
"""
data_fmt = "<{0}d{0}d{0}f{0}f{0}f{0}f{0}f{0}f{0}f{0}f{0}f{0}l".format(num)
data = struct.unpack_from(data_fmt, contents)
sizes = (5, 3, 3, 3, 3)
offset = struct.calcsize(data_fmt)
byte_fmt = "<{0}s{1}s{2}s{3}s{4}s".format(*[num*s for s in sizes])
str_data = [chunk(d, s) for d, s in \
zip(struct.unpack_from(byte_fmt, contents[offset:]), sizes)]
offset += struct.calcsize(byte_fmt)
element_fmt = "<{0}b{0}b{0}b".format(num)
element_data = struct.unpack_from(element_fmt, contents[offset:])
# Join records together
records = []
for i in xrange(num):
record = list(data[i::num])
record[6] *= 1e-8
record.extend([each[i] for each in str_data])
record.extend(element_data[i::num])
records.append(tuple(record))
return records
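# Hedged round-trip sketch (file paths are placeholders, not part of the original module):
def _roundtrip_example(ascii_path="lines.dat", binary_path="lines.bin"):
    """Read an ASCII SIU line list, write it back in the 200-record binary layout
    (1900-double wavelength header followed by packed structures), then re-read it."""
    lines = read_line_list(ascii_path, mode="w")
    write_line_list(binary_path, lines, mode="b", clobber=True)
    return read_line_list(binary_path, mode="b")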
| [
"struct.calcsize",
"os.path.exists",
"struct.pack",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.core.records.fromrecords",
"struct.unpack_from"
] | [((807, 877), 'numpy.core.records.fromrecords', 'np.core.records.fromrecords', (['[transition]'], {'names': 'columns', 'formats': 'None'}), '([transition], names=columns, formats=None)\n', (834, 877), True, 'import numpy as np\n'), ((1950, 2015), 'numpy.core.records.fromrecords', 'np.core.records.fromrecords', (['records'], {'names': 'columns', 'formats': 'None'}), '(records, names=columns, formats=None)\n', (1977, 2015), True, 'import numpy as np\n'), ((4100, 4114), 'numpy.zeros', 'np.zeros', (['(1900)'], {}), '(1900)\n', (4108, 4114), True, 'import numpy as np\n'), ((4193, 4230), 'struct.pack', 'struct.pack', (['"""<1900d"""', '*header_values'], {}), "('<1900d', *header_values)\n", (4204, 4230), False, 'import struct\n'), ((7440, 7478), 'struct.unpack_from', 'struct.unpack_from', (['data_fmt', 'contents'], {}), '(data_fmt, contents)\n', (7458, 7478), False, 'import struct\n'), ((7521, 7546), 'struct.calcsize', 'struct.calcsize', (['data_fmt'], {}), '(data_fmt)\n', (7536, 7546), False, 'import struct\n'), ((7744, 7769), 'struct.calcsize', 'struct.calcsize', (['byte_fmt'], {}), '(byte_fmt)\n', (7759, 7769), False, 'import struct\n'), ((7835, 7885), 'struct.unpack_from', 'struct.unpack_from', (['element_fmt', 'contents[offset:]'], {}), '(element_fmt, contents[offset:])\n', (7853, 7885), False, 'import struct\n'), ((2745, 2769), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2759, 2769), False, 'import os\n'), ((4925, 5051), 'numpy.array', 'np.array', (["[(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, ' ', ' ', ' ',\n ' ', ' ', 0, 0, 0)]"], {'dtype': 'data.dtype'}), "([(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, ' ',\n ' ', ' ', ' ', ' ', 0, 0, 0)], dtype=data.dtype)\n", (4933, 5051), True, 'import numpy as np\n'), ((5111, 5138), 'numpy.append', 'np.append', (['data', 'padded_row'], {}), '(data, padded_row)\n', (5120, 5138), True, 'import numpy as np\n'), ((7672, 7719), 'struct.unpack_from', 'struct.unpack_from', (['byte_fmt', 'contents[offset:]'], {}), '(byte_fmt, contents[offset:])\n', (7690, 7719), False, 'import struct\n'), ((5751, 5788), 'numpy.array', 'np.array', (['[data[c] for c in colnames]'], {}), '([data[c] for c in colnames])\n', (5759, 5788), True, 'import numpy as np\n')] |
import datetime
import time
import csv
import os
import ast
import glob
from math import log
from sense_hat import SenseHat
from weather import get_timestamp
from sendEmail import *
import tablib
import pandas as pd
import json
def convert_epoch(epoch_time):
converted_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch_time))
return converted_time
def epoch_to_day(epoch_time):
converted_time = time.strftime('%A', time.localtime(epoch_time))
return converted_time
def get_csv_data():
"""Open the daily csv log and return the content"""
csv_list = []
day = get_timestamp().split()[0]
# csv_path = os.path.join(os.path.dirname(__file__) + '/logs/', day + '.csv')
csv_path = '/home/pi/Pi_Weather_Station/src/logs/' + day + '.csv'
with open(csv_path, 'r') as csv_file:
# content = f.read()
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
# print(row)
csv_list.append(row)
return csv_list
def get_dark_sky():
"""Read the most recent dark sky log and return a list of the stats"""
csv_content = get_csv_data()
most_recent = csv_content[-1]
dark_sky_string = most_recent[9]
dark_sky_list = dark_sky_string.strip('][').split(', ')
ds_temp = dark_sky_list[0]
ds_cond = dark_sky_list[1].strip("'")
ds_fore = dark_sky_list[2].strip("'")
return [ds_temp, ds_cond, ds_fore]
# print(get_dark_sky())
def get_gov_aqi():
"""Read the most recent aqi log and return the stats"""
csv_content = get_csv_data()
most_recent = csv_content[-1]
aqi_string = most_recent[10]
aqi_list = aqi_string.strip('][').split(', ')
aqi = aqi_list[0]
air_cond = aqi_list[1].strip("'")
return [aqi, air_cond]
# print(get_gov_aqi())
def get_timestamp():
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
return st
# test_list = ['boom', 'bam', 0]
test_dict = {'max_temp' : '45', 'min_temp' : '45', 'aqi_max' : 0}  # keys expected by check_max/check_min/check_air
def save_alert(result_dict):
"""Take a list and save it as a csv"""
# src_dir = os.path.dirname(os.path.realpath(__file__))
# w_log = os.path.join(src_dir + '/logs/', day + '.csv')
file_path = '/home/pi/Pi_Weather_Station/src/alerts.txt'
with open(file_path, 'w') as output:
output.write(str(result_dict))
# save_alert(test_dict)
def read_alert():
file_path = '/home/pi/Pi_Weather_Station/src/alerts.txt'
with open(file_path, 'r') as input:
s = input.read()
whip = ast.literal_eval(s)
return whip
# print(read_alert())
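# Illustrative alerts.txt content (the keys are the ones read by check_max/check_min/check_air):
# {'max_temp': '90', 'min_temp': '32', 'aqi_max': '100'}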
def check_max():
try:
alert_cont = read_alert()
maximum_temp = int(alert_cont['max_temp'])
current_temp = get_dark_sky()[0]
current_temp = float(current_temp)
if current_temp >= maximum_temp:
            print('Current temperature exceeds the maximum temperature threshold set')
print('Check https://pi.sisto.solutions/alerts')
return True
else:
print('Temperature is within limit set')
return False
except:
print('That did not work.')
print('probably did not have a value set for maximum temp')
# check_max()
# if check_max() == True:
# print('truth')
# sendEmail('max temp exceeded', 'Pi max temp exceeded')
def check_min():
try:
alert_cont = read_alert()
minimum_temp = int(alert_cont['min_temp'])
current_temp = get_dark_sky()[0]
current_temp = float(current_temp)
if current_temp <= minimum_temp:
            print('Current temperature is below the minimum temperature threshold set')
print('Check https://pi.sisto.solutions/alerts')
return True
else:
print('Temperature is within limit set')
return False
except:
print('That did not work.')
print('probably did not have a value set for minimum temp')
# if check_min() == True:
# print('truth')
def check_air():
try:
alert_cont = read_alert()
maximum_aqi = int(alert_cont['aqi_max'])
current_aqi = get_gov_aqi()[0]
current_aqi = float(current_aqi)
if current_aqi >= maximum_aqi:
            print('Current AQI exceeds the maximum threshold set')
print('Check https://pi.sisto.solutions/alerts')
return True
else:
print('AQI is within limit set')
return False
except:
print('That did not work.')
print('probably did not have a value set for aqi')
# check_air()
# # csv_path = os.path.join(os.path.dirname(__file__) + '/logs/', day + '.csv')
# csv_path = '/home/pi/Pi_Weather_Station/src/logs/' + day + '.csv'
# with open(csv_path, 'r') as fh:
# imported_data = tablib.Dataset().load(fh)
# imported_data.headers = ['Log Time', 'Temp (C)', 'Temp (F)', 'Humidity', 'Pressure', 'DewPoint', 'X', 'Y', 'Z', 'Weather', 'AQI']
# print(type(imported_data))
# data = imported_data.export('csv')
# print(type(data))
# print(data)
def update_logs_html():
day = get_timestamp().split()[0]
csv_path = '/home/pi/Pi_Weather_Station/src/logs/' + day + '.csv'
columns = ['Log Time', 'Temp (C)', 'Temp (F)', 'Humidity', 'Pressure', 'DewPoint', 'X', 'Y', 'Z', 'Weather', 'AQI']
df = pd.read_csv(csv_path, names=columns)
with open('/home/pi/Pi_Weather_Station/src/templates/logs.html', 'w') as html_file:
html_file.write(df.to_html())
# print(df.to_html())
# update_logs_html()
# send_email('mailjet fix', 'mailjet has updated credentials')
with open('weather.json') as json_file:
data = json.load(json_file)
current_cond = data['currently']['summary']
chance_of_rain = data['currently']['precipProbability']
current_temp = data['currently']['temperature']
feels_like_temp = data['currently']['apparentTemperature']
dew_point = data['currently']['dewPoint']
current_hum = data['currently']['humidity']
current_press = data['currently']['pressure']
current_wind = data['currently']['windSpeed']
wind_bearing = data['currently']['windBearing']
current_uv = data['currently']['uvIndex']
current_vis = data['currently']['visibility']
forecast = data['daily']['summary']
today_sunrise = data['daily']['data'][0]['sunriseTime']
today_sunset = data['daily']['data'][0]['sunsetTime']
today_temp_hi = data['daily']['data'][0]['temperatureHigh']
today_temp_lo = data['daily']['data'][0]['temperatureLow']
tom_time = data['daily']['data'][1]['time']
tomorrow = epoch_to_day(tom_time) # get day of week for tomorrow
tom_summary = data['daily']['data'][1]['summary']
tom_temp_hi = data['daily']['data'][1]['temperatureHigh']
tom_temp_lo = data['daily']['data'][1]['temperatureLow']
tom_chance_rain = data['daily']['data'][1]['precipProbability']
d2_time = data['daily']['data'][2]['time']
d2 = epoch_to_day(d2_time) # get day 2
d2_summary = data['daily']['data'][2]['summary']
d2_temp_hi = data['daily']['data'][2]['temperatureHigh']
d2_temp_lo = data['daily']['data'][2]['temperatureLow']
d2_chance_rain = data['daily']['data'][2]['precipProbability']
d3_time = data['daily']['data'][3]['time']
d3 = epoch_to_day(d3_time) # get day 2
d3_summary = data['daily']['data'][3]['summary']
d3_temp_hi = data['daily']['data'][3]['temperatureHigh']
d3_temp_lo = data['daily']['data'][3]['temperatureLow']
d3_chance_rain = data['daily']['data'][3]['precipProbability']
# print(current_press)
# print(convert_epoch(today_sunrise))
# print(convert_epoch(today_sunset))
# print()
# print(tomorrow)
# print(d2)
# print(d3)
send_email('Are we online?', 'hoping this works')
| [
"datetime.datetime.fromtimestamp",
"pandas.read_csv",
"ast.literal_eval",
"weather.get_timestamp",
"csv.reader",
"json.load",
"time.localtime",
"time.time"
] | [((1833, 1844), 'time.time', 'time.time', ([], {}), '()\n', (1842, 1844), False, 'import time\n'), ((5358, 5394), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'names': 'columns'}), '(csv_path, names=columns)\n', (5369, 5394), True, 'import pandas as pd\n'), ((5683, 5703), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (5692, 5703), False, 'import json\n'), ((317, 343), 'time.localtime', 'time.localtime', (['epoch_time'], {}), '(epoch_time)\n', (331, 343), False, 'import time\n'), ((444, 470), 'time.localtime', 'time.localtime', (['epoch_time'], {}), '(epoch_time)\n', (458, 470), False, 'import time\n'), ((875, 910), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (885, 910), False, 'import csv\n'), ((2541, 2560), 'ast.literal_eval', 'ast.literal_eval', (['s'], {}), '(s)\n', (2557, 2560), False, 'import ast\n'), ((1854, 1889), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (1885, 1889), False, 'import datetime\n'), ((604, 619), 'weather.get_timestamp', 'get_timestamp', ([], {}), '()\n', (617, 619), False, 'from weather import get_timestamp\n'), ((5132, 5147), 'weather.get_timestamp', 'get_timestamp', ([], {}), '()\n', (5145, 5147), False, 'from weather import get_timestamp\n')] |
#!/usr/bin/env python3
import rospy
import numpy
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState
from time import sleep
from robot import robot
class env():
def __init__(self):
self.robot=robot()
self.ModelPub = rospy.Publisher('/gazebo/set_model_state',ModelState,queue_size=10)
def step(self,action, speed):
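        # Keep applying `action` at `speed` until the robot's discretized state
        # (0-8, as used by the reward rules below) changes or the line contour
        # is lost, then score the resulting transition.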
reward=0
c=False
current_state=self.robot.computeState()
while(self.robot.computeState()==current_state and c==False):
c=self.robot.get_noContour()
self.robot.move(action, speed)
new_state=self.robot.computeState()
if (rospy.is_shutdown()) :
break
self.robot.move(9,-10)
new_state=self.robot.computeState()
done = False
if self.robot.get_noContour() == True :
reward = -2
done = True
if new_state == 3 or new_state == 5 :
if current_state < 3 or current_state > 5 :
reward = 0.5
else :
reward = -0.5
elif new_state == 2 or new_state == 6 :
if current_state < 2 or current_state > 6 :
reward = 0.5
else :
reward = -0.8
elif new_state == 1 or new_state == 7 :
if current_state < 1 or current_state > 7 :
reward = 0.5
else :
reward = -0.8
elif new_state == 0 or new_state== 8 :
reward = -2
else :
reward = 2
return new_state, reward, done
# if new_state == 3 or new_state == 5 :
# if current_state < 3 or current_state > 5 :
# reward = 0.5
# else :
# reward = -0.5
# elif new_state == 2 or new_state == 6 :
# if current_state < 2 or current_state > 6 :
# reward = 0.5
# else :
# reward = -0.5
# elif new_state == 1 or new_state == 7 :
# if current_state < 1 or current_state > 7 :
# reward = 0.5
# else :
# reward = -0.5
# elif new_state == 0 or new_state== 8 :
# reward = -1
# else :
# reward = 2
# return new_state, reward, done
def reset(self):
state_msg = ModelState()
state_msg.pose.position.x=1
state_msg.pose.position.y=-0.8
state_msg.pose.position.z=0.1
state_msg.pose.orientation.z=1
state_msg.pose.orientation.w=0.0000463
state_msg.model_name = "line_follower"
state_msg.reference_frame='world'
rospy.wait_for_service('/gazebo/set_model_state')
try:
set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
resp = set_state( state_msg )
except rospy.ServiceException:
            print("/gazebo/set_model_state service call failed")
sleep(0.1)
return self.robot.computeState()
| [
"robot.robot",
"rospy.is_shutdown",
"rospy.ServiceProxy",
"time.sleep",
"gazebo_msgs.msg.ModelState",
"rospy.Publisher",
"rospy.wait_for_service"
] | [((238, 245), 'robot.robot', 'robot', ([], {}), '()\n', (243, 245), False, 'from robot import robot\n'), ((270, 339), 'rospy.Publisher', 'rospy.Publisher', (['"""/gazebo/set_model_state"""', 'ModelState'], {'queue_size': '(10)'}), "('/gazebo/set_model_state', ModelState, queue_size=10)\n", (285, 339), False, 'import rospy\n'), ((2432, 2444), 'gazebo_msgs.msg.ModelState', 'ModelState', ([], {}), '()\n', (2442, 2444), False, 'from gazebo_msgs.msg import ModelState\n'), ((2741, 2790), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/gazebo/set_model_state"""'], {}), "('/gazebo/set_model_state')\n", (2763, 2790), False, 'import rospy\n'), ((3046, 3056), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (3051, 3056), False, 'from time import sleep\n'), ((672, 691), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (689, 691), False, 'import rospy\n'), ((2828, 2888), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/set_model_state"""', 'SetModelState'], {}), "('/gazebo/set_model_state', SetModelState)\n", (2846, 2888), False, 'import rospy\n')] |
import unittest
from unittest import TestCase
import devanalyst.simulation.tests.test_simm # Import needed to populate tu_.ACTUAL and tu_.EXPECTED
import devanalyst.test_utils.test_utils as tu_
# ----------------------------- Simulation Model tests ------------------------------
class test_greedyAllocationLogs(TestCase):
def test_Committed(self):
self.assertTrue(tu_.testOK('simm.test_greedyAllocationLogs'))
class test_greedyAllocation(TestCase):
def test_Committed(self):
self.assertTrue(tu_.testOK('simm.test_greedyAllocation.Committed'))
def test_Tasks(self):
self.assertTrue(tu_.testOK('simm.test_greedyAllocation.Tasks'))
class test_balancedAllocation(TestCase):
def test_Committed(self):
self.assertTrue(tu_.testOK('simm.test_balancedAllocation.Committed'))
def test_Tasks(self):
self.assertTrue(tu_.testOK('simm.test_balancedAllocation.Tasks'))
class test_noLaggardsAllocation(TestCase):
def test_Committed(self):
self.assertTrue(tu_.testOK('simm.test_noLaggardsAllocation.Committed'))
def test_Tasks(self):
self.assertTrue(tu_.testOK('simm.test_noLaggardsAllocation.Tasks'))
class test_distributedLagQualityModel(TestCase):
def test_bugs(self):
self.assertTrue(tu_.testOK('simm.test_distributedLagQualityModel.bugs'))
def test_stories(self):
self.assertTrue(tu_.testOK('simm.test_distributedLagQualityModel.stories'))
| [
"devanalyst.test_utils.test_utils.testOK"
] | [((381, 425), 'devanalyst.test_utils.test_utils.testOK', 'tu_.testOK', (['"""simm.test_greedyAllocationLogs"""'], {}), "('simm.test_greedyAllocationLogs')\n", (391, 425), True, 'import devanalyst.test_utils.test_utils as tu_\n'), ((521, 571), 'devanalyst.test_utils.test_utils.testOK', 'tu_.testOK', (['"""simm.test_greedyAllocation.Committed"""'], {}), "('simm.test_greedyAllocation.Committed')\n", (531, 571), True, 'import devanalyst.test_utils.test_utils as tu_\n'), ((623, 669), 'devanalyst.test_utils.test_utils.testOK', 'tu_.testOK', (['"""simm.test_greedyAllocation.Tasks"""'], {}), "('simm.test_greedyAllocation.Tasks')\n", (633, 669), True, 'import devanalyst.test_utils.test_utils as tu_\n'), ((768, 820), 'devanalyst.test_utils.test_utils.testOK', 'tu_.testOK', (['"""simm.test_balancedAllocation.Committed"""'], {}), "('simm.test_balancedAllocation.Committed')\n", (778, 820), True, 'import devanalyst.test_utils.test_utils as tu_\n'), ((872, 920), 'devanalyst.test_utils.test_utils.testOK', 'tu_.testOK', (['"""simm.test_balancedAllocation.Tasks"""'], {}), "('simm.test_balancedAllocation.Tasks')\n", (882, 920), True, 'import devanalyst.test_utils.test_utils as tu_\n'), ((1021, 1075), 'devanalyst.test_utils.test_utils.testOK', 'tu_.testOK', (['"""simm.test_noLaggardsAllocation.Committed"""'], {}), "('simm.test_noLaggardsAllocation.Committed')\n", (1031, 1075), True, 'import devanalyst.test_utils.test_utils as tu_\n'), ((1127, 1177), 'devanalyst.test_utils.test_utils.testOK', 'tu_.testOK', (['"""simm.test_noLaggardsAllocation.Tasks"""'], {}), "('simm.test_noLaggardsAllocation.Tasks')\n", (1137, 1177), True, 'import devanalyst.test_utils.test_utils as tu_\n'), ((1279, 1334), 'devanalyst.test_utils.test_utils.testOK', 'tu_.testOK', (['"""simm.test_distributedLagQualityModel.bugs"""'], {}), "('simm.test_distributedLagQualityModel.bugs')\n", (1289, 1334), True, 'import devanalyst.test_utils.test_utils as tu_\n'), ((1388, 1446), 'devanalyst.test_utils.test_utils.testOK', 'tu_.testOK', (['"""simm.test_distributedLagQualityModel.stories"""'], {}), "('simm.test_distributedLagQualityModel.stories')\n", (1398, 1446), True, 'import devanalyst.test_utils.test_utils as tu_\n')] |
from numpy import linspace
import matplotlib.pyplot as plt
class corridor:
def __init__(self, width, length,_data_size=2):
self.right=0 #right wall
self.left=width #left wall
self.length=length
self.wall=linspace(0,length,_data_size) # this will help draw the corridor
self.boxes=[] #this will be used to define obstacles
def add_box(self,x,y,size):
self.boxes.append((x,y,size)) #later you need to detect if you've hit a box, and also draw them.
def wall_detect(self,x,y):
'''
detects if the given point (x,y) is beyond the wall.
        It returns "Hit", "NoHit", or "Win" if the point is beyond the wall,
        inside the corridor, or out of the corridor, respectively.
        Let's decide that we "Win" if we hit a wall at the end of the corridor.
'''
if x<self.length:
if (y<= self.left and y>=self.right ):
return 'NoHit'
elif (y> self.left or y<self.right ):
return 'Hit'
if x>= self.length:
return 'Win'
def draw(self,_n_paths=0,paths=[]):
'''
draw the corridor, and the paths!
'''
fig=plt.figure(figsize=(8,3))
fig.subplots_adjust(bottom=0.2, left=0.2, top = 0.9, right=0.9)
#=============================================================================#
sub = fig.add_subplot(111)
_len=len(self.wall)
sub.plot(self.wall, [self.right]*_len,'r')
sub.plot(self.wall, [self.left]*_len,'r')
if _n_paths!=0:
if _n_paths==1:
_lp=len(paths[0])
sub.plot([paths[0][i] for i in range(_lp)],[paths[1][i] for i in range(_lp)])
if _n_paths>1:
for n,_p in enumerate(paths):
_lp=len(_p[0])
sub.plot([_p[0][i] for i in range(_lp)],[_p[1][i] for i in range(_lp)],'k:',alpha=0.3)
sub.set_xlim(-0.1,self.length*1.1)
sub.set_ylim(-0.1,self.left*1.1)
plt.show()
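# Minimal usage sketch (hypothetical values, not part of the original module):
#
#   c = corridor(width=2, length=10)
#   c.add_box(5, 1, 0.5)          # stored obstacle (not yet used by wall_detect)
#   print(c.wall_detect(5, 1))    # 'NoHit' -> inside the corridor
#   print(c.wall_detect(5, 3))    # 'Hit'   -> beyond the left wall
#   print(c.wall_detect(11, 1))   # 'Win'   -> past the end of the corridor
#   c.draw()                      # plots the two walls with matplotlib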
| [
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((245, 276), 'numpy.linspace', 'linspace', (['(0)', 'length', '_data_size'], {}), '(0, length, _data_size)\n', (253, 276), False, 'from numpy import linspace\n'), ((1218, 1244), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (1228, 1244), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2071), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2069, 2071), True, 'import matplotlib.pyplot as plt\n')] |
from __future__ import absolute_import
from datetime import datetime, timezone
from checkout_sdk.payments.payments import RefundRequest
from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable
from tests.payments.payments_test_utils import make_card_payment
def test_should_refund_card_payment(default_api):
payment_response = make_card_payment(default_api, capture_on=datetime.now(timezone.utc))
refund_request = RefundRequest()
refund_request.reference = new_uuid()
refund_request.amount = 2
refund_response = retriable(callback=default_api.payments.refund_payment,
payment_id=payment_response.id,
refund_request=refund_request)
assert_response(refund_response,
'reference',
'action_id',
'_links')
def test_should_refund_card_payment_idempotently(default_api):
payment_response = make_card_payment(default_api, capture_on=datetime.now(timezone.utc))
refund_request = RefundRequest()
refund_request.reference = new_uuid()
refund_request.amount = 1
idempotency_key = new_idempotency_key()
refund_response_1 = retriable(callback=default_api.payments.refund_payment,
payment_id=payment_response.id,
refund_request=refund_request,
idempotency_key=idempotency_key)
assert_response(refund_response_1)
refund_response_2 = retriable(callback=default_api.payments.refund_payment,
payment_id=payment_response.id,
refund_request=refund_request,
idempotency_key=idempotency_key)
assert_response(refund_response_2)
assert refund_response_1.action_id == refund_response_2.action_id
| [
"checkout_sdk.payments.payments.RefundRequest",
"tests.checkout_test_utils.new_idempotency_key",
"tests.checkout_test_utils.retriable",
"datetime.datetime.now",
"tests.checkout_test_utils.new_uuid",
"tests.checkout_test_utils.assert_response"
] | [((466, 481), 'checkout_sdk.payments.payments.RefundRequest', 'RefundRequest', ([], {}), '()\n', (479, 481), False, 'from checkout_sdk.payments.payments import RefundRequest\n'), ((513, 523), 'tests.checkout_test_utils.new_uuid', 'new_uuid', ([], {}), '()\n', (521, 523), False, 'from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable\n'), ((577, 700), 'tests.checkout_test_utils.retriable', 'retriable', ([], {'callback': 'default_api.payments.refund_payment', 'payment_id': 'payment_response.id', 'refund_request': 'refund_request'}), '(callback=default_api.payments.refund_payment, payment_id=\n payment_response.id, refund_request=refund_request)\n', (586, 700), False, 'from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable\n'), ((765, 833), 'tests.checkout_test_utils.assert_response', 'assert_response', (['refund_response', '"""reference"""', '"""action_id"""', '"""_links"""'], {}), "(refund_response, 'reference', 'action_id', '_links')\n", (780, 833), False, 'from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable\n'), ((1074, 1089), 'checkout_sdk.payments.payments.RefundRequest', 'RefundRequest', ([], {}), '()\n', (1087, 1089), False, 'from checkout_sdk.payments.payments import RefundRequest\n'), ((1121, 1131), 'tests.checkout_test_utils.new_uuid', 'new_uuid', ([], {}), '()\n', (1129, 1131), False, 'from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable\n'), ((1185, 1206), 'tests.checkout_test_utils.new_idempotency_key', 'new_idempotency_key', ([], {}), '()\n', (1204, 1206), False, 'from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable\n'), ((1232, 1393), 'tests.checkout_test_utils.retriable', 'retriable', ([], {'callback': 'default_api.payments.refund_payment', 'payment_id': 'payment_response.id', 'refund_request': 'refund_request', 'idempotency_key': 'idempotency_key'}), '(callback=default_api.payments.refund_payment, payment_id=\n payment_response.id, refund_request=refund_request, idempotency_key=\n idempotency_key)\n', (1241, 1393), False, 'from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable\n'), ((1490, 1524), 'tests.checkout_test_utils.assert_response', 'assert_response', (['refund_response_1'], {}), '(refund_response_1)\n', (1505, 1524), False, 'from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable\n'), ((1550, 1711), 'tests.checkout_test_utils.retriable', 'retriable', ([], {'callback': 'default_api.payments.refund_payment', 'payment_id': 'payment_response.id', 'refund_request': 'refund_request', 'idempotency_key': 'idempotency_key'}), '(callback=default_api.payments.refund_payment, payment_id=\n payment_response.id, refund_request=refund_request, idempotency_key=\n idempotency_key)\n', (1559, 1711), False, 'from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable\n'), ((1808, 1842), 'tests.checkout_test_utils.assert_response', 'assert_response', (['refund_response_2'], {}), '(refund_response_2)\n', (1823, 1842), False, 'from tests.checkout_test_utils import new_uuid, assert_response, new_idempotency_key, retriable\n'), ((416, 442), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (428, 442), False, 'from datetime import datetime, timezone\n'), ((1024, 1050), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], 
{}), '(timezone.utc)\n', (1036, 1050), False, 'from datetime import datetime, timezone\n')] |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
command = f"python \
{os.path.join(BASE_DIR, 'tkpy3_tools/i18n_tools', 'pygettext.py')} -o locale/tkpy3.pot __init__.py"
os.chdir(BASE_DIR)
# python E:\TkPy3\TkPy3\tkpy3_tools\i18n_tools\msgfmt.py -o tkpy3.mo tkpy3.po
os.system(command)
| [
"os.chdir",
"os.system",
"os.path.abspath",
"os.path.join"
] | [((203, 221), 'os.chdir', 'os.chdir', (['BASE_DIR'], {}), '(BASE_DIR)\n', (211, 221), False, 'import os\n'), ((300, 318), 'os.system', 'os.system', (['command'], {}), '(command)\n', (309, 318), False, 'import os\n'), ((54, 79), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (69, 79), False, 'import os\n'), ((115, 179), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""tkpy3_tools/i18n_tools"""', '"""pygettext.py"""'], {}), "(BASE_DIR, 'tkpy3_tools/i18n_tools', 'pygettext.py')\n", (127, 179), False, 'import os\n')] |
import numpy as np
def weighted_percentile_multiple(data, wt, percentiles):
assert np.greater_equal(percentiles, 0.0).all(), "Percentiles less than zero"
assert np.less_equal(percentiles, 1.0).all(), "Percentiles greater than one"
data = np.asarray(data)
assert len(data.shape) == 1
if wt is None:
        wt = np.ones(data.shape, float)
    else:
        wt = np.asarray(wt, float)
assert wt.shape == data.shape
assert np.greater_equal(wt, 0.0).all(), "Not all weights are non-negative."
assert len(wt.shape) == 1
n = data.shape[0]
assert n > 0
i = np.argsort(data)
sd = np.take(data, i, axis=0)
sw = np.take(wt, i, axis=0)
aw = np.add.accumulate(sw)
if not aw[-1] > 0:
raise ValueError ('Nonpositive weight sum')
w = (aw-0.5*sw)/aw[-1]
spots = np.searchsorted(w, percentiles)
o = []
for (s, p) in zip(spots, percentiles):
if s == 0:
o.append(sd[0])
elif s == n:
o.append(sd[n-1])
else:
f1 = (w[s] - p)/(w[s] - w[s-1])
f2 = (p - w[s-1])/(w[s] - w[s-1])
assert f1>=0 and f2>=0 and f1<=1 and f2<=1
assert abs(f1+f2-1.0) < 1e-6
o.append(sd[s-1]*f1 + sd[s]*f2)
return o
def weighted_percentile_single(data, wt, percentile):
assert np.greater_equal(percentile, 0.0).all(), "Percentiles less than zero"
assert np.less_equal(percentile, 1.0).all(), "Percentiles greater than one"
data = np.asarray(data)
assert len(data.shape) == 1
if wt is None:
        wt = np.ones(data.shape, float)
    else:
        wt = np.asarray(wt, float)
assert wt.shape == data.shape
assert np.greater_equal(wt, 0.0).all(), "Not all weights are non-negative."
assert len(wt.shape) == 1
n = data.shape[0]
assert n > 0
i = np.argsort(data)
sd = np.take(data, i, axis=0)
sw = np.take(wt, i, axis=0)
aw = np.add.accumulate(sw)
if not aw[-1] > 0:
raise ValueError ('Nonpositive weight sum')
w = (aw-0.5*sw)/aw[-1]
spot = np.searchsorted(w, percentile)
# o = []
if spot == 0:
return sd[0]
elif spot == n:
return sd[n-1]
else:
f1 = (w[spot] - percentile)/(w[spot] - w[spot-1])
f2 = (percentile - w[spot-1])/(w[spot] - w[spot-1])
assert f1>=0 and f2>=0 and f1<=1 and f2<=1
assert abs(f1+f2-1.0) < 1e-6
return (sd[spot-1]*f1 + sd[spot]*f2)
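# Worked example (hypothetical inputs): with equal weights the weighted median
# interpolates between the two middle samples,
#   weighted_percentile_single([1, 2, 3, 4], [1, 1, 1, 1], 0.5)              -> 2.5
# while the multiple-percentile variant takes a list of percentiles instead:
#   weighted_percentile_multiple([1, 2, 3, 4], [1, 1, 1, 1], [0.25, 0.5])    -> [1.5, 2.5]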
| [
"numpy.ones",
"numpy.searchsorted",
"numpy.less_equal",
"numpy.asarray",
"numpy.argsort",
"numpy.take",
"numpy.add.accumulate",
"numpy.greater_equal"
] | [((252, 268), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (262, 268), True, 'import numpy as np\n'), ((610, 626), 'numpy.argsort', 'np.argsort', (['data'], {}), '(data)\n', (620, 626), True, 'import numpy as np\n'), ((636, 660), 'numpy.take', 'np.take', (['data', 'i'], {'axis': '(0)'}), '(data, i, axis=0)\n', (643, 660), True, 'import numpy as np\n'), ((670, 692), 'numpy.take', 'np.take', (['wt', 'i'], {'axis': '(0)'}), '(wt, i, axis=0)\n', (677, 692), True, 'import numpy as np\n'), ((702, 723), 'numpy.add.accumulate', 'np.add.accumulate', (['sw'], {}), '(sw)\n', (719, 723), True, 'import numpy as np\n'), ((838, 869), 'numpy.searchsorted', 'np.searchsorted', (['w', 'percentiles'], {}), '(w, percentiles)\n', (853, 869), True, 'import numpy as np\n'), ((1507, 1523), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1517, 1523), True, 'import numpy as np\n'), ((1865, 1881), 'numpy.argsort', 'np.argsort', (['data'], {}), '(data)\n', (1875, 1881), True, 'import numpy as np\n'), ((1891, 1915), 'numpy.take', 'np.take', (['data', 'i'], {'axis': '(0)'}), '(data, i, axis=0)\n', (1898, 1915), True, 'import numpy as np\n'), ((1925, 1947), 'numpy.take', 'np.take', (['wt', 'i'], {'axis': '(0)'}), '(wt, i, axis=0)\n', (1932, 1947), True, 'import numpy as np\n'), ((1957, 1978), 'numpy.add.accumulate', 'np.add.accumulate', (['sw'], {}), '(sw)\n', (1974, 1978), True, 'import numpy as np\n'), ((2092, 2122), 'numpy.searchsorted', 'np.searchsorted', (['w', 'percentile'], {}), '(w, percentile)\n', (2107, 2122), True, 'import numpy as np\n'), ((333, 362), 'numpy.ones', 'np.ones', (['data.shape', 'np.float'], {}), '(data.shape, np.float)\n', (340, 362), True, 'import numpy as np\n'), ((386, 410), 'numpy.asarray', 'np.asarray', (['wt', 'np.float'], {}), '(wt, np.float)\n', (396, 410), True, 'import numpy as np\n'), ((1588, 1617), 'numpy.ones', 'np.ones', (['data.shape', 'np.float'], {}), '(data.shape, np.float)\n', (1595, 1617), True, 'import numpy as np\n'), ((1641, 1665), 'numpy.asarray', 'np.asarray', (['wt', 'np.float'], {}), '(wt, np.float)\n', (1651, 1665), True, 'import numpy as np\n'), ((89, 123), 'numpy.greater_equal', 'np.greater_equal', (['percentiles', '(0.0)'], {}), '(percentiles, 0.0)\n', (105, 123), True, 'import numpy as np\n'), ((171, 202), 'numpy.less_equal', 'np.less_equal', (['percentiles', '(1.0)'], {}), '(percentiles, 1.0)\n', (184, 202), True, 'import numpy as np\n'), ((1346, 1379), 'numpy.greater_equal', 'np.greater_equal', (['percentile', '(0.0)'], {}), '(percentile, 0.0)\n', (1362, 1379), True, 'import numpy as np\n'), ((1427, 1457), 'numpy.less_equal', 'np.less_equal', (['percentile', '(1.0)'], {}), '(percentile, 1.0)\n', (1440, 1457), True, 'import numpy as np\n'), ((464, 489), 'numpy.greater_equal', 'np.greater_equal', (['wt', '(0.0)'], {}), '(wt, 0.0)\n', (480, 489), True, 'import numpy as np\n'), ((1719, 1744), 'numpy.greater_equal', 'np.greater_equal', (['wt', '(0.0)'], {}), '(wt, 0.0)\n', (1735, 1744), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import traj
import numpy as np
import math
def set_velocities_at_stop_points_to_zero(path):
'''
    this function finds the indices of the stop points along the path, i.e. the points at which the velocity direction changes,
    and then resets the velocities at these points to zero
'''
seg_vel_dir = [ math.copysign(1, (p1-p0) ) for p0, p1 in zip(path[:-1], path[1:]) ]
wpts_vel_dir = [seg_vel_dir[0]] + [ (p0+p1)/2 for p0, p1 in zip(seg_vel_dir[:-1], seg_vel_dir[1:]) ] + [seg_vel_dir[-1]]
arr = np.array(wpts_vel_dir)
stp_pts_idx = np.where(arr==0)[0]
return wpts_vel_dir, stp_pts_idx
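# Illustrative example (hypothetical path): for path = [0, 1, 2, 1, 0] the
# segment directions are [1, 1, -1, -1], the averaged waypoint directions are
# [1, 1, 0, -1, -1], and the returned stop-point indices are [2] -- the
# waypoint where the motion reverses.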
def max_vel_at_each_waypoint_one_dof_path_case(path, v_init, abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk):
'''
this function finds the maximum velocity at each waypoint along a 1-dof path "path", starting with initial velocity "v_init"
'''
wpts_vel_dir, stp_idx= traj.set_velocities_at_stop_points_to_zero(path)
stp_idx = [id for id in stp_idx]
n_wpts = len(path)
max_vel_vec = [ abs(v_init) ]
for wpt in range(0, n_wpts-1 ) :
if wpt+1 in stp_idx:
v_nxt = 0.0
else:
tj, ta, tv, v_nxt = traj.max_reachable_vel_per_segment( abs(path[wpt] - path[wpt+1]), max_vel_vec[wpt], abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk)
max_vel_vec.append( v_nxt )
return max_vel_vec
def reachable_vel_at_each_waypoint_one_dof_path_case(path, v_start, v_end, abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk):
'''
    this function finds the estimated reachable velocity at each waypoint along a 1-dof path "path", with a starting velocity "v_start" and a final velocity "v_end",
    taking into consideration vel/acc/jrk constraints. this is the same idea behind the TOPP-RA paper: "A New Approach to Time-Optimal Path Parameterization
based on Reachability Analysis [<NAME> 2018]
'''
frwd_max_vel = max_vel_at_each_waypoint_one_dof_path_case(path, v_start, abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk)
bkwd_max_vel = max_vel_at_each_waypoint_one_dof_path_case(path[::-1], v_end, abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk) #L[::-1]
bkwd_max_vel.reverse()
# check condition when v_start or v_end is not feasible: v_start > max_v_start calculated using the backward loop or Vs
if frwd_max_vel[0] > bkwd_max_vel[0] or frwd_max_vel[-1] < bkwd_max_vel[-1]:
raise ValueError("combination of v_start({}) & v_end({}) is not feasible".format(frwd_max_vel[0], bkwd_max_vel[-1] ) )
    # calculate max_reachable_vels that guarantee v_end at the end of the trajectory for this portion of the trajectory
estimated_vel = [ min(v) for v in zip( frwd_max_vel, bkwd_max_vel)]
# retrieve the direction at each way point
wpts_vel_dir, stp_idx= traj.set_velocities_at_stop_points_to_zero(path)
estimated_vel=[v*dir for v, dir in zip(estimated_vel, wpts_vel_dir) ]
return frwd_max_vel, bkwd_max_vel, estimated_vel
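# Sketch of the idea (hypothetical numbers): if the forward pass bounds the
# waypoint velocities by [0, 2, 3, 3] and the backward pass by [3, 3, 2, 0],
# the estimated reachable velocity is the elementwise minimum [0, 2, 2, 0],
# signed afterwards with the direction of motion at each waypoint.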
def reachable_vel_at_each_waypoint_multi_dof_path_case(path, v_start, v_end, abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk):
'''
    this function finds the estimated reachable velocity at each waypoint along an n-dof path "path", with starting velocities "v_start" and final velocities "v_end",
    taking into consideration vel/acc/jrk constraints. this is the same idea behind the TOPP-RA paper: "A New Approach to Time-Optimal Path Parameterization
based on Reachability Analysis [<NAME> 2018]
paper link: https://www.researchgate.net/publication/318671280_A_New_Approach_to_Time-Optimal_Path_Parameterization_Based_on_Reachability_Analysis
'''
if len(path) != len(v_start) or len(path) != len(v_end):
raise ValueError("Dimensions are not equal: len(path)={}, len(v_start)={}, len(v_end)={}".format(len(path) , len(v_start) , len(v_end) ) )
reachable_vel = []
for pth in range(0, len(path) ):
frwd_max_vel, bkwd_max_vel, estimated_vel = reachable_vel_at_each_waypoint_one_dof_path_case(path[pth], v_start[pth], v_end[pth], abs_max_pos, abs_max_vel, abs_max_acc, abs_max_jrk)
reachable_vel.append( estimated_vel )
return reachable_vel
| [
"math.copysign",
"numpy.array",
"numpy.where",
"traj.set_velocities_at_stop_points_to_zero"
] | [((549, 571), 'numpy.array', 'np.array', (['wpts_vel_dir'], {}), '(wpts_vel_dir)\n', (557, 571), True, 'import numpy as np\n'), ((947, 995), 'traj.set_velocities_at_stop_points_to_zero', 'traj.set_velocities_at_stop_points_to_zero', (['path'], {}), '(path)\n', (989, 995), False, 'import traj\n'), ((2870, 2918), 'traj.set_velocities_at_stop_points_to_zero', 'traj.set_velocities_at_stop_points_to_zero', (['path'], {}), '(path)\n', (2912, 2918), False, 'import traj\n'), ((342, 367), 'math.copysign', 'math.copysign', (['(1)', '(p1 - p0)'], {}), '(1, p1 - p0)\n', (355, 367), False, 'import math\n'), ((590, 608), 'numpy.where', 'np.where', (['(arr == 0)'], {}), '(arr == 0)\n', (598, 608), True, 'import numpy as np\n')] |
import argparse
import logging
from .settings import Settings
from .state import State
from .stats import Details, SortIndex, StatGroup
from .util.media_type import MediaType
def gen_auto_complete(parser):
""" Support autocomplete via argcomplete if installed"""
try: # pragma: no cover
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
def add_parser_helper(sub_parser, name, func_str=None, **kwargs):
parser = sub_parser.add_parser(name, **kwargs)
parser.set_defaults(func_str=func_str or name)
return parser
def parse_args(args=None, media_reader=None, already_upgraded=False):
SPECIAL_PARAM_NAMES = {"auto", "clear_cookies", "log_level", "no_save", "type", "func", "readonly", "func_str", "tmp_dir"}
state = State(Settings()) if not media_reader else media_reader.state
parser = argparse.ArgumentParser()
parser.add_argument("--auto", action="store_const", const=True, default=False, help="Automatically select input instead of prompting")
parser.add_argument("--clear-cookies", default=False, action="store_const", const=True, help="Clear all cached cookies")
parser.add_argument("--log-level", default="INFO", choices=logging._levelToName.values(), help="Controls verbosity of logs")
parser.add_argument("--no-save", default=False, action="store_const", const=True, help="Do not save state/cookies")
parser.add_argument("--tmp-dir", default=False, action="store_const", const=True, help="Save state to tmp-dir")
sub_parsers = parser.add_subparsers(dest="type")
readonly_parsers = argparse.ArgumentParser(add_help=False)
readonly_parsers.set_defaults(readonly=True)
sub_search_parsers = argparse.ArgumentParser(add_help=False)
sub_search_parsers.add_argument("--no-sort-by-preferred-lang", action="store_const", const=None, default=state.settings.get_prefered_lang_key, help="Sort results by preferred Settings:preferred_primary_language", dest="sort_func")
sub_search_parsers.add_argument("--exact", action="store_const", const=True, default=False, help="Only show exact matches")
sub_search_parsers.add_argument("--limit", type=int, default=10, help="How many chapters will be downloaded per series")
sub_search_parsers.add_argument("--media-type", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
sub_search_parsers.add_argument("--server", choices=state.get_server_ids(), dest="server_id")
# add remove
search_parsers = add_parser_helper(sub_parsers, "search_for_media", aliases=["search"], parents=[sub_search_parsers], description="Search for and add media")
search_parsers.add_argument("name", help="The string to search by")
migrate_parsers = add_parser_helper(sub_parsers, "migrate", description="Move media to another server")
migrate_parsers.add_argument("--exact", action="store_const", const=True, default=False, help="Only show exact matches")
migrate_parsers.add_argument("--force-same-id", action="store_const", const=True, default=False, help="Forces the media id to be the same")
migrate_parsers.add_argument("--self", action="store_const", const=True, default=False, help="Re-adds the media", dest="move_self")
migrate_parsers.add_argument("name", choices=state.get_all_names(), help="Global id of media to move")
add_parsers = add_parser_helper(sub_parsers, "add-from-url", description="Add media by human viewable location")
add_parsers.add_argument("url", help="Either the series home page or the page for an arbitrary chapter (depends on server)")
remove_parsers = add_parser_helper(sub_parsers, "remove", func_str="remove-media", description="Remove media")
remove_parsers.add_argument("id", choices=state.get_all_single_names(), help="Global id of media to remove")
# update and download
update_parser = add_parser_helper(sub_parsers, "update", description="Update all media")
update_parser.add_argument("--media-type", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
update_parser.add_argument("--no-shuffle", default=False, action="store_const", const=True)
update_parser.add_argument("name", choices=state.get_all_names(), default=None, nargs="?", help="Update only specified media")
download_parser = add_parser_helper(sub_parsers, "download-unread-chapters", aliases=["download-unread"], help="Downloads all chapters that have not been read")
download_parser.add_argument("--limit", "-l", type=int, default=0, help="How many chapters will be downloaded per series")
download_parser.add_argument("--media-type", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
download_parser.add_argument("--stream-index", "-q", default=0, type=int)
download_parser.add_argument("name", choices=state.get_all_names(), default=None, nargs="?", help="Download only series determined by name")
download_specific_parser = add_parser_helper(sub_parsers, "download_specific_chapters", aliases=["download"], help="Used to download specific chapters")
download_specific_parser.add_argument("--stream-index", "-q", default=0, type=int)
download_specific_parser.add_argument("name", choices=state.get_all_single_names())
download_specific_parser.add_argument("start", type=float, default=0, help="Starting chapter (inclusive)")
download_specific_parser.add_argument("end", type=float, nargs="?", default=0, help="Ending chapter (inclusive)")
# media consumption
bundle_parser = add_parser_helper(sub_parsers, "bundle-unread-chapters", aliases=["bundle"], help="Bundle individual manga pages into a single file")
bundle_parser.add_argument("--ignore-errors", "-i", default=False, action="store_const", const=True)
bundle_parser.add_argument("--limit", "-l", default=0, type=int)
bundle_parser.add_argument("--shuffle", "-s", default=False, action="store_const", const=True)
bundle_parser.add_argument("name", choices=state.get_all_names(MediaType.MANGA), default=None, nargs="?")
read_parser = add_parser_helper(sub_parsers, "read_bundle", aliases=["read"], help="Open a saved bundle for reading. If the command exits with status 0, then the container chapters will be marked read")
read_parser.add_argument("name", default=None, nargs="?", choices=state.bundles.keys(), help="Name of the bundle")
sub_consume_parsers = argparse.ArgumentParser(add_help=False)
sub_consume_parsers.add_argument("--abs", default=False, action="store_const", const=True, dest="force_abs")
sub_consume_parsers.add_argument("--any-unread", "-a", default=False, action="store_const", const=True)
sub_consume_parsers.add_argument("--limit", "-l", default=0, type=int)
sub_consume_parsers.add_argument("--shuffle", "-s", default=False, action="store_const", const=True)
sub_consume_parsers.add_argument("--stream-index", "-q", default=0, type=int)
view_parser = add_parser_helper(sub_parsers, "view", func_str="play", parents=[sub_consume_parsers], help="View pages of chapters")
view_parser.add_argument("name", choices=state.get_all_names(MediaType.MANGA | MediaType.NOVEL), default=None, nargs="?")
view_parser.add_argument("num_list", default=None, nargs="*", type=float)
view_parser.set_defaults(media_type=MediaType.MANGA | MediaType.NOVEL)
play_parser = add_parser_helper(sub_parsers, "play", parents=[sub_consume_parsers], help="Either stream anime or directly play downloaded media")
play_parser.add_argument("name", choices=state.get_all_names(MediaType.ANIME), default=None, nargs="?")
play_parser.add_argument("num_list", default=None, nargs="*", type=float)
play_parser.set_defaults(media_type=MediaType.ANIME)
consume_parser = add_parser_helper(sub_parsers, "consume", func_str="play", parents=[sub_consume_parsers], help="Either view or play media depending on type")
consume_parser.add_argument("--media-type", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
consume_parser.add_argument("name", choices=state.get_all_names(MediaType.ANIME), default=None, nargs="?")
consume_parser.add_argument("num_list", default=None, nargs="*", type=float)
steam_parser = add_parser_helper(sub_parsers, "stream", help="Streams anime; this won't download any files; if the media is already downloaded, it will be used directly")
steam_parser.add_argument("--cont", "-c", default=False, action="store_const", const=True)
steam_parser.add_argument("--download", "-d", default=False, action="store_const", const=True)
steam_parser.add_argument("--offset", type=float, default=0, help="Offset the url by N chapters")
steam_parser.add_argument("--stream-index", "-q", default=0, type=int)
steam_parser.add_argument("url")
    stream_url_parser = add_parser_helper(sub_parsers, "get-stream-url", help="Gets the streaming url for the media")
stream_url_parser.add_argument("--abs", default=False, action="store_const", const=True, dest="force_abs")
stream_url_parser.add_argument("--limit", "-l", default=0, type=int)
stream_url_parser.add_argument("name", choices=state.get_all_names(MediaType.ANIME), default=None, nargs="?")
stream_url_parser.add_argument("num_list", default=None, nargs="*", type=float)
# clean
clean_parser = add_parser_helper(sub_parsers, "clean", help="Removes unused media")
clean_parser.add_argument("--bundles", "-b", default=False, action="store_const", const=True, help="Removes bundle info")
clean_parser.add_argument("--include-local-servers", default=False, action="store_const", const=True, help="Doesn't skip local servers")
clean_parser.add_argument("--remove-disabled-servers", default=False, action="store_const", const=True, help="Removes all servers not belonging to the active list")
    clean_parser.add_argument("--remove-not-on-disk", default=False, action="store_const", const=True, help="Removes references where the backing directory is empty")
clean_parser.add_argument("--remove-read", default=False, action="store_const", const=True, help="Removes all read chapters")
# external
auto_import_parser = add_parser_helper(sub_parsers, "auto-import", func_str="auto-import-media")
auto_import_parser.add_argument("--link", action="store_const", const=True, default=False, help="Hard links instead of just moving the file")
import_parser = add_parser_helper(sub_parsers, "import", func_str="import-media")
import_parser.add_argument("--link", action="store_const", const=True, default=False, help="Hard links instead of just moving the file")
import_parser.add_argument("--media-type", default="ANIME", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
import_parser.add_argument("--name", default=None, nargs="?", help="Name Media")
import_parser.add_argument("--skip-add", action="store_const", const=True, default=False, help="Don't auto add media")
import_parser.add_argument("files", nargs="+")
# info
list_parser = add_parser_helper(sub_parsers, "list", func_str="list-media", parents=[readonly_parsers])
list_parser.add_argument("--csv", action="store_const", const=True, default=False, help="List in a script friendly format")
list_parser.add_argument("--media-type", default=None, choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
list_parser.add_argument("--out-of-date-only", default=False, action="store_const", const=True)
list_parser.add_argument("--tag", const="", nargs="?")
list_parser.add_argument("name", nargs="?", default=None, choices=state.get_server_ids())
chapter_parsers = add_parser_helper(sub_parsers, "list-chapters", parents=[readonly_parsers])
chapter_parsers.add_argument("--show-ids", action="store_const", const=True, default=False)
chapter_parsers.add_argument("name", choices=state.get_all_names())
add_parser_helper(sub_parsers, "list-servers")
list_from_servers = add_parser_helper(sub_parsers, "list_some_media_from_server", aliases=["list-from-servers"])
list_from_servers.add_argument("--limit", "-l", type=int, default=None)
list_from_servers.add_argument("server_id", choices=state.get_server_ids())
tag_parser = add_parser_helper(sub_parsers, "tag")
tag_parser.add_argument("tag_name")
tag_parser.add_argument("name", choices=state.get_all_names(), default=None, nargs="?")
untag_parser = add_parser_helper(sub_parsers, "untag")
untag_parser.add_argument("tag_name")
untag_parser.add_argument("name", choices=state.get_all_names(), default=None, nargs="?")
# credentials
login_parser = add_parser_helper(sub_parsers, "test-login", aliases=["login"], description="Relogin to all servers")
login_parser.add_argument("--force", "-f", action="store_const", const=True, default=False, help="Force re-login")
login_parser.add_argument("server_ids", default=None, choices=state.get_server_ids_with_logins(), nargs="?")
# stats
stats_parser = add_parser_helper(sub_parsers, "stats", func_str="list_stats", description="Show tracker stats", parents=[readonly_parsers])
stats_parser.add_argument("--details-type", "-d", choices=list(Details), type=Details.__getattr__, default=Details.NO_DETAILS, help="How details are displayed")
stats_parser.add_argument("--details-limit", "-l", type=int, default=None, help="How many details are shown")
stats_parser.add_argument("--media-type", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
stats_parser.add_argument("--min-count", "-m", type=int, default=0, help="Ignore groups with fewer than N elements")
stats_parser.add_argument("--min-score", type=float, default=1, help="Ignore entries with score less than N")
stats_parser.add_argument("--sort-index", "-s", choices=list(SortIndex), type=SortIndex.__getattr__, default=SortIndex.SCORE.name, help="Choose sort index")
stats_parser.add_argument("--stat-group", "-g", choices=list(StatGroup), type=StatGroup.__getattr__, default=StatGroup.NAME, help="Choose stat grouping")
stats_parser.add_argument("username", default=None, nargs="?", help="Username or id to load info of; defaults to the currently authenticated user")
stats_update_parser = add_parser_helper(sub_parsers, "stats-update", description="Update tracker stats")
stats_update_parser.add_argument("--user-id", default=None, help="id to load tracking info of")
stats_update_parser.add_argument("username", default=None, nargs="?", help="Username to load info of; defaults to the currently authenticated user")
# trackers and progress
load_parser = add_parser_helper(sub_parsers, "load_from_tracker", aliases=["load"], parents=[sub_search_parsers], description="Attempts to add all tracked media")
load_parser.add_argument("--force", "-f", action="store_const", const=True, default=False, help="Force set of read chapters to be in sync with progress")
load_parser.add_argument("--local-only", action="store_const", const=True, default=False, help="Only attempt to find a match among local media")
load_parser.add_argument("--no-add", action="store_const", const=True, default=False, help="Don't search for and add new media")
load_parser.add_argument("--remove", action="store_const", const=True, default=False, help="Remove media that was tracked but no longer active on tracker")
load_parser.add_argument("--user-id", default=None, nargs="?", help="id to load tracking info of")
load_parser.add_argument("user_name", default=None, nargs="?", help="Username to load tracking info of; defaults to the currently authenticated user")
untrack_paraser = add_parser_helper(sub_parsers, "remove_tracker", aliases=["untrack"], description="Removing tracker info")
untrack_paraser.add_argument("--media-type", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
untrack_paraser.add_argument("name", choices=state.get_all_single_names(), nargs="?", help="Media to untrack")
copy_tracker_parser = add_parser_helper(sub_parsers, "copy-tracker", description="Copies tracking info from src to dest")
copy_tracker_parser.add_argument("src", choices=state.get_all_single_names(), help="Src media")
copy_tracker_parser.add_argument("dst", choices=state.get_all_single_names(), help="Dst media")
sync_parser = add_parser_helper(sub_parsers, "sync_progress", aliases=["sync"], description="Attempts to update tracker with current progress")
sync_parser.add_argument("--dry-run", action="store_const", const=True, default=False, help="Don't actually update trackers")
sync_parser.add_argument("--force", "-f", action="store_const", const=True, default=False, help="Allow progress to decrease")
sync_parser.add_argument("--media-type", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
sync_parser.add_argument("name", choices=state.get_all_names(), nargs="?", help="Media to sync")
mark_unread_parsers = add_parser_helper(sub_parsers, "mark-unread", description="Mark all known chapters as unread")
mark_unread_parsers.add_argument("--media-type", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
mark_unread_parsers.add_argument("name", default=None, choices=state.get_all_names(), nargs="?")
mark_unread_parsers.set_defaults(func_str="mark_read", force=True, N=-1, abs=True)
mark_parsers = add_parser_helper(sub_parsers, "mark-read", description="Mark all known chapters as read")
mark_parsers.add_argument("--abs", action="store_const", const=True, default=False, help="Treat N as an abs number")
mark_parsers.add_argument("--force", "-f", action="store_const", const=True, default=False, help="Allow chapters to be marked as unread")
mark_parsers.add_argument("--media-type", choices=list(MediaType), type=MediaType.__getattr__, help="Filter for a specific type")
mark_parsers.add_argument("name", default=None, choices=state.get_all_names(), nargs="?")
mark_parsers.add_argument("N", type=int, default=0, nargs="?", help="Consider the last N chapters as not up-to-date")
offset_parser = add_parser_helper(sub_parsers, "offset")
offset_parser.add_argument("name", default=None, choices=state.get_all_names())
offset_parser.add_argument("offset", type=int, default=None, nargs="?", help="Decrease the chapter number reported by the server by N")
# upgrade state
add_parser_helper(sub_parsers, "upgrade-state", aliases=["upgrade"], description="Upgrade old state to newer format")
# store password state
set_password_parser = add_parser_helper(sub_parsers, "set-password", description="Set password")
set_password_parser.add_argument("server_id", choices=state.get_server_ids_with_logins())
set_password_parser.add_argument("username")
set_password_parser.set_defaults(func=state.settings.store_credentials)
auth_parser = add_parser_helper(sub_parsers, "auth")
auth_parser.add_argument("--just-print", action="store_const", const=True, default=False, help="Just print the auth url")
auth_parser.add_argument("tracker_id", choices=state.get_server_ids_with_logins(), nargs="?")
gen_auto_complete(parser)
namespace = parser.parse_args(args)
logging.getLogger().setLevel(namespace.log_level)
if namespace.tmp_dir:
state.settings.set_tmp_dir()
namespace.no_save = True
action = namespace.type
kwargs = {k: v for k, v in vars(namespace).items() if k not in SPECIAL_PARAM_NAMES}
obj = state
if not "readonly" in namespace:
# Import only when needed because the act of importing is slow
from .media_reader_cli import MediaReaderCLI
media_reader = media_reader if media_reader else MediaReaderCLI(state)
if state.is_out_of_date_minor():
media_reader.upgrade_state()
if namespace.clear_cookies:
media_reader.session.cookies.clear()
obj = media_reader
media_reader.auto_select = namespace.auto
else:
namespace.no_save = True
try:
if action:
func = namespace.func if "func" in namespace else getattr(obj, (namespace.func_str if "func_str" in namespace else action).replace("-", "_"))
func(**kwargs)
finally:
if not namespace.no_save and ("dry_run" not in namespace or not namespace.dry_run):
state.save()
| [
"logging.getLogger",
"argcomplete.autocomplete",
"logging._levelToName.values",
"argparse.ArgumentParser"
] | [((880, 905), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (903, 905), False, 'import argparse\n'), ((1613, 1652), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (1636, 1652), False, 'import argparse\n'), ((1728, 1767), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (1751, 1767), False, 'import argparse\n'), ((6457, 6496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (6480, 6496), False, 'import argparse\n'), ((334, 366), 'argcomplete.autocomplete', 'argcomplete.autocomplete', (['parser'], {}), '(parser)\n', (358, 366), False, 'import argcomplete\n'), ((1233, 1262), 'logging._levelToName.values', 'logging._levelToName.values', ([], {}), '()\n', (1260, 1262), False, 'import logging\n'), ((19455, 19474), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (19472, 19474), False, 'import logging\n')] |
import pytest
from fan_tools.django.fields import ChoicesEnum
from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round
@pytest.fixture(scope='class')
def local_cache():
pass
@pytest.fixture
def session_settings():
pass
@pytest.fixture
def foobar_enum():
class FooBarEnum(ChoicesEnum):
foo = 1
bar = 2
return FooBarEnum
def test_choices_enum(foobar_enum):
expected = [(1, 'foo'), (2, 'bar')]
choices = foobar_enum.get_choices()
assert choices == expected, choices
def test_usd_round():
assert str(usd_round('12.9800000003')) == '12.98'
def test_04_dict_contains():
a = {'key': 'aa', 'msg': {'bb': 1, 'cc': True}}
b = {'key': 'aa', 'msg': {'bb': 2, 'cc': True}}
assert dict_contains(a, {'key': 'aa'})
assert dict_contains(b, {'key': 'aa'})
assert dict_contains(a, {'msg': {'bb': 1}})
assert dict_contains(b, {'msg': {'bb': 2}})
assert not dict_contains(a, {'msg': {'bb': 2}})
assert not dict_contains(b, {'msg': {'bb': 1}})
assert dict_contains(a, {'msg': {'bb': 1, 'cc': True}})
assert dict_contains(b, {'msg': {'bb': 2, 'cc': True}})
assert not dict_contains(a, {'msg': {'bb': 1, 'cc': False}})
assert not dict_contains(b, {'msg': {'bb': 2, 'cc': False}})
assert not dict_contains(a, {'msg': {'bb': 1, 'nested': []}})
def test_05_expand_dot():
assert expand_dot({'msg.payload.status': 'waiting_release'}) == {
'msg': {'payload': {'status': 'waiting_release'}}
}
def test_06_slide():
assert list(slide(range(3))) == [(0, 1), (1, 2), (2, None)]
@pytest.mark.asyncio
async def test_07_async_reduce():
async def sum_two(a, b):
return a + b
assert await areduce(sum_two, [1, 2, 3, 4, 5]) == 15
assert await areduce(sum_two, [1, 2, 3, 4, 5], initial=100) == 115
| [
"fan_tools.python.usd_round",
"fan_tools.python.expand_dot",
"fan_tools.python.dict_contains",
"pytest.fixture",
"fan_tools.python.areduce"
] | [((148, 177), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (162, 177), False, 'import pytest\n'), ((767, 798), 'fan_tools.python.dict_contains', 'dict_contains', (['a', "{'key': 'aa'}"], {}), "(a, {'key': 'aa'})\n", (780, 798), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((810, 841), 'fan_tools.python.dict_contains', 'dict_contains', (['b', "{'key': 'aa'}"], {}), "(b, {'key': 'aa'})\n", (823, 841), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((853, 889), 'fan_tools.python.dict_contains', 'dict_contains', (['a', "{'msg': {'bb': 1}}"], {}), "(a, {'msg': {'bb': 1}})\n", (866, 889), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((901, 937), 'fan_tools.python.dict_contains', 'dict_contains', (['b', "{'msg': {'bb': 2}}"], {}), "(b, {'msg': {'bb': 2}})\n", (914, 937), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((1055, 1103), 'fan_tools.python.dict_contains', 'dict_contains', (['a', "{'msg': {'bb': 1, 'cc': True}}"], {}), "(a, {'msg': {'bb': 1, 'cc': True}})\n", (1068, 1103), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((1115, 1163), 'fan_tools.python.dict_contains', 'dict_contains', (['b', "{'msg': {'bb': 2, 'cc': True}}"], {}), "(b, {'msg': {'bb': 2, 'cc': True}})\n", (1128, 1163), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((954, 990), 'fan_tools.python.dict_contains', 'dict_contains', (['a', "{'msg': {'bb': 2}}"], {}), "(a, {'msg': {'bb': 2}})\n", (967, 990), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((1006, 1042), 'fan_tools.python.dict_contains', 'dict_contains', (['b', "{'msg': {'bb': 1}}"], {}), "(b, {'msg': {'bb': 1}})\n", (1019, 1042), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((1180, 1229), 'fan_tools.python.dict_contains', 'dict_contains', (['a', "{'msg': {'bb': 1, 'cc': False}}"], {}), "(a, {'msg': {'bb': 1, 'cc': False}})\n", (1193, 1229), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((1245, 1294), 'fan_tools.python.dict_contains', 'dict_contains', (['b', "{'msg': {'bb': 2, 'cc': False}}"], {}), "(b, {'msg': {'bb': 2, 'cc': False}})\n", (1258, 1294), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((1310, 1360), 'fan_tools.python.dict_contains', 'dict_contains', (['a', "{'msg': {'bb': 1, 'nested': []}}"], {}), "(a, {'msg': {'bb': 1, 'nested': []}})\n", (1323, 1360), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((1400, 1453), 'fan_tools.python.expand_dot', 'expand_dot', (["{'msg.payload.status': 'waiting_release'}"], {}), "({'msg.payload.status': 'waiting_release'})\n", (1410, 1453), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((581, 607), 'fan_tools.python.usd_round', 'usd_round', (['"""12.9800000003"""'], {}), "('12.9800000003')\n", (590, 607), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n'), ((1735, 1768), 'fan_tools.python.areduce', 'areduce', (['sum_two', '[1, 2, 3, 4, 5]'], {}), '(sum_two, [1, 2, 3, 4, 5])\n', (1742, 1768), False, 'from fan_tools.python import areduce, dict_contains, 
expand_dot, slide, usd_round\n'), ((1792, 1838), 'fan_tools.python.areduce', 'areduce', (['sum_two', '[1, 2, 3, 4, 5]'], {'initial': '(100)'}), '(sum_two, [1, 2, 3, 4, 5], initial=100)\n', (1799, 1838), False, 'from fan_tools.python import areduce, dict_contains, expand_dot, slide, usd_round\n')] |
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.models import Query
from sqlshare_rest.dao.dataset import reset_dataset_account_access
from django.utils import timezone
from django.conf import settings
from django import db
from time import sleep
from sqlshare_rest.util.queue_triggers import trigger_query_queue_processing
from sqlshare_rest.util.queue_triggers import QUERY_QUEUE_PORT_NUMBER
from sqlshare_rest.logger import getLogger
from django.db.utils import DatabaseError
from decimal import Decimal
import datetime
import atexit
import signal
import json
import time
import sys
import os
import socket
from threading import Thread
import six
TERMINATE_TRIGGER_FILE = getattr(settings,
"SQLSHARE_TERMINATE_QUERY_QUEUE_PATH",
"/tmp/sqlshare_terminate_query_queue")
def process_queue(thread_count=0, run_once=True, verbose=False):
# Make sure only one instance is running at a time:
if trigger_query_queue_processing():
return
def start_query(query, background=True):
query.is_started = True
query.save()
query_id = query.pk
if background:
from django.db import connection
connection.close()
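            # Classic double-fork daemonization: the first fork lets the caller
            # return right away, os.setsid() detaches the child from the
            # controlling terminal, and the second fork ensures the worker can
            # never re-acquire one. The Django DB connection is closed above so
            # the parent and the forked worker do not share a database socket.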
pid1 = os.fork()
if pid1:
os.waitpid(pid1, 0)
# This is the main process
return
os.setsid()
if os.fork():
# Double fork the daemon
sys.exit(0)
try:
process_query(query_id)
except Exception as ex:
try:
query = Query.objects.get(pk=query_id)
query.has_error = True
query.error = str(ex)
query.is_finished = True
query.save()
except:
# That try is just trying to get info out to the user, it's
# relatively ok if that fails
pass
logger = getLogger(__name__)
logger.error("Error on %s: %s" % (query_id, str(ex)))
if background:
sys.exit(0)
def get_column_names_from_cursor(cursor):
index = 0
names = []
for col in cursor.description:
index += 1
column_name = col[0]
if column_name == "":
column_name = "COLUMN%s" % index
names.append(column_name)
return names
def custom_encode(obj):
if isinstance(obj, datetime.datetime):
return str(obj)
def process_query(query_id):
logger = getLogger(__name__)
query = Query.objects.get(pk=query_id)
# queries can be cancelled before we see them. clean it up now.
if query.terminated:
query.is_finished = True
query.has_error = True
query.error = "Query cancelled"
query.save()
return
pid = os.getpid()
query.process_queue_id = pid
query.save()
msg = "Processing query id %s, in process %s" % (
query.pk,
pid
)
logger.info(msg)
if verbose:
print(msg)
user = query.owner
row_count = 0
backend = get_backend()
try:
start = timezone.now()
query_plan = backend.get_query_plan(query.sql, user)
t1 = time.time()
sql = query.sql
if query.is_ui_preview:
sql = backend.get_preview_sql_for_query(sql)
cursor = backend.run_query(sql,
user,
return_cursor=True,
query=query)
t2 = time.time()
try:
all_data = []
decimal_test = type(Decimal())
def map_decimal(d):
if type(d) == decimal_test:
return str(d)
return d
for row in cursor:
# Need to manually map Decimal values to strings,
# otherwise json turns them into None
                    all_data.append(list(map(map_decimal, row)))
row_count += 1
columns = get_column_names_from_cursor(cursor)
formatted = json.dumps({"columns": columns, "data": all_data},
default=custom_encode)
query.preview_content = formatted
t3 = time.time()
query.query_time = t2-t1
query.total_time = t3-t1
query.query_plan = query_plan
query.save()
end = timezone.now()
except:
raise
except Exception as ex:
msg = "Error running query %s: %s" % (query.pk,
str(ex))
logger.error(msg)
query.has_error = True
query.error = str(ex)
finally:
backend.close_user_connection(user)
try:
query.is_finished = True
query.date_finished = timezone.now()
query.rows_total = row_count
query.save()
if query.is_preview_for:
dataset = query.is_preview_for
dataset.preview_is_finished = True
dataset.preview_error = query.error
# Make sure all current users can see the preview table
reset_dataset_account_access(dataset)
dataset.save()
except Exception as ex:
logger.error("Error: %s" % str(ex))
msg = "Finished query id %s." % query.pk
logger.info(msg)
def periodic_check():
"""
Every 5 seconds, do a check for new queries. Just in case something
needs processing, but didn't call trigger_processing() itself.
"""
logger = getLogger(__name__)
while True:
sleep(5)
msg = "Triggering periodic processing."
logger.debug(msg)
if verbose:
print(msg)
trigger_query_queue_processing()
filtered = Query.objects.filter(is_started=False)
if run_once:
try:
oldest_query = filtered.order_by('id')[:1].get()
except Query.DoesNotExist:
return
start_query(oldest_query, background=False)
else:
# Start with any queries already in the queue:
for query in filtered:
start_query(query)
# Just in case things get off the rails - maybe a connection to the
# server gets blocked? - periodically trigger a check for new queries
kicker = Thread(target=periodic_check)
kicker.setDaemon(True)
kicker.start()
# Start the socket server for getting notifications of new queries
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Make it so we can run the server right away after killing it
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('localhost', QUERY_QUEUE_PORT_NUMBER))
# Make sure we close our socket when we're killed.
def close_socket():
server.close()
atexit.register(close_socket)
def kill_query(query):
logger = getLogger(__name__)
pid = query.process_queue_id
query.is_started = True
query.is_finished = True
query.has_error = True
query.error = "Query cancelled"
query.save()
try:
backend = get_backend()
backend.kill_query(query)
except:
# This is optional
pass
logger.info("Cancelling query: %s" % query.pk)
os.kill(pid, signal.SIGKILL)
server.listen(5)
while True:
(clientsocket, address) = server.accept()
# Check to see if we should exit...
if os.path.isfile(TERMINATE_TRIGGER_FILE):
sys.exit(0)
# We don't actually have a protocol to speak...
clientsocket.close()
try:
terminate_list = Query.objects.filter(terminated=True,
is_finished=False)
for query in terminate_list:
kill_query(query)
queries = Query.objects.filter(is_started=False)
for query in queries:
start_query(query)
except Exception as ex:
# This was originally DatabaseError - but then there were also
# pyodbc.Error exceptions... and pyodbc isn't a hard
# requirement.
ex_str = str(ex)
# If there's just, say, a network glitch, carry on.
# Or, say, a server restart
# If it's anything else, re-raise the error.
is_reset_error = False
if ex_str.find("Read from the server failed") >= 0:
is_reset_error = True
if ex_str.find("Write to the server failed") >= 0:
is_reset_error = True
if ex_str.find("Communication link failure") >= 0:
is_reset_error = True
adaptive = "Adaptive Server is unavailable or does not exist"
if ex_str.find(adaptive) >= 0:
is_reset_error = True
if is_reset_error:
try:
db.close_old_connections()
except Exception as ex:
ex_str = str(ex)
is_expected = False
rollback_err = "Could not perform COMMIT or ROLLBACK"
if ex_str.find(rollback_err) >= 0:
# db.close_connection tries to end transactions
# pyodbc was absolutely unable to recover from that
# because it wasn't reconnecting to be able to do
# the rollback...
from django.db import connections
for conn in connections:
connections[conn].close()
else:
logger = getLogger(__name__)
logger.error("Error in query queue: %s" % ex_str)
raise
else:
logger = getLogger(__name__)
logger.error("Error in query queue: %s" % ex_str)
raise
def kill_query_queue():
# Create the file that triggers the termination
f = open(TERMINATE_TRIGGER_FILE, "w")
f.write("OK")
f.close()
# Trigger the check...
trigger_query_queue_processing()
# Just a quick pause before polling
time.sleep(0.3)
# Poll to see if the process is still running...
for i in range(10):
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('localhost', QUERY_QUEUE_PORT_NUMBER))
time.sleep(1)
except socket.error as ex:
os.remove(TERMINATE_TRIGGER_FILE)
return True
os.remove(TERMINATE_TRIGGER_FILE)
return False
| [
"sqlshare_rest.util.db.get_backend",
"time.sleep",
"sqlshare_rest.logger.getLogger",
"sys.exit",
"os.fork",
"os.remove",
"sqlshare_rest.models.Query.objects.get",
"os.kill",
"json.dumps",
"django.db.connection.close",
"django.utils.timezone.now",
"os.getpid",
"atexit.register",
"sqlshare_rest.models.Query.objects.filter",
"sqlshare_rest.util.queue_triggers.trigger_query_queue_processing",
"os.waitpid",
"django.db.close_old_connections",
"os.path.isfile",
"sqlshare_rest.dao.dataset.reset_dataset_account_access",
"time.time",
"os.setsid",
"socket.socket",
"threading.Thread",
"decimal.Decimal"
] | [((986, 1018), 'sqlshare_rest.util.queue_triggers.trigger_query_queue_processing', 'trigger_query_queue_processing', ([], {}), '()\n', (1016, 1018), False, 'from sqlshare_rest.util.queue_triggers import trigger_query_queue_processing\n'), ((6292, 6330), 'sqlshare_rest.models.Query.objects.filter', 'Query.objects.filter', ([], {'is_started': '(False)'}), '(is_started=False)\n', (6312, 6330), False, 'from sqlshare_rest.models import Query\n'), ((11028, 11060), 'sqlshare_rest.util.queue_triggers.trigger_query_queue_processing', 'trigger_query_queue_processing', ([], {}), '()\n', (11058, 11060), False, 'from sqlshare_rest.util.queue_triggers import trigger_query_queue_processing\n'), ((11106, 11121), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (11116, 11121), False, 'import time\n'), ((11487, 11520), 'os.remove', 'os.remove', (['TERMINATE_TRIGGER_FILE'], {}), '(TERMINATE_TRIGGER_FILE)\n', (11496, 11520), False, 'import os\n'), ((2633, 2652), 'sqlshare_rest.logger.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (2642, 2652), False, 'from sqlshare_rest.logger import getLogger\n'), ((2669, 2699), 'sqlshare_rest.models.Query.objects.get', 'Query.objects.get', ([], {'pk': 'query_id'}), '(pk=query_id)\n', (2686, 2699), False, 'from sqlshare_rest.models import Query\n'), ((2978, 2989), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2987, 2989), False, 'import os\n'), ((3290, 3303), 'sqlshare_rest.util.db.get_backend', 'get_backend', ([], {}), '()\n', (3301, 3303), False, 'from sqlshare_rest.util.db import get_backend\n'), ((6037, 6056), 'sqlshare_rest.logger.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (6046, 6056), False, 'from sqlshare_rest.logger import getLogger\n'), ((6829, 6858), 'threading.Thread', 'Thread', ([], {'target': 'periodic_check'}), '(target=periodic_check)\n', (6835, 6858), False, 'from threading import Thread\n'), ((7006, 7055), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (7019, 7055), False, 'import socket\n'), ((7380, 7409), 'atexit.register', 'atexit.register', (['close_socket'], {}), '(close_socket)\n', (7395, 7409), False, 'import atexit\n'), ((1243, 1261), 'django.db.connection.close', 'connection.close', ([], {}), '()\n', (1259, 1261), False, 'from django.db import connection\n'), ((1282, 1291), 'os.fork', 'os.fork', ([], {}), '()\n', (1289, 1291), False, 'import os\n'), ((1428, 1439), 'os.setsid', 'os.setsid', ([], {}), '()\n', (1437, 1439), False, 'import os\n'), ((1456, 1465), 'os.fork', 'os.fork', ([], {}), '()\n', (1463, 1465), False, 'import os\n'), ((2143, 2154), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2151, 2154), False, 'import sys\n'), ((3337, 3351), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3349, 3351), False, 'from django.utils import timezone\n'), ((3435, 3446), 'time.time', 'time.time', ([], {}), '()\n', (3444, 3446), False, 'import time\n'), ((3792, 3803), 'time.time', 'time.time', ([], {}), '()\n', (3801, 3803), False, 'import time\n'), ((5240, 5254), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5252, 5254), False, 'from django.utils import timezone\n'), ((6089, 6097), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (6094, 6097), False, 'from time import sleep\n'), ((6243, 6275), 'sqlshare_rest.util.queue_triggers.trigger_query_queue_processing', 'trigger_query_queue_processing', ([], {}), '()\n', (6273, 6275), False, 'from sqlshare_rest.util.queue_triggers import 
trigger_query_queue_processing\n'), ((7463, 7482), 'sqlshare_rest.logger.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (7472, 7482), False, 'from sqlshare_rest.logger import getLogger\n'), ((7949, 7977), 'os.kill', 'os.kill', (['pid', 'signal.SIGKILL'], {}), '(pid, signal.SIGKILL)\n', (7956, 7977), False, 'import os\n'), ((8141, 8179), 'os.path.isfile', 'os.path.isfile', (['TERMINATE_TRIGGER_FILE'], {}), '(TERMINATE_TRIGGER_FILE)\n', (8155, 8179), False, 'import os\n'), ((11234, 11283), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (11247, 11283), False, 'import socket\n'), ((11363, 11376), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (11373, 11376), False, 'import time\n'), ((1329, 1348), 'os.waitpid', 'os.waitpid', (['pid1', '(0)'], {}), '(pid1, 0)\n', (1339, 1348), False, 'import os\n'), ((1524, 1535), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1532, 1535), False, 'import sys\n'), ((2021, 2040), 'sqlshare_rest.logger.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (2030, 2040), False, 'from sqlshare_rest.logger import getLogger\n'), ((4406, 4479), 'json.dumps', 'json.dumps', (["{'columns': columns, 'data': all_data}"], {'default': 'custom_encode'}), "({'columns': columns, 'data': all_data}, default=custom_encode)\n", (4416, 4479), False, 'import json\n'), ((4590, 4601), 'time.time', 'time.time', ([], {}), '()\n', (4599, 4601), False, 'import time\n'), ((4783, 4797), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4795, 4797), False, 'from django.utils import timezone\n'), ((5597, 5634), 'sqlshare_rest.dao.dataset.reset_dataset_account_access', 'reset_dataset_account_access', (['dataset'], {}), '(dataset)\n', (5625, 5634), False, 'from sqlshare_rest.dao.dataset import reset_dataset_account_access\n'), ((7745, 7758), 'sqlshare_rest.util.db.get_backend', 'get_backend', ([], {}), '()\n', (7756, 7758), False, 'from sqlshare_rest.util.db import get_backend\n'), ((8197, 8208), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8205, 8208), False, 'import sys\n'), ((8352, 8408), 'sqlshare_rest.models.Query.objects.filter', 'Query.objects.filter', ([], {'terminated': '(True)', 'is_finished': '(False)'}), '(terminated=True, is_finished=False)\n', (8372, 8408), False, 'from sqlshare_rest.models import Query\n'), ((8574, 8612), 'sqlshare_rest.models.Query.objects.filter', 'Query.objects.filter', ([], {'is_started': '(False)'}), '(is_started=False)\n', (8594, 8612), False, 'from sqlshare_rest.models import Query\n'), ((11424, 11457), 'os.remove', 'os.remove', (['TERMINATE_TRIGGER_FILE'], {}), '(TERMINATE_TRIGGER_FILE)\n', (11433, 11457), False, 'import os\n'), ((1659, 1689), 'sqlshare_rest.models.Query.objects.get', 'Query.objects.get', ([], {'pk': 'query_id'}), '(pk=query_id)\n', (1676, 1689), False, 'from sqlshare_rest.models import Query\n'), ((3887, 3896), 'decimal.Decimal', 'Decimal', ([], {}), '()\n', (3894, 3896), False, 'from decimal import Decimal\n'), ((10728, 10747), 'sqlshare_rest.logger.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (10737, 10747), False, 'from sqlshare_rest.logger import getLogger\n'), ((9731, 9757), 'django.db.close_old_connections', 'db.close_old_connections', ([], {}), '()\n', (9755, 9757), False, 'from django import db\n'), ((10545, 10564), 'sqlshare_rest.logger.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (10554, 10564), False, 'from sqlshare_rest.logger import getLogger\n')] |
import numpy as np
import matplotlib.pyplot as plt
class omd:
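    """
    Online mirror descent on the probability simplex with the entropic mirror
    map (exponentiated-gradient / multiplicative-weights update):
    x_{t+1} = x_t * exp(-eta * grad), renormalized back onto the simplex.
    """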
def __init__(self, dim, eta):
self.dim = dim
self.x_t = (1.0/dim)*np.ones([dim])
self.t = 0
self.eta = eta
def step(self, loss_grad):
self.t = self.t + 1
# compute numerator
y_t = self.x_t*np.exp(-1*self.eta*(loss_grad))
# compute denominator
normalizer = np.sum(np.abs(y_t))
self.x_t = y_t/normalizer
return self.x_t
def generate_simplex(dim):
p = np.random.uniform(0,100, size=[dim])
return p/np.sum(p)
def loss(x,y, l="l2"):
if l == "l2":
return 0.5*np.linalg.norm(x-y)
elif l == "l1":
return np.sum(np.abs(x-y))
elif l == "kl":
# sum over the support of y
return np.sum(np.where(x != 0,(x-y) * np.log(x / y), 0))
elif l == "linear":
return np.sum(x-y)
def grad_loss(x,y, l="l2"):
if l == "l2":
return x - y
elif l == "l1":
return np.sum(np.sign(x-y))
elif l == "kl":
return np.ones(x.shape) - np.divide(y,x)#np.divide(x,y)+np.log(np.divide(x,y))
elif l == "linear":
return -1*x.shape[0]
if __name__ == "__main__":
dim = 5 # dimension
eta = 0.1 # stepsize
T = 700 # number of steps
losses = np.zeros([T,1]) # loss values placeholder
threshold = 0.0001 # convergence threshold
loss_func = "linear" # choose loss function
p = generate_simplex(dim)
# p = np.array([0.19775466, 0.16387309, 0.22701363, 0.10678895, 0.30456967])
# p = p.T
online_md = omd(dim, eta)
# determine initial value
x_init = online_md.x_t
x_t = x_init
for t in range(0,T):
#print(x_t)
x_t = online_md.step(grad_loss(x_t, p, loss_func))
loss_t = loss(x_t, p, loss_func)
losses[t] = loss_t
# check for convergence
norm_dist = np.linalg.norm(p - x_t)
if norm_dist < threshold:
print("solution converged at iter ", t)
break
plt.plot(losses)
plt.ylabel("loss")
plt.xlabel("iter")
plt.savefig("plots/losses.png")
print("initialization:\t\t", x_init)
print("objective simplex:\t", p)
print("learned simplex:\t", x_t)
print("norm distance: ", norm_dist)
| [
"numpy.abs",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linalg.norm",
"numpy.log",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.sign",
"numpy.random.uniform",
"numpy.divide"
] | [((550, 587), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(100)'], {'size': '[dim]'}), '(0, 100, size=[dim])\n', (567, 587), True, 'import numpy as np\n'), ((1397, 1413), 'numpy.zeros', 'np.zeros', (['[T, 1]'], {}), '([T, 1])\n', (1405, 1413), True, 'import numpy as np\n'), ((2192, 2208), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (2200, 2208), True, 'import matplotlib.pyplot as plt\n'), ((2214, 2232), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2224, 2232), True, 'import matplotlib.pyplot as plt\n'), ((2238, 2256), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iter"""'], {}), "('iter')\n", (2248, 2256), True, 'import matplotlib.pyplot as plt\n'), ((2262, 2293), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/losses.png"""'], {}), "('plots/losses.png')\n", (2273, 2293), True, 'import matplotlib.pyplot as plt\n'), ((601, 610), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (607, 610), True, 'import numpy as np\n'), ((2050, 2073), 'numpy.linalg.norm', 'np.linalg.norm', (['(p - x_t)'], {}), '(p - x_t)\n', (2064, 2073), True, 'import numpy as np\n'), ((159, 173), 'numpy.ones', 'np.ones', (['[dim]'], {}), '([dim])\n', (166, 173), True, 'import numpy as np\n'), ((342, 375), 'numpy.exp', 'np.exp', (['(-1 * self.eta * loss_grad)'], {}), '(-1 * self.eta * loss_grad)\n', (348, 375), True, 'import numpy as np\n'), ((436, 447), 'numpy.abs', 'np.abs', (['y_t'], {}), '(y_t)\n', (442, 447), True, 'import numpy as np\n'), ((676, 697), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - y)'], {}), '(x - y)\n', (690, 697), True, 'import numpy as np\n'), ((740, 753), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (746, 753), True, 'import numpy as np\n'), ((1046, 1060), 'numpy.sign', 'np.sign', (['(x - y)'], {}), '(x - y)\n', (1053, 1060), True, 'import numpy as np\n'), ((918, 931), 'numpy.sum', 'np.sum', (['(x - y)'], {}), '(x - y)\n', (924, 931), True, 'import numpy as np\n'), ((1097, 1113), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (1104, 1113), True, 'import numpy as np\n'), ((1116, 1131), 'numpy.divide', 'np.divide', (['y', 'x'], {}), '(y, x)\n', (1125, 1131), True, 'import numpy as np\n'), ((858, 871), 'numpy.log', 'np.log', (['(x / y)'], {}), '(x / y)\n', (864, 871), True, 'import numpy as np\n')] |
from justgood import imjustgood
media = imjustgood("YOUR_APIKEY_HERE")
post_url = "https://instagram.com/p/CJtqfEbhpjO/" # example instapost url
data = media.instapost(post_url)
# Get attributes
result = "Instagram Post"
result += "\nUsername : {}".format(data["result"]["username"])
result += "\nFullname : {}".format(data["result"]["fullname"])
result += "\nCreated : {}".format(data["result"]["created"])
result += "\nCaption : {}".format(data["result"]["caption"])
result += "\n\nPicture :\n{}".format(data["result"]["picture"])
number = 0
result += "\n\nMedia Post"
for a in data["result"]["postData"]:
number += 1
if a["type"] == "image":
result += "\n{}. Image Url : {}".format(number, a["postUrl"])
if a["type"] == "video":
result += "\n{}. Video Url : {}".format(number, a["postUrl"])
result += "\nPoster Url: {}".format(a["poster"])
print(result)
# Get JSON results
print(data)
| [
"justgood.imjustgood"
] | [((41, 71), 'justgood.imjustgood', 'imjustgood', (['"""YOUR_APIKEY_HERE"""'], {}), "('YOUR_APIKEY_HERE')\n", (51, 71), False, 'from justgood import imjustgood\n')] |
import abc
import os
from typing import Optional, TYPE_CHECKING
if TYPE_CHECKING:
from gtmcore.dataset import Dataset
class CacheManager(metaclass=abc.ABCMeta):
"""Abstract class to manage the dataset object cache.
An object cache is the location where files are materialized (as unique objects). These objects are hardlinked
into directories representing a revision of the dataset (with the appropriate filenames).
"""
def __init__(self, dataset: 'Dataset', username: Optional[str]) -> None:
"""
Args:
dataset: Current dataset object
username: Username of current logged in user, which may be required by the CacheManager implementation
"""
self.dataset = dataset
self.username = username
self.initialize()
@property
def current_revision_dir(self) -> str:
"""Method to return the directory containing files for the current dataset revision
Returns:
str
"""
return os.path.join(self.cache_root, self.dataset.git.repo.head.commit.hexsha)
@property
def cache_root(self) -> str:
"""The location of the file cache root
Returns:
str
"""
        raise NotImplementedError
def initialize(self) -> None:
"""Method to configure a file cache for use. this can include creating/provisioning resources or just loading
things
Returns:
None
"""
        raise NotImplementedError
| [
"os.path.join"
] | [((1022, 1093), 'os.path.join', 'os.path.join', (['self.cache_root', 'self.dataset.git.repo.head.commit.hexsha'], {}), '(self.cache_root, self.dataset.git.repo.head.commit.hexsha)\n', (1034, 1093), False, 'import os\n')] |
import models
from time import sleep
motor1 = models.motor(True, speed=15, minimum=10, maximum=30)
pump1 = models.pump(True, 60, minimum=600, maximum=620)
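# Continuously simulate new motor-speed and pump-pressure values and publish
# the readings and device states every 3 seconds.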
while True:
motor1.simulate_speed()
motor1.publish_state()
motor1.publish_speed()
pump1.simulate_pressure()
pump1.publish_pressure()
pump1.publish_state()
sleep(3) | [
"models.motor",
"models.pump",
"time.sleep"
] | [((49, 101), 'models.motor', 'models.motor', (['(True)'], {'speed': '(15)', 'minimum': '(10)', 'maximum': '(30)'}), '(True, speed=15, minimum=10, maximum=30)\n', (61, 101), False, 'import models\n'), ((116, 163), 'models.pump', 'models.pump', (['(True)', '(60)'], {'minimum': '(600)', 'maximum': '(620)'}), '(True, 60, minimum=600, maximum=620)\n', (127, 163), False, 'import models\n'), ((349, 357), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (354, 357), False, 'from time import sleep\n')] |
from dataclasses import dataclass, field
from typing import List
@dataclass
class PlayingCard:
rank: str
suit: str
def __str__(self):
return f'{self.suit}{self.rank}'
RANKS = '2 3 4 5 6 7 8 9 10 J Q K A'.split()
SUITS = '♣ ♢ ♡ ♠'.split()
def make_french_deck():
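    # Build the standard 52-card deck: 13 ranks for each of the 4 suits.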
return [PlayingCard(r, s) for s in SUITS for r in RANKS]
@dataclass
class Desk:
cards: List[PlayingCard] = field(default_factory=make_french_deck)
| [
"dataclasses.field"
] | [((402, 441), 'dataclasses.field', 'field', ([], {'default_factory': 'make_french_deck'}), '(default_factory=make_french_deck)\n', (407, 441), False, 'from dataclasses import dataclass, field\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, errno
import logging
def list_difference(a, b):
'''
    Return the elements of ``a`` that are not present in ``b``.

    :param a: first iterable
    :param b: second iterable
    :return: list of elements of ``a`` that do not appear in ``b``
'''
set_a = set(a)
set_b = set(b)
comparison = set_a.difference(set_b)
return list(comparison)
def mkdir_p(path):
'''
    Create directory ``path``, ignoring the error if it already exists
    (equivalent to ``mkdir -p``).

    :param path: directory path to create
    :return: None
'''
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def makefolders(subfolders):
'''
    Create multiple folders, skipping any that already exist.

    :param subfolders: list of directory paths to create
    :return: None
'''
assert isinstance(subfolders, list)
for path in subfolders:
if not os.path.exists(path):
mkdir_p(path)
def setLogConfig():
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
return logger
| [
"logging.basicConfig",
"os.path.exists",
"logging.getLogger",
"os.makedirs",
"os.path.isdir"
] | [((886, 925), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (905, 925), False, 'import logging\n'), ((940, 967), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (957, 967), False, 'import logging\n'), ((443, 460), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (454, 460), False, 'import os, errno\n'), ((809, 829), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (823, 829), False, 'import os, errno\n'), ((531, 550), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (544, 550), False, 'import os, errno\n')] |
import argparse
import os
from .gief import __doc__, app
parser = argparse.ArgumentParser(prog='gief', description=__doc__)
parser.add_argument('path', nargs='?', default=os.getcwd(),
help='Folder where files will be uploaded to (CWD by default)')
parser.add_argument('-H', '--host', default='0.0.0.0')
parser.add_argument('-p', '--port', type=int, default=5000)
args = parser.parse_args()
app.config['path'] = args.path
app.run(args.host, args.port)
| [
"argparse.ArgumentParser",
"os.getcwd"
] | [((69, 126), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""gief"""', 'description': '__doc__'}), "(prog='gief', description=__doc__)\n", (92, 126), False, 'import argparse\n'), ((175, 186), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (184, 186), False, 'import os\n')] |
# Generated by Django 3.0.6 on 2020-05-23 07:50
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Candidate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=200)),
('last_name', models.CharField(max_length=200)),
('phone', models.CharField(max_length=200)),
('address', models.CharField(max_length=200)),
('stage', models.CharField(choices=[('AP', 'Applied'), ('PS', 'Phone Screen'), ('OS', 'On site'), ('OF', 'Offered'), ('AC', 'Accepted'), ('RE', 'Rejected')], max_length=2)),
('resume', models.FileField(storage=django.core.files.storage.FileSystemStorage(location='/media/uploads'), upload_to='')),
],
),
]
| [
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((351, 444), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (367, 444), False, 'from django.db import migrations, models\n'), ((474, 506), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (490, 506), False, 'from django.db import migrations, models\n'), ((539, 571), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (555, 571), False, 'from django.db import migrations, models\n'), ((600, 632), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (616, 632), False, 'from django.db import migrations, models\n'), ((663, 695), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (679, 695), False, 'from django.db import migrations, models\n'), ((724, 893), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('AP', 'Applied'), ('PS', 'Phone Screen'), ('OS', 'On site'), ('OF',\n 'Offered'), ('AC', 'Accepted'), ('RE', 'Rejected')]", 'max_length': '(2)'}), "(choices=[('AP', 'Applied'), ('PS', 'Phone Screen'), ('OS',\n 'On site'), ('OF', 'Offered'), ('AC', 'Accepted'), ('RE', 'Rejected')],\n max_length=2)\n", (740, 893), False, 'from django.db import migrations, models\n')] |
"""
Don't Get Volunteered!
======================
As a henchman on Commander Lambda's space station, you're expected to be resourceful,
smart, and a quick thinker. It's not easy building a doomsday device and ordering the
bunnies around at the same time, after all! In order to make sure that everyone is sufficiently
quick-witted, Commander Lambda has installed new flooring outside the henchman dormitories.
It looks like a chessboard, and every morning and evening you have to solve a new movement
puzzle in order to cross the floor. That would be fine if you got to be the rook or the queen,
but instead, you have to be the knight. Worse, if you take too much time solving the puzzle, you
get "volunteered" as a test subject for the LAMBCHOP doomsday device!
To help yourself get to and from your bunk every day, write a function called solution(src, dest)
which takes in two parameters: the source square, on which you start, and the destination square,
which is where you need to land to solve the puzzle. The function should return an integer representing
the smallest number of moves it will take for you to travel from the source square to the destination square
using a chess knight's moves (that is, two squares in any direction immediately followed by one square
perpendicular to that direction, or vice versa, in an "L" shape). Both the source and destination squares
will be an integer between 0 and 63, inclusive, and are numbered like the example chessboard below:
-------------------------
| 0| 1| 2| 3| 4| 5| 6| 7|
-------------------------
| 8| 9|10|11|12|13|14|15|
-------------------------
|16|17|18|19|20|21|22|23|
-------------------------
|24|25|26|27|28|29|30|31|
-------------------------
|32|33|34|35|36|37|38|39|
-------------------------
|40|41|42|43|44|45|46|47|
-------------------------
|48|49|50|51|52|53|54|55|
-------------------------
|56|57|58|59|60|61|62|63|
-------------------------
Constraints:
Java
====
Your code will be compiled using standard Java 8. All tests will be run by calling the solution() method inside the Solution class
Execution time is limited.
Wildcard imports and some specific classes are restricted (e.g. java.lang.ClassLoader). You will receive an
error when you verify your solution if you have used a blacklisted class.
Third-party libraries, input/output operations, spawning threads or processes and changes to the execution environment are not allowed.
Your solution must be under 32000 characters in length including new lines and and other non-printing characters.
Python
======
Your code will run inside a Python 2.7.13 sandbox. All tests will be run by calling the solution() function.
Standard libraries are supported except for bz2, crypt, fcntl, mmap, pwd, pyexpat, select, signal, termios, thread, time,
unicodedata, zipimport, zlib.
Input/output operations are not allowed.
Your solution must be under 32000 characters in length including new lines and and other non-printing characters.
"""
from collections import deque
class Board:
"""
All things board related.
"""
def __init__(self):
self.upper_bound = 63
self.row_length = 8
self.generate(self.row_length, self.upper_bound)
def generate(self, row_length, upper_bound):
"""
Generate the board's graph
"""
self.graph = {}
count = 0
row_count = 0
while count <= upper_bound:
row = []
for _ in range(row_length):
row.append(count)
count += 1
self.graph[row_count] = row
row_count += 1
def find_coordinates(self, value):
"""
Provided a numerical value between zero and the upper limit, return location coordinates
"""
for row in self.graph:
if value in self.graph[row]:
return (row, self.graph[row].index(value))
class ChessPiece:
"""
Base Chess piece class
"""
def __init__(self, type):
self.type = type
self.movements = []
class Knight(ChessPiece):
"""
Knight specific attributes
"""
def __init__(self, type="knight"):
self.type = type
self.movements = [
(2, -1),
(1, -2),
(-1, -2),
(-2, -1),
(-1, 2),
(1, 2),
(2, 1),
(-2, 1),
]
def solution(start, end):
"""
Determine the shortest number of steps to get from to the provided start, and end values.
"""
visited = set()
theboard = Board()
mypiece = Knight()
start_coordinates = theboard.find_coordinates(start)
end_coordinates = theboard.find_coordinates(end)
visited.add(start_coordinates)
processing_queue = deque([(0, start_coordinates)]) # faster than an array
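    # Breadth-first search over knight moves: positions are dequeued in order
    # of increasing move count, so the first time the destination is reached
    # that count is minimal.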
while processing_queue:
moves_count, coordinates = processing_queue.pop() # FIFO from right
if (
coordinates[0] == end_coordinates[0]
and coordinates[1] == end_coordinates[1]
):
return moves_count # answer found
for move in mypiece.movements:
next_move = (coordinates[0] + move[0], coordinates[1] + move[1])
            # Skip squares that fall outside the 8x8 board.
            on_board = (0 <= next_move[0] < theboard.row_length
                        and 0 <= next_move[1] < theboard.row_length)
            if on_board and next_move not in visited:
processing_queue.appendleft((moves_count + 1, next_move))
visited.add(coordinates)
return Warning("No solution found.")
"""
Local dev only - not part of the submitted answer
"""
tests = [(0, 1), (19, 36)]
if __name__ == "__main__":
results = []
for test in tests:
print("SOLUTION: {}".format(solution(test[0], test[1])))
| [
"collections.deque"
] | [((4782, 4813), 'collections.deque', 'deque', (['[(0, start_coordinates)]'], {}), '([(0, start_coordinates)])\n', (4787, 4813), False, 'from collections import deque\n')] |
import os
from main.image_ops.converter import convert_frame
old_file = '/tmp/test-laptop-01-2021-01-27-10:54:50:532000.webp'
def test_convert_frame():
new_file = convert_frame(old_file, '.png')
assert new_file.endswith('.png')
| [
"main.image_ops.converter.convert_frame"
] | [((170, 201), 'main.image_ops.converter.convert_frame', 'convert_frame', (['old_file', '""".png"""'], {}), "(old_file, '.png')\n", (183, 201), False, 'from main.image_ops.converter import convert_frame\n')] |
import numpy as np
from keras.callbacks import Callback
from collections import defaultdict
class ExpandLogger(Callback):
"""
Acts as `BaseLogger` to expand logs with the average of observations, actions, mean_q, etc.
"""
def __init__(self):
super().__init__()
self.observations = []
self.rewards = []
self.actions = []
self.metrics = []
self.metrics_names = []
self.info = defaultdict(list)
self.step = 0
def on_train_begin(self, logs=None):
self.metrics_names = self.model.metrics_names
def on_episode_begin(self, episode, logs=None):
""" Reset environment variables at beginning of each episode """
self.observations = []
self.rewards = []
self.actions = []
self.metrics = []
def on_episode_end(self, episode, logs=None):
""" Compute training statistics of the episode when done """
mean_q_id = self.metrics_names.index('mean_q')
metrics = np.asarray(self.metrics)
metrics = metrics[~np.isnan(metrics).any(axis=1)]
if metrics.shape[0] > 0:
logs['mean_q'] = metrics[:, mean_q_id].mean()
logs['reward_mean'] = np.mean(self.rewards)
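        # Actions are (left, right) wheel velocities: log the mean per-step sum
        # (overall drive) and mean absolute difference (turning) over the episode.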
logs['wheel_vel_sum'] = np.sum(self.actions, axis=1).mean()
logs['wheel_vel_diff'] = np.abs(np.diff(self.actions, axis=1)).mean()
logs['dist_to_obstacles'] = np.mean(self.observations, axis=0)[0]
for info_key, info_values in self.info.items():
logs[info_key] = np.mean(info_values)
del logs['nb_steps'] # don't show total num. of steps
def on_step_end(self, step, logs=None):
""" Update statistics of episode after each step """
self.observations.append(logs['observation'])
self.rewards.append(logs['reward'])
self.actions.append(logs['action'])
self.metrics.append(logs['metrics'])
for info_key, info_value in logs['info'].items():
self.info[info_key].append(info_value)
self.step += 1
class DataDumpLogger(Callback):
def __init__(self, fpath):
super().__init__()
self.fpath = fpath
self.observations = []
self.rewards = []
self.actions = []
def on_episode_begin(self, episode, logs=None):
self.observations.append([])
self.rewards.append([])
self.actions.append([])
def on_step_end(self, step, logs=None):
self.observations[-1].append(logs['observation'])
self.rewards[-1].append(logs['reward'])
self.actions[-1].append(logs['action'])
def on_train_end(self, logs=None):
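        # Flatten the per-episode logs into a single table and write it as CSV:
        # one row per step with episode id, observation, action and reward.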
episode_id = []
for episode, obs_episode in enumerate(self.observations):
episode_id.append(np.repeat(episode, len(obs_episode)))
episode_id = np.hstack(episode_id)
observations = np.vstack(self.observations)
rewards = np.hstack(self.rewards)
actions = np.vstack(self.actions)
observation_head = ['dist_to_obstacles', 'servo_angle']
if observations.shape[1] > 2:
observation_head.append('servo_turn')
header = ['episode', *observation_head, 'wheel_left', 'wheel_right', 'reward']
data = np.c_[episode_id, observations, actions, rewards]
np.savetxt(self.fpath, data, fmt='%.5f', delimiter=',', header=','.join(header), comments='')
| [
"numpy.mean",
"numpy.hstack",
"numpy.asarray",
"numpy.diff",
"numpy.sum",
"collections.defaultdict",
"numpy.vstack",
"numpy.isnan"
] | [((449, 466), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (460, 466), False, 'from collections import defaultdict\n'), ((1013, 1037), 'numpy.asarray', 'np.asarray', (['self.metrics'], {}), '(self.metrics)\n', (1023, 1037), True, 'import numpy as np\n'), ((1217, 1238), 'numpy.mean', 'np.mean', (['self.rewards'], {}), '(self.rewards)\n', (1224, 1238), True, 'import numpy as np\n'), ((2827, 2848), 'numpy.hstack', 'np.hstack', (['episode_id'], {}), '(episode_id)\n', (2836, 2848), True, 'import numpy as np\n'), ((2872, 2900), 'numpy.vstack', 'np.vstack', (['self.observations'], {}), '(self.observations)\n', (2881, 2900), True, 'import numpy as np\n'), ((2919, 2942), 'numpy.hstack', 'np.hstack', (['self.rewards'], {}), '(self.rewards)\n', (2928, 2942), True, 'import numpy as np\n'), ((2961, 2984), 'numpy.vstack', 'np.vstack', (['self.actions'], {}), '(self.actions)\n', (2970, 2984), True, 'import numpy as np\n'), ((1421, 1455), 'numpy.mean', 'np.mean', (['self.observations'], {'axis': '(0)'}), '(self.observations, axis=0)\n', (1428, 1455), True, 'import numpy as np\n'), ((1544, 1564), 'numpy.mean', 'np.mean', (['info_values'], {}), '(info_values)\n', (1551, 1564), True, 'import numpy as np\n'), ((1271, 1299), 'numpy.sum', 'np.sum', (['self.actions'], {'axis': '(1)'}), '(self.actions, axis=1)\n', (1277, 1299), True, 'import numpy as np\n'), ((1347, 1376), 'numpy.diff', 'np.diff', (['self.actions'], {'axis': '(1)'}), '(self.actions, axis=1)\n', (1354, 1376), True, 'import numpy as np\n'), ((1065, 1082), 'numpy.isnan', 'np.isnan', (['metrics'], {}), '(metrics)\n', (1073, 1082), True, 'import numpy as np\n')] |
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
from pymor.core.config import config
config.require('SCIKIT_FEM')
import numpy as np
import skfem
from pymor.analyticalproblems.domaindescriptions import (RectDomain, CylindricalDomain, TorusDomain, LineDomain,
CircleDomain, PolygonalDomain)
def discretize_domain(domain_description, diameter=1 / 100, mesh_type=None):
def discretize_RectDomain():
mt = mesh_type or skfem.MeshQuad
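        # Pick the number of cells per direction so that the diagonal of each
        # cell is at most `diameter`.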
x0i = int(np.ceil(domain_description.width * np.sqrt(2) / diameter))
x1i = int(np.ceil(domain_description.height * np.sqrt(2) / diameter))
mesh = mt.init_tensor(
np.linspace(domain_description.domain[0, 0], domain_description.domain[1, 0], x0i + 1),
np.linspace(domain_description.domain[0, 1], domain_description.domain[1, 1], x1i + 1)
).with_boundaries({
'left': lambda x: x[0] == domain_description.domain[0, 0],
'right': lambda x: x[0] == domain_description.domain[1, 0],
'top': lambda x: x[1] == domain_description.domain[1, 1],
'bottom': lambda x: x[1] == domain_description.domain[0, 1],
})
boundary_facets = {
bt: np.hstack([mesh.boundaries[edge]
for edge in ['left', 'right', 'top', 'bottom']
if getattr(domain_description, edge) == bt])
for bt in domain_description.boundary_types
}
return mesh, boundary_facets
def discretize_LineDomain():
mt = mesh_type or skfem.MeshLine
mesh = mt(
np.linspace(domain_description.domain[0], domain_description.domain[1],
int(np.ceil((domain_description.domain[1] - domain_description.domain[0]) / diameter)))
).with_boundaries({
'left': lambda x: x[0] == domain_description.domain[0],
'right': lambda x: x[0] == domain_description.domain[1]
})
boundary_facets = {
bt: np.hstack([mesh.boundaries[edge]
for edge in ['left', 'right']
if getattr(domain_description, edge) == bt])
for bt in domain_description.boundary_types
}
return mesh, boundary_facets
if isinstance(domain_description, RectDomain):
return discretize_RectDomain()
elif isinstance(domain_description, CylindricalDomain):
raise NotImplementedError
elif isinstance(domain_description, TorusDomain):
raise NotImplementedError
elif isinstance(domain_description, PolygonalDomain):
# from pymor.discretizers.builtin.domaindiscretizers.gmsh import discretize_gmsh
# return discretize_gmsh(domain_description, clscale=diameter)
raise NotImplementedError
elif isinstance(domain_description, LineDomain):
return discretize_LineDomain()
elif isinstance(domain_description, CircleDomain):
raise NotImplementedError
else:
raise NotImplementedError
| [
"numpy.ceil",
"numpy.linspace",
"numpy.sqrt",
"pymor.core.config.config.require"
] | [((251, 279), 'pymor.core.config.config.require', 'config.require', (['"""SCIKIT_FEM"""'], {}), "('SCIKIT_FEM')\n", (265, 279), False, 'from pymor.core.config import config\n'), ((868, 959), 'numpy.linspace', 'np.linspace', (['domain_description.domain[0, 0]', 'domain_description.domain[1, 0]', '(x0i + 1)'], {}), '(domain_description.domain[0, 0], domain_description.domain[1, 0\n ], x0i + 1)\n', (879, 959), True, 'import numpy as np\n'), ((968, 1059), 'numpy.linspace', 'np.linspace', (['domain_description.domain[0, 1]', 'domain_description.domain[1, 1]', '(x1i + 1)'], {}), '(domain_description.domain[0, 1], domain_description.domain[1, 1\n ], x1i + 1)\n', (979, 1059), True, 'import numpy as np\n'), ((723, 733), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (730, 733), True, 'import numpy as np\n'), ((801, 811), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (808, 811), True, 'import numpy as np\n'), ((1912, 1997), 'numpy.ceil', 'np.ceil', (['((domain_description.domain[1] - domain_description.domain[0]) / diameter)'], {}), '((domain_description.domain[1] - domain_description.domain[0]) /\n diameter)\n', (1919, 1997), True, 'import numpy as np\n')] |
from nsga2 import seq
from nsga2.problems.problem_definitions import ProblemDefinitions
class ZDT2Definitions(ProblemDefinitions):
def __init__(self):
self.n = 30
def f1(self, individual):
return individual.features[0]
def f2(self, individual):
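        # ZDT2: g(x) = 1 + 9/(n-1) * sum(x_2..x_n), h = 1 - (f1/g)^2, f2 = g*h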
sigma = sum(individual.features[1:])
g = 1 + sigma*9/(self.n - 1)
h = 1 - (self.f1(individual)/g)**2
return g*h
def perfect_pareto_front(self):
domain = seq(0, 1, 0.01)
return domain, map(lambda x1: 1 - x1**2, domain)
| [
"nsga2.seq"
] | [((475, 490), 'nsga2.seq', 'seq', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (478, 490), False, 'from nsga2 import seq\n')] |
import wx
import wx.xrc
import threading
import urllib.request as urllib2
from io import BytesIO
from TTS_Reader import TextToSpeech
class ReadNewsFrame(wx.Frame):
"""
    This class extends wx.Frame, so it is not part of the main app frame; it is
    used by the News (API) Panel to display a whole article from the resources
    provided to it.
...
Attributes:
img : wx.Image
The image in the article to be displayed
text : str
The complete main content text of the article
tts : TextToSpeech
References an instance of TextToSpeech for a Text-to-Speech reader
_read_news_img_src : str
URL to the news article image
_read_news_title : str
Title to the news article
_read_news_source : str
Source / provider of the news article
_read_news_published_on : str
Date of publishing of the article
ReadNewsButtonToggleReader : wx.Button
            Button that creates and plays the Text-to-Speech reader when
            clicked; bound to the OnToggleBtnPress(self, event) handler
...
Methods:
OnToggleBtnPress(self, event)
Creates an instance of TextToSpeech and provides it with the
text of the article, then plays the TTS
OnClose(self, event)
Tries to destroy the tts object if it's running, closes and
destroys the ReadNewsFrame
"""
def __init__(self, parent, _read_news_img_src, _read_news_text, _read_news_title, _read_news_source, _read_news_published_on):
super(ReadNewsFrame, self).__init__(parent = parent, id = wx.ID_ANY, title = u"Read News", pos = wx.DefaultPosition, size = wx.Size(900, 700), style = wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER | wx.TAB_TRAVERSAL)
self.img = None
self.text = _read_news_text
self.tts = None
################################ STATIC UI ################################
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
ReadNewsRootWindow = wx.BoxSizer(wx.VERTICAL)
self.ReadNewsImagePanel = wx.Panel(self, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, 300), wx.TAB_TRAVERSAL)
ReadNewsImageSizer = wx.BoxSizer(wx.VERTICAL)
try:
"""Tries to open the URL of the article image, download the image, parse it to wx.Image"""
file_url = urllib2.urlopen(_read_news_img_src).read()
stream = BytesIO(file_url)
self.img = wx.Image(stream)
except:
wx.MessageBox(message = 'There was an error while processing the image loading call.', caption = 'Error. Could not load the news.', parent = self)
ReadNewsImageSizer.SetMinSize(wx.Size(-1, 300))
self.ReadNewsImage = wx.StaticBitmap(self.ReadNewsImagePanel, wx.ID_ANY, wx.Bitmap(r"img/placeholder300x202.jpg", wx.BITMAP_TYPE_ANY), wx.DefaultPosition, wx.Size(600, 290), 0)
if self.img != None:
self.ReadNewsImage.SetBitmap(wx.Bitmap(self.img))
ReadNewsImageSizer.Add(self.ReadNewsImage, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.ReadNewsImagePanel.SetSizer(ReadNewsImageSizer)
self.ReadNewsImagePanel.Layout()
ReadNewsRootWindow.Add(self.ReadNewsImagePanel, 0, wx.EXPAND | wx.ALL, 5)
self.m_staticline23 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
ReadNewsRootWindow.Add(self.m_staticline23, 0, wx.EXPAND | wx.ALL, 5)
ReadNewsTextWindow = wx.BoxSizer(wx.VERTICAL)
ReadNewsTextWindow.SetMinSize(wx.Size(-1, 400))
ReadNewsProperties = wx.BoxSizer(wx.HORIZONTAL)
self.static_news_title = wx.StaticText(self, wx.ID_ANY, u"Title:", wx.DefaultPosition, wx.DefaultSize, 0)
self.static_news_title.Wrap(-1)
ReadNewsProperties.Add(self.static_news_title, 0, wx.ALL, 5)
self.read_news_title_text = wx.StaticText(self, wx.ID_ANY, _read_news_title, wx.DefaultPosition, wx.Size(300, -1), 0)
self.read_news_title_text.Wrap(300)
ReadNewsProperties.Add(self.read_news_title_text, 0, wx.ALL, 5)
self.static_news_author = wx.StaticText(self, wx.ID_ANY, u"Source:", wx.DefaultPosition, wx.DefaultSize, 0)
self.static_news_author.Wrap(-1)
ReadNewsProperties.Add(self.static_news_author, 0, wx.ALL, 5)
self.read_news_source = wx.StaticText(self, wx.ID_ANY, _read_news_source, wx.DefaultPosition, wx.Size(150, -1), 0)
self.read_news_source.Wrap(-1)
ReadNewsProperties.Add(self.read_news_source, 0, wx.ALL, 5)
self.static_news_date = wx.StaticText(self, wx.ID_ANY, u"Published on:", wx.DefaultPosition, wx.DefaultSize, 0)
self.static_news_date.Wrap(-1)
ReadNewsProperties.Add(self.static_news_date, 0, wx.ALL, 5)
self.read_news_publish_date = wx.StaticText(self, wx.ID_ANY, _read_news_published_on, wx.DefaultPosition, wx.DefaultSize, 0)
self.read_news_publish_date.Wrap(-1)
ReadNewsProperties.Add(self.read_news_publish_date, 0, wx.ALL, 5)
ReadNewsTextWindow.Add(ReadNewsProperties, 0, wx.EXPAND, 5)
self.m_staticline17 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
ReadNewsTextWindow.Add(self.m_staticline17, 0, wx.EXPAND | wx.ALL, 5)
self.ReadNewsTextScrollWindow = wx.ScrolledWindow(self, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, 400), wx.HSCROLL | wx.VSCROLL)
self.ReadNewsTextScrollWindow.SetScrollRate(5, 5)
ReadNewsTextSizer = wx.BoxSizer(wx.VERTICAL)
ReadNewsTextSizer.SetMinSize(wx.Size(-1, 400))
self.read_news_main_content = wx.StaticText(self.ReadNewsTextScrollWindow, wx.ID_ANY, _read_news_text, wx.DefaultPosition, wx.Size(875, -1), 0)
self.read_news_main_content.Wrap(875)
ReadNewsTextSizer.Add(self.read_news_main_content, 1, wx.ALL, 5)
self.ReadNewsTextScrollWindow.SetSizer(ReadNewsTextSizer)
self.ReadNewsTextScrollWindow.Layout()
ReadNewsTextWindow.Add(self.ReadNewsTextScrollWindow, 1, wx.EXPAND | wx.ALL, 5)
self.m_staticline21 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
ReadNewsTextWindow.Add(self.m_staticline21, 0, wx.EXPAND | wx.ALL, 5)
ReadNewsTextWindowButtons = wx.BoxSizer(wx.HORIZONTAL)
self.ReadNewsButtonToggleReader = wx.Button(self, wx.ID_ANY, u"TTS Reader", wx.DefaultPosition, wx.DefaultSize, 0)
self.Bind(wx.EVT_BUTTON, self.OnToggleBtnPress, self.ReadNewsButtonToggleReader)
ReadNewsTextWindowButtons.Add(self.ReadNewsButtonToggleReader, 0, wx.ALL, 5)
ReadNewsTextWindow.Add(ReadNewsTextWindowButtons, 0, wx.EXPAND, 5)
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.m_staticline22 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
ReadNewsTextWindow.Add(self.m_staticline22, 0, wx.EXPAND | wx.ALL, 5)
ReadNewsRootWindow.Add(ReadNewsTextWindow, 0, wx.EXPAND, 5)
self.SetSizer(ReadNewsRootWindow)
self.Layout()
################################ STATIC UI ################################
self.Show()
self.Centre(wx.BOTH)
def __del__(self):
pass
def OnToggleBtnPress(self, event):
"""
Creates an instance of TextToSpeech and provides it with the
text of the article, then plays the TTS
"""
self.tts = TextToSpeech(self, self.text)
self.tts.Play()
def OnClose(self, event):
"""
Tries to destroy the tts object if it's running, closes and
destroys the ReadNewsFrame
"""
try:
self.tts.Stop()
del self.tts
except:
pass
self.Destroy() | [
"wx.Button",
"wx.BoxSizer",
"io.BytesIO",
"wx.StaticLine",
"wx.Size",
"wx.StaticText",
"wx.Image",
"wx.MessageBox",
"wx.Bitmap",
"TTS_Reader.TextToSpeech",
"urllib.request.urlopen"
] | [((2022, 2046), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (2033, 2046), False, 'import wx\n'), ((2193, 2217), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (2204, 2217), False, 'import wx\n'), ((3325, 3414), 'wx.StaticLine', 'wx.StaticLine', (['self', 'wx.ID_ANY', 'wx.DefaultPosition', 'wx.DefaultSize', 'wx.LI_HORIZONTAL'], {}), '(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.\n LI_HORIZONTAL)\n', (3338, 3414), False, 'import wx\n'), ((3518, 3542), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (3529, 3542), False, 'import wx\n'), ((3629, 3655), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (3640, 3655), False, 'import wx\n'), ((3690, 3775), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY', 'u"""Title:"""', 'wx.DefaultPosition', 'wx.DefaultSize', '(0)'], {}), "(self, wx.ID_ANY, u'Title:', wx.DefaultPosition, wx.DefaultSize, 0\n )\n", (3703, 3775), False, 'import wx\n'), ((4158, 4244), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY', 'u"""Source:"""', 'wx.DefaultPosition', 'wx.DefaultSize', '(0)'], {}), "(self, wx.ID_ANY, u'Source:', wx.DefaultPosition, wx.\n DefaultSize, 0)\n", (4171, 4244), False, 'import wx\n'), ((4615, 4707), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY', 'u"""Published on:"""', 'wx.DefaultPosition', 'wx.DefaultSize', '(0)'], {}), "(self, wx.ID_ANY, u'Published on:', wx.DefaultPosition, wx.\n DefaultSize, 0)\n", (4628, 4707), False, 'import wx\n'), ((4849, 4947), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY', '_read_news_published_on', 'wx.DefaultPosition', 'wx.DefaultSize', '(0)'], {}), '(self, wx.ID_ANY, _read_news_published_on, wx.DefaultPosition,\n wx.DefaultSize, 0)\n', (4862, 4947), False, 'import wx\n'), ((5163, 5252), 'wx.StaticLine', 'wx.StaticLine', (['self', 'wx.ID_ANY', 'wx.DefaultPosition', 'wx.DefaultSize', 'wx.LI_HORIZONTAL'], {}), '(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.\n LI_HORIZONTAL)\n', (5176, 5252), False, 'import wx\n'), ((5551, 5575), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (5562, 5575), False, 'import wx\n'), ((6136, 6225), 'wx.StaticLine', 'wx.StaticLine', (['self', 'wx.ID_ANY', 'wx.DefaultPosition', 'wx.DefaultSize', 'wx.LI_HORIZONTAL'], {}), '(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.\n LI_HORIZONTAL)\n', (6149, 6225), False, 'import wx\n'), ((6336, 6362), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (6347, 6362), False, 'import wx\n'), ((6406, 6491), 'wx.Button', 'wx.Button', (['self', 'wx.ID_ANY', 'u"""TTS Reader"""', 'wx.DefaultPosition', 'wx.DefaultSize', '(0)'], {}), "(self, wx.ID_ANY, u'TTS Reader', wx.DefaultPosition, wx.DefaultSize, 0\n )\n", (6415, 6491), False, 'import wx\n'), ((6814, 6903), 'wx.StaticLine', 'wx.StaticLine', (['self', 'wx.ID_ANY', 'wx.DefaultPosition', 'wx.DefaultSize', 'wx.LI_HORIZONTAL'], {}), '(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.\n LI_HORIZONTAL)\n', (6827, 6903), False, 'import wx\n'), ((7482, 7511), 'TTS_Reader.TextToSpeech', 'TextToSpeech', (['self', 'self.text'], {}), '(self, self.text)\n', (7494, 7511), False, 'from TTS_Reader import TextToSpeech\n'), ((2128, 2144), 'wx.Size', 'wx.Size', (['(-1)', '(300)'], {}), '(-1, 300)\n', (2135, 2144), False, 'import wx\n'), ((2422, 2439), 'io.BytesIO', 'BytesIO', (['file_url'], {}), '(file_url)\n', (2429, 2439), False, 'from io import BytesIO\n'), ((2463, 2479), 'wx.Image', 'wx.Image', 
(['stream'], {}), '(stream)\n', (2471, 2479), False, 'import wx\n'), ((2694, 2710), 'wx.Size', 'wx.Size', (['(-1)', '(300)'], {}), '(-1, 300)\n', (2701, 2710), False, 'import wx\n'), ((2793, 2852), 'wx.Bitmap', 'wx.Bitmap', (['"""img/placeholder300x202.jpg"""', 'wx.BITMAP_TYPE_ANY'], {}), "('img/placeholder300x202.jpg', wx.BITMAP_TYPE_ANY)\n", (2802, 2852), False, 'import wx\n'), ((2875, 2892), 'wx.Size', 'wx.Size', (['(600)', '(290)'], {}), '(600, 290)\n', (2882, 2892), False, 'import wx\n'), ((3582, 3598), 'wx.Size', 'wx.Size', (['(-1)', '(400)'], {}), '(-1, 400)\n', (3589, 3598), False, 'import wx\n'), ((3986, 4002), 'wx.Size', 'wx.Size', (['(300)', '(-1)'], {}), '(300, -1)\n', (3993, 4002), False, 'import wx\n'), ((4454, 4470), 'wx.Size', 'wx.Size', (['(150)', '(-1)'], {}), '(150, -1)\n', (4461, 4470), False, 'import wx\n'), ((5422, 5438), 'wx.Size', 'wx.Size', (['(-1)', '(400)'], {}), '(-1, 400)\n', (5429, 5438), False, 'import wx\n'), ((5614, 5630), 'wx.Size', 'wx.Size', (['(-1)', '(400)'], {}), '(-1, 400)\n', (5621, 5630), False, 'import wx\n'), ((5763, 5779), 'wx.Size', 'wx.Size', (['(875)', '(-1)'], {}), '(875, -1)\n', (5770, 5779), False, 'import wx\n'), ((1675, 1692), 'wx.Size', 'wx.Size', (['(900)', '(700)'], {}), '(900, 700)\n', (1682, 1692), False, 'import wx\n'), ((2508, 2658), 'wx.MessageBox', 'wx.MessageBox', ([], {'message': '"""There was an error while processing the image loading call."""', 'caption': '"""Error. Could not load the news."""', 'parent': 'self'}), "(message=\n 'There was an error while processing the image loading call.', caption=\n 'Error. Could not load the news.', parent=self)\n", (2521, 2658), False, 'import wx\n'), ((2967, 2986), 'wx.Bitmap', 'wx.Bitmap', (['self.img'], {}), '(self.img)\n', (2976, 2986), False, 'import wx\n'), ((2358, 2393), 'urllib.request.urlopen', 'urllib2.urlopen', (['_read_news_img_src'], {}), '(_read_news_img_src)\n', (2373, 2393), True, 'import urllib.request as urllib2\n')] |
import unittest
import json
import os
import subprocess
from read_mp_project import read_mp_properties
print(".....Runnint unittest for feature: material extraction.....")
class Testreadmp(unittest.TestCase):
# Testing properties of extracted materials.
# read_mp_project.py checked
def test_extracted_materials(self):
mp_properties = read_mp_properties('testing_data_materials.json')
tmp_keys = ['material_id', 'pretty_formula', 'elements',
'nelements', 'energy', 'energy_per_atom', 'density', 'volume',
'nsites', 'band_gap', 'total_magnetization', 'G_Reuss', 'G_VRH',
'G_Voigt', 'G_Voigt_Reuss_Hill', 'K_Reuss', 'K_VRH', 'K_Voigt',
'K_Voigt_Reuss_Hill', 'elastic_anisotropy', 'elastic_tensor',
'homogeneous_poisson', 'poisson_ratio', 'universal_anisotropy',
'elastic_tensor_original', 'compliance_tensor', 'warnings', 'piezo',
'diel', 'copyright', 'cif', 'elasticity']
with open('testing_data_materials.json') as f:
json_data = json.load(f)
num_results = json_data["num_results"]
self.assertEqual(tmp_keys, list(mp_properties))
# check if content of extracted cif is the same as manually downloaded.
with open("AcAg_mp-866199_computed.cif") as f:
content_in_cif = f.read()
self.assertEqual(mp_properties["cif"][0], content_in_cif)
self.assertEqual(mp_properties["material_id"][0],"mp-866199")
self.assertEqual(len(mp_properties["cif"]), num_results)
    # Testing failures and error handling.
# query_mp_project.sh checked
def test_extration_process(self):
        # No materials exist: try to extract something which doesn't exist.
        # No MP_API_KEY.
        # Check whether errors or warnings are present;
        # the warnings should be handled.
with open('testing_data_materials.json') as f:
json_data = json.load(f)
valid_response = json_data["valid_response"]
self.assertEqual(True, valid_response)
# create temporary file to store response results
subprocess.call("./query_mp_project.sh Ti -- 00 > tmp_res_file.json", shell=True)
with open("tmp_res_file.json") as f:
json_data = json.load(f)
valid_response = json_data["valid_response"]
num_results = json_data["num_results"]
self.assertEqual(valid_response, True)
self.assertEqual(num_results, 0)
os.remove("tmp_res_file.json")
if __name__ == '__main__':
unittest.main()
| [
"read_mp_project.read_mp_properties",
"json.load",
"subprocess.call",
"unittest.main",
"os.remove"
] | [((2603, 2618), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2616, 2618), False, 'import unittest\n'), ((357, 406), 'read_mp_project.read_mp_properties', 'read_mp_properties', (['"""testing_data_materials.json"""'], {}), "('testing_data_materials.json')\n", (375, 406), False, 'from read_mp_project import read_mp_properties\n'), ((2179, 2265), 'subprocess.call', 'subprocess.call', (['"""./query_mp_project.sh Ti -- 00 > tmp_res_file.json"""'], {'shell': '(True)'}), "('./query_mp_project.sh Ti -- 00 > tmp_res_file.json', shell\n =True)\n", (2194, 2265), False, 'import subprocess\n'), ((2539, 2569), 'os.remove', 'os.remove', (['"""tmp_res_file.json"""'], {}), "('tmp_res_file.json')\n", (2548, 2569), False, 'import os\n'), ((1113, 1125), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1122, 1125), False, 'import json\n'), ((2000, 2012), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2009, 2012), False, 'import json\n'), ((2330, 2342), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2339, 2342), False, 'import json\n')] |
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import uuid
import pytest
from elasticapm.conf import constants
azureblob = pytest.importorskip("azure.storage.blob")
azurequeue = pytest.importorskip("azure.storage.queue")
azuretable = pytest.importorskip("azure.cosmosdb.table")
azurefile = pytest.importorskip("azure.storage.fileshare")
pytestmark = [pytest.mark.azurestorage]
from azure.cosmosdb.table.tableservice import TableService
from azure.storage.blob import BlobServiceClient
from azure.storage.fileshare import ShareClient
from azure.storage.queue import QueueClient
CONNECTION_STRING = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
if not CONNECTION_STRING:
pytestmark.append(
pytest.mark.skip("Skipping azure storage tests, no AZURE_STORAGE_CONNECTION_STRING environment variable set")
)
@pytest.fixture()
def container_client(blob_service_client):
container_name = "apm-agent-python-ci-" + str(uuid.uuid4())
container_client = blob_service_client.create_container(container_name)
yield container_client
blob_service_client.delete_container(container_name)
@pytest.fixture()
def blob_service_client():
blob_service_client = BlobServiceClient.from_connection_string(CONNECTION_STRING)
return blob_service_client
@pytest.fixture()
def queue_client():
queue_name = "apm-agent-python-ci-" + str(uuid.uuid4())
queue_client = QueueClient.from_connection_string(CONNECTION_STRING, queue_name)
queue_client.create_queue()
yield queue_client
queue_client.delete_queue()
@pytest.fixture()
def table_service():
table_name = "apmagentpythonci" + str(uuid.uuid4().hex)
table_service = TableService(connection_string=CONNECTION_STRING)
table_service.create_table(table_name)
table_service.table_name = table_name
yield table_service
table_service.delete_table(table_name)
@pytest.fixture()
def share_client():
share_name = "apmagentpythonci" + str(uuid.uuid4().hex)
share_client = ShareClient.from_connection_string(conn_str=CONNECTION_STRING, share_name=share_name)
share_client.create_share()
yield share_client
share_client.delete_share()
def test_blob_list_blobs(instrument, elasticapm_client, container_client):
elasticapm_client.begin_transaction("transaction.test")
list(container_client.list_blobs())
elasticapm_client.end_transaction("MyView")
span = elasticapm_client.events[constants.SPAN][0]
assert span["name"] == "AzureBlob ListBlobs {}".format(container_client.container_name)
assert span["type"] == "storage"
assert span["subtype"] == "azureblob"
assert span["action"] == "ListBlobs"
def test_blob_create_container(instrument, elasticapm_client, blob_service_client):
elasticapm_client.begin_transaction("transaction.test")
container_name = str(uuid.uuid4())
container_client = blob_service_client.create_container(container_name)
blob_service_client.delete_container(container_name)
elasticapm_client.end_transaction("MyView")
span = elasticapm_client.events[constants.SPAN][0]
assert span["name"] == "AzureBlob Create {}".format(container_name)
assert span["type"] == "storage"
assert span["subtype"] == "azureblob"
assert span["action"] == "Create"
def test_blob_upload(instrument, elasticapm_client, container_client, blob_service_client):
elasticapm_client.begin_transaction("transaction.test")
# Upload this file to the container
blob_client = blob_service_client.get_blob_client(container=container_client.container_name, blob=__file__)
with open(__file__, "rb") as data:
blob_client.upload_blob(data)
elasticapm_client.end_transaction("MyView")
span = elasticapm_client.events[constants.SPAN][0]
assert span["name"] == "AzureBlob Upload {}/{}".format(container_client.container_name, __file__)
assert span["type"] == "storage"
assert span["subtype"] == "azureblob"
assert span["action"] == "Upload"
def test_queue(instrument, elasticapm_client, queue_client):
elasticapm_client.begin_transaction("transaction.test")
# Send a message
queue_client.send_message("Test message")
list(queue_client.peek_messages())
messages = queue_client.receive_messages()
for msg_batch in messages.by_page():
for msg in msg_batch:
queue_client.delete_message(msg)
elasticapm_client.end_transaction("MyView")
span = elasticapm_client.events[constants.SPAN][0]
assert span["name"] == "AzureQueue SEND to {}".format(queue_client.queue_name)
assert span["type"] == "messaging"
assert span["subtype"] == "azurequeue"
assert span["action"] == "send"
span = elasticapm_client.events[constants.SPAN][1]
assert span["name"] == "AzureQueue PEEK from {}".format(queue_client.queue_name)
assert span["type"] == "messaging"
assert span["subtype"] == "azurequeue"
assert span["action"] == "peek"
span = elasticapm_client.events[constants.SPAN][2]
assert span["name"] == "AzureQueue RECEIVE from {}".format(queue_client.queue_name)
assert span["type"] == "messaging"
assert span["subtype"] == "azurequeue"
assert span["action"] == "receive"
span = elasticapm_client.events[constants.SPAN][3]
assert span["name"] == "AzureQueue DELETE from {}".format(queue_client.queue_name)
assert span["type"] == "messaging"
assert span["subtype"] == "azurequeue"
assert span["action"] == "delete"
def test_table_create(instrument, elasticapm_client):
table_name = "apmagentpythonci" + str(uuid.uuid4().hex)
table_service = TableService(connection_string=CONNECTION_STRING)
elasticapm_client.begin_transaction("transaction.test")
table_service.create_table(table_name)
table_service.delete_table(table_name)
elasticapm_client.end_transaction("MyView")
span = elasticapm_client.events[constants.SPAN][0]
assert span["name"] == "AzureTable Create {}".format(table_name)
assert span["type"] == "storage"
assert span["subtype"] == "azuretable"
assert span["action"] == "Create"
def test_table(instrument, elasticapm_client, table_service):
table_name = table_service.table_name
elasticapm_client.begin_transaction("transaction.test")
task = {"PartitionKey": "tasksSeattle", "RowKey": "001", "description": "Take out the trash", "priority": 200}
table_service.insert_entity(table_name, task)
task = {"PartitionKey": "tasksSeattle", "RowKey": "001", "description": "Take out the garbage", "priority": 250}
table_service.update_entity(table_name, task)
task = table_service.get_entity(table_name, "tasksSeattle", "001")
table_service.delete_entity(table_name, "tasksSeattle", "001")
elasticapm_client.end_transaction("MyView")
span = elasticapm_client.events[constants.SPAN][0]
assert span["name"] == "AzureTable Insert {}".format(table_name)
assert span["type"] == "storage"
assert span["subtype"] == "azuretable"
assert span["action"] == "Insert"
span = elasticapm_client.events[constants.SPAN][1]
assert span["name"] == "AzureTable Update {}(PartitionKey='tasksSeattle',RowKey='001')".format(table_name)
assert span["type"] == "storage"
assert span["subtype"] == "azuretable"
assert span["action"] == "Update"
span = elasticapm_client.events[constants.SPAN][2]
assert span["name"] == "AzureTable Query {}(PartitionKey='tasksSeattle',RowKey='001')".format(table_name)
assert span["type"] == "storage"
assert span["subtype"] == "azuretable"
assert span["action"] == "Query"
span = elasticapm_client.events[constants.SPAN][3]
assert span["name"] == "AzureTable Delete {}(PartitionKey='tasksSeattle',RowKey='001')".format(table_name)
assert span["type"] == "storage"
assert span["subtype"] == "azuretable"
assert span["action"] == "Delete"
def test_fileshare(instrument, elasticapm_client, share_client):
elasticapm_client.begin_transaction("transaction.test")
# Upload this file to the share
file_client = share_client.get_file_client("testfile.txt")
with open(__file__, "rb") as data:
file_client.upload_file(data)
file_client.download_file()
file_client.delete_file()
elasticapm_client.end_transaction("MyView")
span = elasticapm_client.events[constants.SPAN][0]
assert span["name"] == "AzureFile Create {}/testfile.txt".format(share_client.share_name)
assert span["type"] == "storage"
assert span["subtype"] == "azurefile"
assert span["action"] == "Create"
span = elasticapm_client.events[constants.SPAN][1]
assert span["name"] == "AzureFile Upload {}/testfile.txt".format(share_client.share_name)
assert span["type"] == "storage"
assert span["subtype"] == "azurefile"
assert span["action"] == "Upload"
span = elasticapm_client.events[constants.SPAN][2]
assert span["name"] == "AzureFile Download {}/testfile.txt".format(share_client.share_name)
assert span["type"] == "storage"
assert span["subtype"] == "azurefile"
assert span["action"] == "Download"
span = elasticapm_client.events[constants.SPAN][3]
assert span["name"] == "AzureFile Delete {}/testfile.txt".format(share_client.share_name)
assert span["type"] == "storage"
assert span["subtype"] == "azurefile"
assert span["action"] == "Delete"
| [
"os.getenv",
"pytest.mark.skip",
"uuid.uuid4",
"azure.storage.blob.BlobServiceClient.from_connection_string",
"azure.cosmosdb.table.tableservice.TableService",
"pytest.importorskip",
"pytest.fixture",
"azure.storage.fileshare.ShareClient.from_connection_string",
"azure.storage.queue.QueueClient.from_connection_string"
] | [((1682, 1723), 'pytest.importorskip', 'pytest.importorskip', (['"""azure.storage.blob"""'], {}), "('azure.storage.blob')\n", (1701, 1723), False, 'import pytest\n'), ((1737, 1779), 'pytest.importorskip', 'pytest.importorskip', (['"""azure.storage.queue"""'], {}), "('azure.storage.queue')\n", (1756, 1779), False, 'import pytest\n'), ((1793, 1836), 'pytest.importorskip', 'pytest.importorskip', (['"""azure.cosmosdb.table"""'], {}), "('azure.cosmosdb.table')\n", (1812, 1836), False, 'import pytest\n'), ((1849, 1895), 'pytest.importorskip', 'pytest.importorskip', (['"""azure.storage.fileshare"""'], {}), "('azure.storage.fileshare')\n", (1868, 1895), False, 'import pytest\n'), ((2158, 2202), 'os.getenv', 'os.getenv', (['"""AZURE_STORAGE_CONNECTION_STRING"""'], {}), "('AZURE_STORAGE_CONNECTION_STRING')\n", (2167, 2202), False, 'import os\n'), ((2380, 2396), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2394, 2396), False, 'import pytest\n'), ((2669, 2685), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2683, 2685), False, 'import pytest\n'), ((2833, 2849), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2847, 2849), False, 'import pytest\n'), ((3107, 3123), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (3121, 3123), False, 'import pytest\n'), ((3432, 3448), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (3446, 3448), False, 'import pytest\n'), ((2739, 2798), 'azure.storage.blob.BlobServiceClient.from_connection_string', 'BlobServiceClient.from_connection_string', (['CONNECTION_STRING'], {}), '(CONNECTION_STRING)\n', (2779, 2798), False, 'from azure.storage.blob import BlobServiceClient\n'), ((2949, 3014), 'azure.storage.queue.QueueClient.from_connection_string', 'QueueClient.from_connection_string', (['CONNECTION_STRING', 'queue_name'], {}), '(CONNECTION_STRING, queue_name)\n', (2983, 3014), False, 'from azure.storage.queue import QueueClient\n'), ((3225, 3274), 'azure.cosmosdb.table.tableservice.TableService', 'TableService', ([], {'connection_string': 'CONNECTION_STRING'}), '(connection_string=CONNECTION_STRING)\n', (3237, 3274), False, 'from azure.cosmosdb.table.tableservice import TableService\n'), ((3548, 3638), 'azure.storage.fileshare.ShareClient.from_connection_string', 'ShareClient.from_connection_string', ([], {'conn_str': 'CONNECTION_STRING', 'share_name': 'share_name'}), '(conn_str=CONNECTION_STRING, share_name=\n share_name)\n', (3582, 3638), False, 'from azure.storage.fileshare import ShareClient\n'), ((7153, 7202), 'azure.cosmosdb.table.tableservice.TableService', 'TableService', ([], {'connection_string': 'CONNECTION_STRING'}), '(connection_string=CONNECTION_STRING)\n', (7165, 7202), False, 'from azure.cosmosdb.table.tableservice import TableService\n'), ((2261, 2380), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Skipping azure storage tests, no AZURE_STORAGE_CONNECTION_STRING environment variable set"""'], {}), "(\n 'Skipping azure storage tests, no AZURE_STORAGE_CONNECTION_STRING environment variable set'\n )\n", (2277, 2380), False, 'import pytest\n'), ((4387, 4399), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4397, 4399), False, 'import uuid\n'), ((2490, 2502), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2500, 2502), False, 'import uuid\n'), ((2916, 2928), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2926, 2928), False, 'import uuid\n'), ((3187, 3199), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3197, 3199), False, 'import uuid\n'), ((3511, 3523), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3521, 3523), False, 'import 
uuid\n'), ((7115, 7127), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7125, 7127), False, 'import uuid\n')] |
# Standard imports
import configparser
import datetime
import logging
import shutil
import threading
import time
import os
# Non-standard imports
import icmplib
import wakeonlan
# Constellation imports
import config
class ExhibitComponent:
"""Holds basic data about a component in the exhibit"""
def __init__(self, id_, this_type, category='dynamic'):
# category='dynamic' for components that are connected over the network
# category='static' for components added from currentExhibitConfiguration.ini
self.id = id_
self.type = this_type
self.category = category
self.ip = "" # IP address of client
        self.helperPort = 8000  # port of the localhost helper for this component DEPRECATED
self.helperAddress = None # full IP and port of helper
self.macAddress = None # Added below if we have specified a Wake on LAN device
self.broadcastAddress = "255.255.255.255"
self.WOLPort = 9
self.last_contact_datetime = datetime.datetime.now()
self.lastInteractionDateTime = datetime.datetime(2020, 1, 1)
self.config = {"commands": [],
"allowed_actions": [],
"description": config.componentDescriptions.get(id_, ""),
"AnyDeskID": ""}
if category != "static":
self.update_configuration()
# Check if we have specified a Wake on LAN device matching this id
# If yes, subsume it into this component
wol = get_wake_on_LAN_component(self.id)
if wol is not None:
self.macAddress = wol.macAddress
if "power_on" not in self.config["allowed_actions"]:
self.config["allowed_actions"].append("power_on")
if "shutdown" not in self.config["allowed_actions"]:
self.config["allowed_actions"].append("power_off")
config.wakeOnLANList = [x for x in config.wakeOnLANList if x.id != wol.id]
def seconds_since_last_contact(self) -> float:
"""Return the number of seconds since a ping was received"""
diff = datetime.datetime.now() - self.last_contact_datetime
return diff.total_seconds()
def seconds_since_last_interaction(self) -> float:
"""Return the number of seconds since an interaction was recorded"""
diff = datetime.datetime.now() - self.lastInteractionDateTime
return diff.total_seconds()
def update_last_contact_datetime(self):
# We've received a new ping from this component, so update its
# last_contact_datetime
self.last_contact_datetime = datetime.datetime.now()
def update_last_interaction_datetime(self):
# We've received a new interaction ping, so update its
# lastInteractionDateTime
self.lastInteractionDateTime = datetime.datetime.now()
def current_status(self) -> str:
"""Return the current status of the component
        Options: [STATIC, OFFLINE, SYSTEM ON, ONLINE, ACTIVE, WAITING]
"""
if self.category == "static":
return "STATIC"
if self.seconds_since_last_contact() < 30:
if self.seconds_since_last_interaction() < 10:
status = "ACTIVE"
else:
status = "ONLINE"
elif self.seconds_since_last_contact() < 60:
status = "WAITING"
else:
# If we haven't heard from the component, we might still be able
# to ping the PC and see if it is alive
status = self.update_PC_status()
return status
def update_configuration(self):
"""Retrieve the latest configuration data from the configParser object"""
try:
file_config = dict(config.currentExhibitConfiguration.items(self.id))
for key in file_config:
if key == 'content':
self.config[key] = [s.strip() for s in file_config[key].split(",")]
elif key == "description":
pass # This is specified elsewhere
else:
self.config[key] = file_config[key]
except configparser.NoSectionError:
pass
# print(f"Warning: there is no configuration available for component with id={self.id}")
# with config.logLock:
# logging.warning(f"there is no configuration available for component with id={self.id}")
self.config["current_exhibit"] = config.currentExhibit[0:-8]
def queue_command(self, command):
"""Queue a command to be sent to the component on the next ping"""
if (command in ["power_on", "wakeDisplay"]) and (self.macAddress is not None):
self.wake_with_LAN()
else:
print(f"{self.id}: command queued: {command}")
self.config["commands"].append(command)
print(f"{self.id}: pending commands: {self.config['commands']}")
def wake_with_LAN(self):
# Function to send a magic packet waking the device
if self.macAddress is not None:
print(f"Sending wake on LAN packet to {self.id}")
with config.logLock:
logging.info(f"Sending wake on LAN packet to {self.id}")
try:
wakeonlan.send_magic_packet(self.macAddress,
ip_address=self.broadcastAddress,
port=self.WOLPort)
except ValueError as e:
print(f"Wake on LAN error for component {self.id}: {str(e)}")
with config.logLock:
logging.error(f"Wake on LAN error for component {self.id}: {str(e)}")
def update_PC_status(self):
"""If we have an IP address, ping the host to see if it is awake"""
status = "UNKNOWN"
if self.ip is not None:
try:
ping = icmplib.ping(self.ip, privileged=False, count=1, timeout=0.05)
if ping.is_alive:
status = "SYSTEM ON"
elif self.seconds_since_last_contact() > 60:
status = "OFFLINE"
else:
status = "WAITING"
except icmplib.exceptions.SocketPermissionError:
if "wakeOnLANPrivilege" not in config.serverWarningDict:
print(
"Warning: to check the status of Wake on LAN devices, you must run the control server with administrator privileges.")
with config.logLock:
logging.info(f"Need administrator privilege to check Wake on LAN status")
config.serverWarningDict["wakeOnLANPrivilege"] = True
return status
class WakeOnLANDevice:
"""Holds basic information about a wake on LAN device and facilitates waking it"""
def __init__(self, id_, mac_address, ip_address=None):
self.id = id_
self.type = "WAKE_ON_LAN"
self.macAddress = mac_address
self.broadcastAddress = "255.255.255.255"
self.port = 9
self.ip = ip_address
self.config = {"allowed_actions": ["power_on"],
"description": config.componentDescriptions.get(id_, "")}
self.state = {"status": "UNKNOWN"}
self.last_contact_datetime = datetime.datetime(2020, 1, 1)
def seconds_since_last_contact(self) -> float:
diff = datetime.datetime.now() - self.last_contact_datetime
return diff.total_seconds()
def queue_command(self, cmd):
"""Wrapper function to match other exhibit components"""
if cmd in ["power_on", "wakeDisplay"]:
self.wake()
def wake(self):
"""Function to send a magic packet waking the device"""
print(f"Sending wake on LAN packet to {self.id}")
with config.logLock:
logging.info(f"Sending wake on LAN packet to {self.id}")
try:
wakeonlan.send_magic_packet(self.macAddress,
ip_address=self.broadcastAddress,
port=self.port)
except ValueError as e:
print(f"Wake on LAN error for component {self.id}: {str(e)}")
with config.logLock:
logging.error(f"Wake on LAN error for component {self.id}: {str(e)}")
def update(self):
"""If we have an IP address, ping the host to see if it is awake"""
if self.ip is not None:
try:
ping = icmplib.ping(self.ip, privileged=False, count=1)
if ping.is_alive:
self.state["status"] = "SYSTEM ON"
self.last_contact_datetime = datetime.datetime.now()
elif self.seconds_since_last_contact() > 60:
self.state["status"] = "OFFLINE"
except icmplib.exceptions.SocketPermissionError:
if "wakeOnLANPrivilege" not in config.serverWarningDict:
print(
"Warning: to check the status of Wake on LAN devices, you must run the control server with administrator privileges.")
with config.logLock:
logging.info(f"Need administrator privilege to check Wake on LAN status")
config.serverWarningDict["wakeOnLANPrivilege"] = True
else:
self.state["status"] = "UNKNOWN"
def add_exhibit_component(this_id, this_type, category="dynamic") -> ExhibitComponent:
"""Create a new ExhibitComponent, add it to the config.componentList, and return it"""
component = ExhibitComponent(this_id, this_type, category)
config.componentList.append(component)
return component
def check_available_exhibits():
"""Get a list of available "*.exhibit" configuration files"""
config.exhibit_list = []
exhibits_path = os.path.join(config.APP_PATH, "exhibits")
with config.exhibitsLock:
for file in os.listdir(exhibits_path):
if file.lower().endswith(".exhibit"):
config.exhibit_list.append(file)
def command_all_exhibit_components(cmd):
"""Queue a command for every exhibit component"""
print("Sending command to all components:", cmd)
with config.logLock:
logging.info("command_all_exhibit_components: %s", cmd)
for component in config.componentList:
component.queue_command(cmd)
for projector in config.projectorList:
projector.queue_command(cmd)
for device in config.wakeOnLANList:
device.queue_command(cmd)
def create_new_exhibit(name, clone):
"""Create a new exhibit file
Set clone=None to create a new file, or set it equal to the name of an
existing exhibit to clone that exhibit."""
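    # e.g. create_new_exhibit("autumn_show", clone=None) creates an empty "autumn_show.exhibit",
    # while create_new_exhibit("autumn_show", clone="spring_show") copies "spring_show.exhibit".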
# Make sure we have the proper extension
if not name.lower().endswith(".exhibit"):
name += ".exhibit"
new_file = os.path.join(config.APP_PATH, "exhibits", name)
if clone is not None:
# Copy an existing file
# Make sure we have the proper extension on the file we're copying from
if not clone.lower().endswith(".exhibit"):
clone += ".exhibit"
existing_file = os.path.join(config.APP_PATH, "exhibits", clone)
shutil.copyfile(existing_file, new_file)
else:
# Make a new file
with config.exhibitsLock:
if not os.path.isfile(new_file):
# If this file does not exist, touch it so that it does.
with open(new_file, "w", encoding='UTF-8'):
pass
check_available_exhibits()
def delete_exhibit(name):
"""Delete the specified exhibit file"""
# Make sure we have the proper extension
if not name.lower().endswith(".exhibit"):
name += ".exhibit"
file_to_delete = os.path.join(config.APP_PATH, "exhibits", name)
with config.exhibitsLock:
try:
os.remove(file_to_delete)
except FileNotFoundError:
print(f"Error: Unable to delete exhibit {file_to_delete}. File not found!")
check_available_exhibits()
def get_exhibit_component(this_id) -> ExhibitComponent:
"""Return a component with the given id, or None if no such component exists"""
return next((x for x in config.componentList if x.id == this_id), None)
def get_wake_on_LAN_component(this_id) -> WakeOnLANDevice:
"""Return a WakeOnLan device with the given id, or None if no such component exists"""
return next((x for x in config.wakeOnLANList if x.id == this_id), None)
def poll_wake_on_LAN_devices():
"""Ask every Wake on LAN device to report its status at an interval.
"""
for device in config.wakeOnLANList:
new_thread = threading.Thread(target=device.update)
new_thread.daemon = True # So it dies if we exit
new_thread.start()
config.polling_thread_dict["poll_wake_on_LAN_devices"] = threading.Timer(30, poll_wake_on_LAN_devices)
config.polling_thread_dict["poll_wake_on_LAN_devices"].start()
def read_exhibit_configuration(name, updateDefault=False):
# We want the format of name to be "XXXX.exhibit", but it might be
# "exhibits/XXXX.exhibit"
error = False
split_path = os.path.split(name)
if len(split_path) == 2:
if split_path[0] == "exhibits":
name = split_path[1]
elif split_path[0] == "":
pass
else:
error = True
else:
error = True
if error:
# Something bad has happened. Display an error and bail out
print(
f"Error: exhibit definition with name {name} does not appear to be properly formatted. This file should be located in the exhibits directory.")
with config.logLock:
logging.error('Bad exhibit definition filename: %s', name)
return
config.currentExhibit = name
config.currentExhibitConfiguration = configparser.ConfigParser()
exhibit_path = os.path.join(config.APP_PATH, "exhibits")
config.currentExhibitConfiguration.read(exhibit_path)
if updateDefault:
configReader = configparser.ConfigParser(delimiters="=")
        configReader.optionxform = str  # Override default, which is case-insensitive
cEC_path = os.path.join(config.APP_PATH,
'currentExhibitConfiguration.ini')
with config.galleryConfigurationLock:
configReader.read(cEC_path)
configReader.set("CURRENT", "current_exhibit", name)
with open(cEC_path, "w", encoding="UTF-8") as f:
configReader.write(f)
def set_component_content(id_, content_list):
"""Loop the content list and build a string to write to the config file"""
content = ", ".join(content_list)
with config.galleryConfigurationLock:
try:
config.currentExhibitConfiguration.set(id_, "content", content)
except configparser.NoSectionError: # This exhibit does not have content for this component
config.currentExhibitConfiguration.add_section(id_)
config.currentExhibitConfiguration.set(id_, "content", content)
# Update the component
get_exhibit_component(id_).update_configuration()
# Write new configuration to file
with config.galleryConfigurationLock:
with open(os.path.join(config.APP_PATH, "exhibits", config.currentExhibit),
'w', encoding="UTF-8") as f:
config.currentExhibitConfiguration.write(f)
def update_synchronization_list(this_id, other_ids):
"""Manage synchronization between components.
config.synchronizationList is a list of dictionaries, with one dictionary for every
set of synchronized components.
"""
print(f"Received sync request from {this_id} to sync with {other_ids}")
print(f"Current synchronizationList: {config.synchronizationList}")
id_known = False
index = 0
match_index = -1
for item in config.synchronizationList:
if this_id in item["ids"]:
id_known = True
match_index = index
index += 1
if id_known is False:
# Create a new dictionary
temp = {"ids": [this_id] + other_ids}
temp["checked_in"] = [False for _ in temp["ids"]]
(temp["checked_in"])[0] = True # Check in the current id
config.synchronizationList.append(temp)
else:
index = (config.synchronizationList[match_index])["ids"].index(this_id)
((config.synchronizationList[match_index])["checked_in"])[index] = True
if all((config.synchronizationList[match_index])["checked_in"]):
print("All components have checked in. Dispatching sync command")
time_to_start = str(round(time.time() * 1000) + 10000)
for item in (config.synchronizationList[match_index])["ids"]:
get_exhibit_component(item).queue_command(f"beginSynchronization_{time_to_start}")
# Remove this sync from the list in case it happens again later.
config.synchronizationList.pop(match_index)
def update_exhibit_component_status(data, ip):
"""Update an ExhibitComponent with the values in a dictionary."""
this_id = data["id"]
this_type = data["type"]
component = get_exhibit_component(this_id)
if component is None: # This is a new id, so make the component
component = add_exhibit_component(this_id, this_type)
component.ip = ip
if "helperPort" in data:
component.helperPort = data["helperPort"]
if "helperAddress" in data:
component.helperAddress = data["helperAddress"]
component.update_last_contact_datetime()
if "AnyDeskID" in data:
component.config["AnyDeskID"] = data["AnyDeskID"]
if "currentInteraction" in data:
if data["currentInteraction"].lower() == "true":
component.update_last_interaction_datetime()
if "allowed_actions" in data:
allowed_actions = data["allowed_actions"]
for key in allowed_actions:
if allowed_actions[key].lower() in ["true", "yes", "1"]:
if key not in component.config["allowed_actions"]:
component.config["allowed_actions"].append(key)
else:
component.config["allowed_actions"] = [x for x in component.config["allowed_actions"] if x != key]
if "error" in data:
component.config["error"] = data["error"]
else:
if "error" in component.config:
component.config.pop("error")
# Set up log file
log_path = os.path.join(config.APP_PATH, "control_server.log")
logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S',
filename=log_path,
format='%(levelname)s, %(asctime)s, %(message)s',
level=logging.DEBUG)
| [
"configparser.ConfigParser",
"config.componentList.append",
"config.synchronizationList.append",
"logging.info",
"config.currentExhibitConfiguration.set",
"logging.error",
"os.remove",
"datetime.datetime",
"os.listdir",
"config.exhibit_list.append",
"os.path.split",
"config.currentExhibitConfiguration.write",
"wakeonlan.send_magic_packet",
"config.currentExhibitConfiguration.read",
"config.synchronizationList.pop",
"threading.Timer",
"os.path.isfile",
"shutil.copyfile",
"time.time",
"logging.basicConfig",
"config.currentExhibitConfiguration.add_section",
"os.path.join",
"datetime.datetime.now",
"config.componentDescriptions.get",
"threading.Thread",
"icmplib.ping",
"config.currentExhibitConfiguration.items"
] | [((18591, 18642), 'os.path.join', 'os.path.join', (['config.APP_PATH', '"""control_server.log"""'], {}), "(config.APP_PATH, 'control_server.log')\n", (18603, 18642), False, 'import os\n'), ((18643, 18786), 'logging.basicConfig', 'logging.basicConfig', ([], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""', 'filename': 'log_path', 'format': '"""%(levelname)s, %(asctime)s, %(message)s"""', 'level': 'logging.DEBUG'}), "(datefmt='%Y-%m-%d %H:%M:%S', filename=log_path, format=\n '%(levelname)s, %(asctime)s, %(message)s', level=logging.DEBUG)\n", (18662, 18786), False, 'import logging\n'), ((9719, 9757), 'config.componentList.append', 'config.componentList.append', (['component'], {}), '(component)\n', (9746, 9757), False, 'import config\n'), ((9930, 9971), 'os.path.join', 'os.path.join', (['config.APP_PATH', '"""exhibits"""'], {}), "(config.APP_PATH, 'exhibits')\n", (9942, 9971), False, 'import os\n'), ((10956, 11003), 'os.path.join', 'os.path.join', (['config.APP_PATH', '"""exhibits"""', 'name'], {}), "(config.APP_PATH, 'exhibits', name)\n", (10968, 11003), False, 'import os\n'), ((11868, 11915), 'os.path.join', 'os.path.join', (['config.APP_PATH', '"""exhibits"""', 'name'], {}), "(config.APP_PATH, 'exhibits', name)\n", (11880, 11915), False, 'import os\n'), ((12963, 13008), 'threading.Timer', 'threading.Timer', (['(30)', 'poll_wake_on_LAN_devices'], {}), '(30, poll_wake_on_LAN_devices)\n', (12978, 13008), False, 'import threading\n'), ((13274, 13293), 'os.path.split', 'os.path.split', (['name'], {}), '(name)\n', (13287, 13293), False, 'import os\n'), ((13961, 13988), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (13986, 13988), False, 'import configparser\n'), ((14008, 14049), 'os.path.join', 'os.path.join', (['config.APP_PATH', '"""exhibits"""'], {}), "(config.APP_PATH, 'exhibits')\n", (14020, 14049), False, 'import os\n'), ((14054, 14107), 'config.currentExhibitConfiguration.read', 'config.currentExhibitConfiguration.read', (['exhibit_path'], {}), '(exhibit_path)\n', (14093, 14107), False, 'import config\n'), ((1023, 1046), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1044, 1046), False, 'import datetime\n'), ((1086, 1115), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (1103, 1115), False, 'import datetime\n'), ((2649, 2672), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2670, 2672), False, 'import datetime\n'), ((2860, 2883), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2881, 2883), False, 'import datetime\n'), ((7369, 7398), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (7386, 7398), False, 'import datetime\n'), ((10023, 10048), 'os.listdir', 'os.listdir', (['exhibits_path'], {}), '(exhibits_path)\n', (10033, 10048), False, 'import os\n'), ((10333, 10388), 'logging.info', 'logging.info', (['"""command_all_exhibit_components: %s"""', 'cmd'], {}), "('command_all_exhibit_components: %s', cmd)\n", (10345, 10388), False, 'import logging\n'), ((11251, 11299), 'os.path.join', 'os.path.join', (['config.APP_PATH', '"""exhibits"""', 'clone'], {}), "(config.APP_PATH, 'exhibits', clone)\n", (11263, 11299), False, 'import os\n'), ((11308, 11348), 'shutil.copyfile', 'shutil.copyfile', (['existing_file', 'new_file'], {}), '(existing_file, new_file)\n', (11323, 11348), False, 'import shutil\n'), ((12777, 12815), 'threading.Thread', 'threading.Thread', ([], {'target': 'device.update'}), '(target=device.update)\n', (12793, 
12815), False, 'import threading\n'), ((14154, 14195), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'delimiters': '"""="""'}), "(delimiters='=')\n", (14179, 14195), False, 'import configparser\n'), ((14302, 14366), 'os.path.join', 'os.path.join', (['config.APP_PATH', '"""currentExhibitConfiguration.ini"""'], {}), "(config.APP_PATH, 'currentExhibitConfiguration.ini')\n", (14314, 14366), False, 'import os\n'), ((16378, 16417), 'config.synchronizationList.append', 'config.synchronizationList.append', (['temp'], {}), '(temp)\n', (16411, 16417), False, 'import config\n'), ((1240, 1281), 'config.componentDescriptions.get', 'config.componentDescriptions.get', (['id_', '""""""'], {}), "(id_, '')\n", (1272, 1281), False, 'import config\n'), ((2132, 2155), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2153, 2155), False, 'import datetime\n'), ((2371, 2394), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2392, 2394), False, 'import datetime\n'), ((7245, 7286), 'config.componentDescriptions.get', 'config.componentDescriptions.get', (['id_', '""""""'], {}), "(id_, '')\n", (7277, 7286), False, 'import config\n'), ((7467, 7490), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7488, 7490), False, 'import datetime\n'), ((7915, 7971), 'logging.info', 'logging.info', (['f"""Sending wake on LAN packet to {self.id}"""'], {}), "(f'Sending wake on LAN packet to {self.id}')\n", (7927, 7971), False, 'import logging\n'), ((7997, 8096), 'wakeonlan.send_magic_packet', 'wakeonlan.send_magic_packet', (['self.macAddress'], {'ip_address': 'self.broadcastAddress', 'port': 'self.port'}), '(self.macAddress, ip_address=self.\n broadcastAddress, port=self.port)\n', (8024, 8096), False, 'import wakeonlan\n'), ((11972, 11997), 'os.remove', 'os.remove', (['file_to_delete'], {}), '(file_to_delete)\n', (11981, 11997), False, 'import os\n'), ((13812, 13870), 'logging.error', 'logging.error', (['"""Bad exhibit definition filename: %s"""', 'name'], {}), "('Bad exhibit definition filename: %s', name)\n", (13825, 13870), False, 'import logging\n'), ((14883, 14946), 'config.currentExhibitConfiguration.set', 'config.currentExhibitConfiguration.set', (['id_', '"""content"""', 'content'], {}), "(id_, 'content', content)\n", (14921, 14946), False, 'import config\n'), ((15494, 15537), 'config.currentExhibitConfiguration.write', 'config.currentExhibitConfiguration.write', (['f'], {}), '(f)\n', (15534, 15537), False, 'import config\n'), ((17068, 17111), 'config.synchronizationList.pop', 'config.synchronizationList.pop', (['match_index'], {}), '(match_index)\n', (17098, 17111), False, 'import config\n'), ((3776, 3825), 'config.currentExhibitConfiguration.items', 'config.currentExhibitConfiguration.items', (['self.id'], {}), '(self.id)\n', (3816, 3825), False, 'import config\n'), ((5219, 5275), 'logging.info', 'logging.info', (['f"""Sending wake on LAN packet to {self.id}"""'], {}), "(f'Sending wake on LAN packet to {self.id}')\n", (5231, 5275), False, 'import logging\n'), ((5309, 5411), 'wakeonlan.send_magic_packet', 'wakeonlan.send_magic_packet', (['self.macAddress'], {'ip_address': 'self.broadcastAddress', 'port': 'self.WOLPort'}), '(self.macAddress, ip_address=self.\n broadcastAddress, port=self.WOLPort)\n', (5336, 5411), False, 'import wakeonlan\n'), ((5945, 6007), 'icmplib.ping', 'icmplib.ping', (['self.ip'], {'privileged': '(False)', 'count': '(1)', 'timeout': '(0.05)'}), '(self.ip, privileged=False, count=1, timeout=0.05)\n', (5957, 6007), False, 'import 
icmplib\n'), ((8570, 8618), 'icmplib.ping', 'icmplib.ping', (['self.ip'], {'privileged': '(False)', 'count': '(1)'}), '(self.ip, privileged=False, count=1)\n', (8582, 8618), False, 'import icmplib\n'), ((10116, 10148), 'config.exhibit_list.append', 'config.exhibit_list.append', (['file'], {}), '(file)\n', (10142, 10148), False, 'import config\n'), ((11439, 11463), 'os.path.isfile', 'os.path.isfile', (['new_file'], {}), '(new_file)\n', (11453, 11463), False, 'import os\n'), ((15060, 15111), 'config.currentExhibitConfiguration.add_section', 'config.currentExhibitConfiguration.add_section', (['id_'], {}), '(id_)\n', (15106, 15111), False, 'import config\n'), ((15124, 15187), 'config.currentExhibitConfiguration.set', 'config.currentExhibitConfiguration.set', (['id_', '"""content"""', 'content'], {}), "(id_, 'content', content)\n", (15162, 15187), False, 'import config\n'), ((15369, 15433), 'os.path.join', 'os.path.join', (['config.APP_PATH', '"""exhibits"""', 'config.currentExhibit'], {}), "(config.APP_PATH, 'exhibits', config.currentExhibit)\n", (15381, 15433), False, 'import os\n'), ((8757, 8780), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8778, 8780), False, 'import datetime\n'), ((6613, 6686), 'logging.info', 'logging.info', (['f"""Need administrator privilege to check Wake on LAN status"""'], {}), "(f'Need administrator privilege to check Wake on LAN status')\n", (6625, 6686), False, 'import logging\n'), ((9264, 9337), 'logging.info', 'logging.info', (['f"""Need administrator privilege to check Wake on LAN status"""'], {}), "(f'Need administrator privilege to check Wake on LAN status')\n", (9276, 9337), False, 'import logging\n'), ((16777, 16788), 'time.time', 'time.time', ([], {}), '()\n', (16786, 16788), False, 'import time\n')] |
import math
def h(x, N):
return math.log(x/N)*x/N
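# h(x, N) is the (x/N) * log(x/N) term; evaluate() pairs it so that the merge/split cost
# h(x + y, N) - h(x, N) - h(y, N) gives the variation-of-information distance.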
def gmd(R, S, fm, fs):
"""
Args:
R (list of list): the partition, output of the entity resolution that we want to evaluate
S (list of list): the gold standard
fm(x,y) -> int (function): cost of merging a group of size x with another group of size y
fs(x,y) -> int (function): cost of splitting a group into 2 groups of respective sizes x and y
Returns:
double : the generalized merge distance between R and S
"""
# build a map M from record to cluster number
# store sizes of each cluster in Rsizes
Rsizes = {}
M = {}
for i, group in enumerate(R):
for r, rec in enumerate(group):
M[rec] = i
Rsizes[i] = len(group)
split_cost = 0
merge_cost = 0
for i, group in enumerate(S):
# determine which clusters in R contain the records in group i
pMap = {}
for r, rec in enumerate(group):
# if we haven't seen the R cluster corresponding to this element we add it to the map
try:
M[rec]
except KeyError as err:
                raise KeyError(
                    'The element {} of S isn\'t present in R. Check that you have reconciled R and S'.format(err))
if M[rec] not in pMap:
pMap[M[rec]] = 0
# increment the count for this partition
pMap[M[rec]] += 1
# compute cost to generate group i of S
totalRecs = 0
s_cost = 0
m_cost = 0
for i, count in pMap.items():
if Rsizes[i] > count:
# add the cost to split R[i]
s_cost += fs(count, Rsizes[i] - count)
Rsizes[i] -= count
if totalRecs != 0:
# cost to merge into S[i]
m_cost += fm(count, totalRecs)
totalRecs += count
split_cost += s_cost
merge_cost += m_cost
return split_cost + merge_cost
def check_r_s_same_size(R, S):
"""
    Raises an exception if R and S have different numbers of distinct elements
    Args:
        R (list of list): the partition, output of the entity resolution that we want to evaluate
        S (list of list): the gold standard
"""
R_set = set()
S_set = set()
for i, val in enumerate(R):
for j, rec in enumerate(val):
R_set.add(rec)
for i, val in enumerate(S):
for j, rec in enumerate(val):
S_set.add(rec)
if len(R_set) != len(S_set):
        raise ValueError("R and S have different numbers of distinct records: R has {} "
                         "distinct records while S has {} distinct records".format(len(R_set), len(S_set)))
def to_set(list_of_list):
"""
Transforms a list of list into a set
[[0,1],[2]] -> {0,1,2}
"""
_set = set()
for list_ in list_of_list:
for element in list_:
_set.add(element)
return _set
def splitted(_set):
""" Transforms a set {0,1,2} into a list of singletons [[0],[1],[2]]"""
splitted = []
for el in _set:
splitted.append([el])
return splitted
def evaluate(R, S, metric):
"""
Evaluate R against S on metric
Args:
R (list of list): the entity resolution we want to evaluate
S (list of list): the gold standard
metric (str) : the metric we want to use to evaluate : bmd, precision, recall, variation_of_information, f1
Returns:
double : the value of the metric
"""
check_r_s_same_size(R,S)
S_set = to_set(S)
N = len(S_set)
metrics_cost = {
'bmd': {
'merge_cost': lambda x, y: 1,
'split_cost': lambda x, y: 1},
'precision': {
'merge_cost': lambda x, y: 0,
'split_cost': lambda x, y: x*y},
'recall': {
'merge_cost': lambda x, y: x*y,
'split_cost': lambda x, y: 0},
'variation_of_information': {
'merge_cost': lambda x, y: h(x+y, N) - h(x, N) - h(y, N),
'split_cost': lambda x, y: h(x+y, N) - h(x, N) - h(y, N)},
'f1':{}
}
if metric not in metrics_cost:
raise ValueError('{} is not part of the possible metrics : {}'.format(
metric, metrics_cost.keys()))
direct_metrics = {'bmd', 'variation_of_information'}
if metric in direct_metrics:
return gmd(R, S, metrics_cost[metric]['merge_cost'], metrics_cost[metric]['split_cost'])
if metric in ('precision','f1'):
S_splitted = splitted(S_set)
distance_to_splitted = gmd(
R, S_splitted, metrics_cost['precision']['merge_cost'], metrics_cost['precision']['split_cost'])
if distance_to_splitted == 0:
return 0
precision = 1 - gmd(R, S, metrics_cost['precision']['merge_cost'], metrics_cost['precision']['split_cost']) / distance_to_splitted
value = precision
if metric in ('recall','f1'):
S_splitted = splitted(S_set)
distance_from_splitted = gmd(
S_splitted, S, metrics_cost['recall']['merge_cost'], metrics_cost['recall']['split_cost'])
if distance_from_splitted == 0:
return 0
recall = 1 - gmd(R, S, metrics_cost['recall']['merge_cost'], metrics_cost['recall']['split_cost']) / distance_from_splitted
value = recall
if metric == 'f1':
return 2 * precision * recall / (precision + recall)
else:
return value
def worst_entities(R,S,kind):
"""
Args:
R (list of list): the entity resolution we want to evaluate
S (list of list): the gold standard
kind (str) : the type of worst entities we want, e.g. "glued" entities or "broken" entities
Returns:
list : list of worst entities. R entities in the "glued" case, S entities in the "broken" case
dictionary : "glued" : R entity -> set of S entities that the glued R entity contains
"broken" : S entity -> set of R entities that the broken S entity contains
"""
if kind == "glued":
dic_s = {}
for i,entity in enumerate(S):
for r, rec in enumerate(entity):
dic_s[rec] = i
dic_r_s = {}
for i,entity in enumerate(R):
_set = set()
for r,rec in enumerate(entity):
_set.add(dic_s[rec])
dic_r_s[i] = _set
dic_r_s_count = {key:len(_set) for key,_set in dic_r_s.items()}
sorted_list = sorted(dic_r_s_count,key = dic_r_s_count.get, reverse = True)
return sorted_list[:100], dic_r_s
if kind == "broken":
dic_r = {}
for i,entity in enumerate(R):
for r, rec in enumerate(entity):
dic_r[rec] = i
dic_s_r = {}
for i,entity in enumerate(S):
_set = set()
for r,rec in enumerate(entity):
_set.add(dic_r[rec])
dic_s_r[i] = _set
dic_s_r_count = {key:len(_set) for key,_set in dic_s_r.items()}
sorted_list = sorted(dic_s_r_count,key = dic_s_r_count.get, reverse = True)
return sorted_list[:100], dic_s_r
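

# Illustrative usage (a minimal sketch added for clarity, not part of the original module);
# the expected values come from hand-tracing gmd() on this tiny input.
if __name__ == "__main__":
    R_demo = [[1, 2], [3], [4, 5]]   # entity resolution output to evaluate
    S_demo = [[1, 2, 3], [4, 5]]     # gold standard
    print(evaluate(R_demo, S_demo, 'bmd'))        # 1: a single merge turns R_demo into S_demo
    print(evaluate(R_demo, S_demo, 'precision'))  # 1.0: no R_demo cluster mixes two S_demo entities
    print(evaluate(R_demo, S_demo, 'recall'))     # 0.5: [1, 2, 3] is only partially recovered
    print(evaluate(R_demo, S_demo, 'f1'))         # ~0.67: harmonic mean of precision and recall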
| [
"math.log"
] | [((40, 55), 'math.log', 'math.log', (['(x / N)'], {}), '(x / N)\n', (48, 55), False, 'import math\n')] |
from secml.adv.attacks.evasion.foolbox.tests.c_attack_evasion_foolbox_testcases import CAttackEvasionFoolboxTestCases
try:
import foolbox
import torch
except ImportError:
CAttackEvasionFoolboxTestCases.importskip("foolbox")
from secml.adv.attacks.evasion.foolbox.fb_attacks.fb_ead_attack import CFoolboxEAD
class TestCAttackEvasionFoolboxEAD(CAttackEvasionFoolboxTestCases):
"""Unit test for CAttackEvasionFoolboxDDN"""
make_figures = False # Set as True to produce figures
def setUp(self):
super(TestCAttackEvasionFoolboxEAD, self).setUp()
self.attack_class = CFoolboxEAD
self.attack_params = {'steps': self.default_steps, 'binary_search_steps': 9,
'confidence': 0.1, 'initial_stepsize': 1e-1,
'epsilons': None, 'abort_early': False}
self.has_targeted = True
self.has_untargeted = True
def test_run_targeted(self):
self._test_run_targeted()
def test_run_untargeted(self):
self._test_run_untargeted()
def test_check_foolbox_equal_targeted(self):
self._test_check_foolbox_equal_targeted()
def test_check_foolbox_equal_untargeted(self):
self._test_check_foolbox_equal_untargeted()
def test_shapes(self):
self._test_shapes()
| [
"secml.adv.attacks.evasion.foolbox.tests.c_attack_evasion_foolbox_testcases.CAttackEvasionFoolboxTestCases.importskip"
] | [((184, 236), 'secml.adv.attacks.evasion.foolbox.tests.c_attack_evasion_foolbox_testcases.CAttackEvasionFoolboxTestCases.importskip', 'CAttackEvasionFoolboxTestCases.importskip', (['"""foolbox"""'], {}), "('foolbox')\n", (225, 236), False, 'from secml.adv.attacks.evasion.foolbox.tests.c_attack_evasion_foolbox_testcases import CAttackEvasionFoolboxTestCases\n')] |
import pathlib
from googleapiclient.http import MediaFileUpload
from parus.config import get_default_folder_id
from parus.google import build_google_drive_service
def upload_to_drive(file, credentials, name=None, mime=None, folder_id=None):
p = pathlib.Path(file)
folder = folder_id or get_default_folder_id()
meta = {'name': name or p.name}
if folder is not None:
meta.update(parents=[folder])
drive_service = build_google_drive_service(credentials)
with drive_service.files() as drive_files:
media = MediaFileUpload(p, mimetype=mime, resumable=True)
uploaded = drive_files.create(body=meta, media_body=media, fields='id, name').execute()
print('Upload completed: %s (id: %s)' % (uploaded.get('name'), uploaded.get('id')))
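# Example call (illustrative only -- assumes `creds` is an authorized Google credentials
# object and the folder id is a placeholder):
#   upload_to_drive("report.csv", creds, name="daily-report.csv", mime="text/csv",
#                   folder_id="your-folder-id")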
| [
"parus.google.build_google_drive_service",
"parus.config.get_default_folder_id",
"googleapiclient.http.MediaFileUpload",
"pathlib.Path"
] | [((252, 270), 'pathlib.Path', 'pathlib.Path', (['file'], {}), '(file)\n', (264, 270), False, 'import pathlib\n'), ((444, 483), 'parus.google.build_google_drive_service', 'build_google_drive_service', (['credentials'], {}), '(credentials)\n', (470, 483), False, 'from parus.google import build_google_drive_service\n'), ((297, 320), 'parus.config.get_default_folder_id', 'get_default_folder_id', ([], {}), '()\n', (318, 320), False, 'from parus.config import get_default_folder_id\n'), ((547, 596), 'googleapiclient.http.MediaFileUpload', 'MediaFileUpload', (['p'], {'mimetype': 'mime', 'resumable': '(True)'}), '(p, mimetype=mime, resumable=True)\n', (562, 596), False, 'from googleapiclient.http import MediaFileUpload\n')] |
import sys
import os
import click
from ..version import __version__
from .run import create_run_cli_group
from .pipeline import create_pipeline_cli_group
def create_dagster_cli():
@click.group(commands={'pipeline': create_pipeline_cli_group(), 'run': create_run_cli_group()})
@click.version_option(version=__version__)
def group():
'Noop'
# add the path for the cwd so imports in dynamically loaded code work correctly
sys.path.append(os.getcwd())
return group
def main():
cli = create_dagster_cli()
cli(obj={}) # pylint:disable=E1123
| [
"click.version_option",
"os.getcwd"
] | [((288, 329), 'click.version_option', 'click.version_option', ([], {'version': '__version__'}), '(version=__version__)\n', (308, 329), False, 'import click\n'), ((467, 478), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (476, 478), False, 'import os\n')] |
from PyQt5 import QtWidgets as QtW
from PyQt5.QtGui import QCursor
from PyQt5.QtCore import Qt
from .widgets.time_bar_widget import TimeBarWidget
from datetime import datetime
from threading import Thread
from time import sleep
from math import ceil
from settings import Settings
from .new_flight_window import NewFlightWindow
from .pending_flights_window import PendingFlightsWindow
from .close_confirmation_window import CloseConfirmationWindow
from .total_stats_window import TotalStatsWindow
from database import FlightStatistics
from openpyxl import load_workbook
class MainWindow(QtW.QWidget):
def __init__(self):
super().__init__()
self.__layout = QtW.QGridLayout()
self.__saved_flights_layout = QtW.QGridLayout()
self.__group_box = QtW.QGroupBox()
self.__clock = QtW.QLabel('00:00')
self.__clock.setProperty('color', 'color_vlight')
self.__layout.addWidget(self.__clock, 0, 0)
self.__row_count = 0
for i in range(24):
self.__layout.addWidget(QtW.QLabel(f'{i}:00'), 0, i+1)
self.render_flights()
self.__label_status = QtW.QLabel('READY')
self.__label_status.setProperty('color', 'color_green')
self.__layout.addWidget(self.__label_status, 0, 25)
self.__group_box.setLayout(self.__saved_flights_layout)
self.__scroll_area = QtW.QScrollArea()
self.__scroll_area.setWidget(self.__group_box)
self.__scroll_area.setWidgetResizable(True)
self.__scroll_area.setFixedHeight(400)
self.__layout.addWidget(self.__scroll_area, 1, 0, 1, 26)
button_new_flight = QtW.QPushButton('New flight', self)
button_show_pending = QtW.QPushButton('Show pending', self)
button_stats = QtW.QPushButton('Show total', self)
button_import = QtW.QPushButton('Import', self)
button_import.pressed.connect(self.__import_data)
button_new_flight.setCursor(QCursor(Qt.PointingHandCursor))
button_show_pending.setCursor(QCursor(Qt.PointingHandCursor))
button_stats.setCursor(QCursor(Qt.PointingHandCursor))
button_import.setCursor(QCursor(Qt.PointingHandCursor))
self.__layout.addWidget(button_new_flight, 2, 0, 1, 3)
self.__layout.addWidget(button_show_pending, 2, 3, 1, 3)
self.__layout.addWidget(button_stats, 2, 6, 1, 3)
self.__layout.addWidget(button_import, 2, 9, 1, 3)
label_copyright = QtW.QLabel(f'© {datetime.now().year} osx11')
label_copyright.setProperty('color', 'color_vlight')
self.__layout.addWidget(label_copyright, 2, 24, 1, 2)
Thread(target=self.__update_clock, daemon=True).start()
self.__new_flight_window = NewFlightWindow()
button_new_flight.clicked.connect(self.__new_flight_window.show)
self.__pending_flights_window = PendingFlightsWindow(self)
button_show_pending.clicked.connect(self.__show_pending_flights_window)
self.__total_stats_window = TotalStatsWindow()
button_stats.clicked.connect(self.__show_total_stats_window)
self.__close_confirmation_window = CloseConfirmationWindow(self)
self.setFixedSize(1300, 505)
self.setWindowTitle('Flight Statistics')
self.setLayout(self.__layout)
self.setStyleSheet(Settings().style)
self.show()
def set_status(self, status, color='color_green'):
self.__label_status.setText(status)
self.__label_status.setProperty('color', color)
self.__label_status.style().polish(self.__label_status)
def __clear_saved_flights_layout(self):
for i in reversed(range(self.__saved_flights_layout.count())):
self.__saved_flights_layout.itemAt(i).widget().setParent(None)
        # this has to be filled with empty labels, otherwise the timebars will be displayed incorrectly
for i in range(25):
self.__saved_flights_layout.addWidget(QtW.QLabel(''), 0, i)
def render_flights(self):
self.__clear_saved_flights_layout()
query = (FlightStatistics
.select()
.where(FlightStatistics.actual_arrival_time != None))
self.__row_count = query.count()
previous_pos_was_nextday = False
pos = 0
for flight in query:
departure_date = flight.scheduled_departure_date[:5]
arrival_date = flight.actual_arrival_date[:5]
actual_departure_hour = int(flight.actual_departure_time[:2])
actual_arrival_hour = int(flight.actual_arrival_time[:2])
flight_time = ceil(float(flight.flight_time[:2]) + float(flight.flight_time[-2:])/60)
if actual_departure_hour == 0:
actual_departure_hour = 1
if actual_arrival_hour == 0:
actual_arrival_hour += 1
if flight_time == 0:
flight_time = 1
            # Compare the full stored dates ('%d.%m.%y', see __import_data) rather than the
            # 'dd.mm' display slices, so month and year boundaries are handled correctly.
            arrived_next_day = (datetime.strptime(flight.actual_arrival_date, '%d.%m.%y') >
                                datetime.strptime(flight.scheduled_departure_date, '%d.%m.%y'))
no_distance = flight.distance == 0
timebar = TimeBarWidget(self,
f'{flight.departure_icao}-{flight.arrival_icao}',
flight_time,
flight.id,
no_distance=no_distance)
timebar_nextday = TimeBarWidget(self,
f'{flight.departure_icao}-{flight.arrival_icao}',
flight_time, flight.id,
is_next_day=True)
if not arrived_next_day:
if previous_pos_was_nextday:
pos += 1
previous_pos_was_nextday = False
self.__saved_flights_layout.addWidget(QtW.QLabel(departure_date), pos, 0)
self.__saved_flights_layout.addWidget(timebar, pos, actual_departure_hour, 1, flight_time)
else:
previous_pos_was_nextday = True
self.__saved_flights_layout.addWidget(QtW.QLabel(departure_date), pos, 0)
self.__saved_flights_layout.addWidget(QtW.QLabel(arrival_date), pos+1, 0)
self.__saved_flights_layout.addWidget(timebar, pos, actual_departure_hour, 1, (24-actual_departure_hour))
self.__saved_flights_layout.addWidget(timebar_nextday, pos+1, 1, 1, actual_arrival_hour)
pos += 1
def __update_clock(self):
while True:
now = datetime.utcnow()
hour = now.hour
if hour < 10:
hour = f'0{hour}'
minute = now.minute
if minute < 10:
minute = f'0{minute}'
remaining = 60 - now.second
self.__clock.setText(f'{hour}:{minute}Z')
sleep(remaining)
def __show_pending_flights_window(self):
self.__pending_flights_window.update_flight_schedule()
self.__pending_flights_window.show()
def __show_total_stats_window(self):
self.__total_stats_window.update_statistics()
self.__total_stats_window.show()
def __import_data(self):
file_url = QtW.QFileDialog().getOpenFileName()[0]
if not file_url:
return
workbook = load_workbook(file_url)
sheet = workbook.active
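        # Expected worksheet layout (as read by the loop below): data starts at row 6;
        # columns A-F hold departure city/ICAO/datetime and arrival city/ICAO/datetime,
        # column H the aircraft, I the distance and L the flight time.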
i = 6
while dep_city := sheet[f'A{i}'].value:
dep_icao = sheet[f'B{i}'].value
dep_dt = sheet[f'C{i}'].value
arr_city = sheet[f'D{i}'].value
arr_icao = sheet[f'E{i}'].value
            arr_dt = sheet[f'F{i}'].value
aircraft = sheet[f'H{i}'].value
dist = sheet[f'I{i}'].value
flight_time = sheet[f'L{i}'].value
FlightStatistics.create(flight_number='OSX11',
scheduled_departure_date=dep_dt.strftime('%d.%m.%y'),
scheduled_departure_time=dep_dt.strftime('%H:%M'),
actual_arrival_date=arr_dt.strftime('%d.%m.%y'),
actual_departure_time=dep_dt.strftime('%H:%M'),
actual_arrival_time=arr_dt.strftime('%H:%M'),
aircraft=aircraft,
departure_icao=dep_icao,
arrival_icao=arr_icao,
departure_city=dep_city,
arrival_city=arr_city,
flight_time=flight_time.strftime('%H:%M'),
distance=dist)
i += 1
self.render_flights()
def closeEvent(self, event):
if FlightStatistics.has_opened_flight():
self.__close_confirmation_window.show()
event.ignore()
| [
"database.FlightStatistics.has_opened_flight",
"database.FlightStatistics.select",
"datetime.datetime.utcnow",
"openpyxl.load_workbook",
"PyQt5.QtWidgets.QFileDialog",
"time.sleep",
"PyQt5.QtGui.QCursor",
"datetime.datetime.now",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QGroupBox",
"PyQt5.QtWidgets.QLabel",
"settings.Settings",
"threading.Thread",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QScrollArea"
] | [((678, 695), 'PyQt5.QtWidgets.QGridLayout', 'QtW.QGridLayout', ([], {}), '()\n', (693, 695), True, 'from PyQt5 import QtWidgets as QtW\n'), ((735, 752), 'PyQt5.QtWidgets.QGridLayout', 'QtW.QGridLayout', ([], {}), '()\n', (750, 752), True, 'from PyQt5 import QtWidgets as QtW\n'), ((780, 795), 'PyQt5.QtWidgets.QGroupBox', 'QtW.QGroupBox', ([], {}), '()\n', (793, 795), True, 'from PyQt5 import QtWidgets as QtW\n'), ((820, 839), 'PyQt5.QtWidgets.QLabel', 'QtW.QLabel', (['"""00:00"""'], {}), "('00:00')\n", (830, 839), True, 'from PyQt5 import QtWidgets as QtW\n'), ((1138, 1157), 'PyQt5.QtWidgets.QLabel', 'QtW.QLabel', (['"""READY"""'], {}), "('READY')\n", (1148, 1157), True, 'from PyQt5 import QtWidgets as QtW\n'), ((1376, 1393), 'PyQt5.QtWidgets.QScrollArea', 'QtW.QScrollArea', ([], {}), '()\n', (1391, 1393), True, 'from PyQt5 import QtWidgets as QtW\n'), ((1643, 1678), 'PyQt5.QtWidgets.QPushButton', 'QtW.QPushButton', (['"""New flight"""', 'self'], {}), "('New flight', self)\n", (1658, 1678), True, 'from PyQt5 import QtWidgets as QtW\n'), ((1709, 1746), 'PyQt5.QtWidgets.QPushButton', 'QtW.QPushButton', (['"""Show pending"""', 'self'], {}), "('Show pending', self)\n", (1724, 1746), True, 'from PyQt5 import QtWidgets as QtW\n'), ((1770, 1805), 'PyQt5.QtWidgets.QPushButton', 'QtW.QPushButton', (['"""Show total"""', 'self'], {}), "('Show total', self)\n", (1785, 1805), True, 'from PyQt5 import QtWidgets as QtW\n'), ((1830, 1861), 'PyQt5.QtWidgets.QPushButton', 'QtW.QPushButton', (['"""Import"""', 'self'], {}), "('Import', self)\n", (1845, 1861), True, 'from PyQt5 import QtWidgets as QtW\n'), ((7279, 7302), 'openpyxl.load_workbook', 'load_workbook', (['file_url'], {}), '(file_url)\n', (7292, 7302), False, 'from openpyxl import load_workbook\n'), ((8754, 8790), 'database.FlightStatistics.has_opened_flight', 'FlightStatistics.has_opened_flight', ([], {}), '()\n', (8788, 8790), False, 'from database import FlightStatistics\n'), ((1957, 1987), 'PyQt5.QtGui.QCursor', 'QCursor', (['Qt.PointingHandCursor'], {}), '(Qt.PointingHandCursor)\n', (1964, 1987), False, 'from PyQt5.QtGui import QCursor\n'), ((2027, 2057), 'PyQt5.QtGui.QCursor', 'QCursor', (['Qt.PointingHandCursor'], {}), '(Qt.PointingHandCursor)\n', (2034, 2057), False, 'from PyQt5.QtGui import QCursor\n'), ((2090, 2120), 'PyQt5.QtGui.QCursor', 'QCursor', (['Qt.PointingHandCursor'], {}), '(Qt.PointingHandCursor)\n', (2097, 2120), False, 'from PyQt5.QtGui import QCursor\n'), ((2154, 2184), 'PyQt5.QtGui.QCursor', 'QCursor', (['Qt.PointingHandCursor'], {}), '(Qt.PointingHandCursor)\n', (2161, 2184), False, 'from PyQt5.QtGui import QCursor\n'), ((6504, 6521), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6519, 6521), False, 'from datetime import datetime\n'), ((6818, 6834), 'time.sleep', 'sleep', (['remaining'], {}), '(remaining)\n', (6823, 6834), False, 'from time import sleep\n'), ((1045, 1066), 'PyQt5.QtWidgets.QLabel', 'QtW.QLabel', (['f"""{i}:00"""'], {}), "(f'{i}:00')\n", (1055, 1066), True, 'from PyQt5 import QtWidgets as QtW\n'), ((2636, 2683), 'threading.Thread', 'Thread', ([], {'target': 'self.__update_clock', 'daemon': '(True)'}), '(target=self.__update_clock, daemon=True)\n', (2642, 2683), False, 'from threading import Thread\n'), ((3318, 3328), 'settings.Settings', 'Settings', ([], {}), '()\n', (3326, 3328), False, 'from settings import Settings\n'), ((3946, 3960), 'PyQt5.QtWidgets.QLabel', 'QtW.QLabel', (['""""""'], {}), "('')\n", (3956, 3960), True, 'from PyQt5 import QtWidgets as QtW\n'), ((4061, 4086), 
'database.FlightStatistics.select', 'FlightStatistics.select', ([], {}), '()\n', (4084, 4086), False, 'from database import FlightStatistics\n'), ((5796, 5822), 'PyQt5.QtWidgets.QLabel', 'QtW.QLabel', (['departure_date'], {}), '(departure_date)\n', (5806, 5822), True, 'from PyQt5 import QtWidgets as QtW\n'), ((6059, 6085), 'PyQt5.QtWidgets.QLabel', 'QtW.QLabel', (['departure_date'], {}), '(departure_date)\n', (6069, 6085), True, 'from PyQt5 import QtWidgets as QtW\n'), ((6149, 6173), 'PyQt5.QtWidgets.QLabel', 'QtW.QLabel', (['arrival_date'], {}), '(arrival_date)\n', (6159, 6173), True, 'from PyQt5 import QtWidgets as QtW\n'), ((7175, 7192), 'PyQt5.QtWidgets.QFileDialog', 'QtW.QFileDialog', ([], {}), '()\n', (7190, 7192), True, 'from PyQt5 import QtWidgets as QtW\n'), ((2476, 2490), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2488, 2490), False, 'from datetime import datetime\n')] |
import requests
from django.core.exceptions import ObjectDoesNotExist
from .models import ProjectGroup, ProjectMember
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import dateutil.parser
from copy import deepcopy
def update_members(members, project):
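    # Mirror the given member list into the local ProjectMember table:
    # drop members that are no longer authorized, upsert the rest, and
    # refresh each member's file records.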
# delete members from DB if not authorized any more
oh_ids_found = [int(member['project_member_id']) for member in members]
for db_member in project.projectmember_set.all():
if db_member.id not in oh_ids_found:
db_member.delete()
# update existing member or create if needed
for oh_member in members:
try:
db_member = ProjectMember.objects.get(
id=int(oh_member['project_member_id']))
except ObjectDoesNotExist:
db_member = ProjectMember(
project_id=project.id,
id=int(oh_member['project_member_id']))
db_member.date_joined = dateutil.parser.parse(oh_member['created'])
db_member.sources_shared = oh_member.get('sources_shared')
db_member.username = oh_member.get('username')
db_member.save()
# fetching old file data for this member
project_member_old_files = project.file_set.filter(member=db_member)
for file in oh_member['data']:
# maintaining a list of obsolete files for this member in database
project_member_old_files = project_member_old_files.exclude(oh_file_id=file['id'])
project.file_set.update_or_create(oh_file_id=file['id'],
basename=file['basename'],
created=dateutil.parser.parse(file['created']),
source=file['source'],
member=db_member,
defaults={
'download_url': file['download_url'],
})
# deleting obsolete files from database for this member
project.file_set.filter(id__in=project_member_old_files).delete()
def get_all_members(token):
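    # Walk the paginated members endpoint, then fetch each member's full
    # (itself paginated) record and return the combined list.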
req_url = 'https://www.openhumans.org/api/direct-sharing' \
'/project/members/?access_token={}'.format(token)
members = requests.get(req_url).json()
if 'results' in members.keys():
# results = members['results']
member_urls = [i['exchange_member'] for i in members['results']]
while members['next']:
members = requests.get(members['next']).json()
for i in members['results']:
member_urls.append(i['exchange_member'])
results = []
print('got all member URLs, now getting complete objects')
for member_url in member_urls:
member = requests.get(member_url).json()
output_member = deepcopy(member)
while member['next']:
member = requests.get(member['next']).json()
output_member['data'] += member['data']
results += [output_member]
print('got all member objects and return')
return results
else:
return members
def filter_members_group_id(token, group_id):
members = get_all_members(token)
project_group = ProjectGroup.objects.get(pk=group_id)
group_member_ids = [str(p.id) for p in project_group.projectmember_set.all()]
filtered_members = []
for member in members:
if member['project_member_id'] in group_member_ids:
filtered_members.append(member)
return filtered_members
def paginate_items(queryset, page):
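    # 25 items per page; fall back to page 1 on a non-integer page value and
    # to the last page when the requested page is out of range.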
paginator = Paginator(queryset, 25)
try:
paged_queryset = paginator.page(page)
except PageNotAnInteger:
paged_queryset = paginator.page(1)
except EmptyPage:
paged_queryset = paginator.page(paginator.num_pages)
return paged_queryset
def token_is_valid(token):
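    # The token is valid when the project endpoint responds without a
    # 'detail' error key.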
req_url = 'https://www.openhumans.org/api/' \
'direct-sharing/project/?access_token={}'.format(token)
project_info = requests.get(req_url).json()
if 'detail' not in project_info.keys():
return True
return False
| [
"copy.deepcopy",
"requests.get",
"django.core.paginator.Paginator"
] | [((3742, 3765), 'django.core.paginator.Paginator', 'Paginator', (['queryset', '(25)'], {}), '(queryset, 25)\n', (3751, 3765), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((2391, 2412), 'requests.get', 'requests.get', (['req_url'], {}), '(req_url)\n', (2403, 2412), False, 'import requests\n'), ((2964, 2980), 'copy.deepcopy', 'deepcopy', (['member'], {}), '(member)\n', (2972, 2980), False, 'from copy import deepcopy\n'), ((4170, 4191), 'requests.get', 'requests.get', (['req_url'], {}), '(req_url)\n', (4182, 4191), False, 'import requests\n'), ((2621, 2650), 'requests.get', 'requests.get', (["members['next']"], {}), "(members['next'])\n", (2633, 2650), False, 'import requests\n'), ((2904, 2928), 'requests.get', 'requests.get', (['member_url'], {}), '(member_url)\n', (2916, 2928), False, 'import requests\n'), ((3040, 3068), 'requests.get', 'requests.get', (["member['next']"], {}), "(member['next'])\n", (3052, 3068), False, 'import requests\n')] |
from pydantic import BaseModel, Field, constr
from enum import Enum
from typing import Optional
from Bio.SeqFeature import SeqFeature, FeatureLocation
from pydantic.types import conlist
# Enumerations:
class SourceType(str, Enum):
genbank_id = 'genbank_id',
file = 'file',
restriction = 'restriction'
sticky_ligation = 'sticky_ligation'
PCR = 'PCR'
class SequenceFileFormat(str, Enum):
fasta = 'fasta'
genbank = 'genbank'
snapgene = 'snapgene'
# Sequence: =========================================
class GenbankSequence(BaseModel):
"""A class to store sequences and features in genbank model
"""
type: str = 'file'
file_extension: str = 'gb'
file_content: str = ''
overhang_crick_3prime: int = Field(0, description='Taken from pydna\'s `dseq::ovhg`\
An integer describing the length of the\
crick strand overhang in the 5\' of the molecule, or 3\' of the crick strand')
overhang_watson_3prime: int = Field(0, description='The equivalent of `overhang_crick_3prime`\
but for the watson strand')
class SequenceEntity(BaseModel):
id: Optional[int]
kind: str = 'entity'
sequence: GenbankSequence = None
class PrimerModel(BaseModel):
"""Called PrimerModel not to be confused with the class from pydna."""
id: int
name: str
# TODO: add this to the flake8 exceptions
sequence: constr(regex='^[acgtACGT]+$')
# The next two models are unused for now
class SequenceFeature(BaseModel):
id: str
type: str
start: int
end: int
strand: int = None
def seq_feature2pydantic(sf: SeqFeature) -> SequenceFeature:
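    # Convert a Biopython SeqFeature with a simple FeatureLocation into the
    # SequenceFeature model above; compound locations are rejected.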
if not isinstance(sf.location, FeatureLocation):
raise TypeError(
'Compound locations are not yet supported.'
)
return SequenceFeature(
id=sf.id,
type=sf.type,
strand=sf.location.strand,
start=sf.location.start,
end=sf.location.end
)
# Sources =========================================
class Source(BaseModel):
"""A class to represent sources of DNA
"""
# Fields required to execute a source step
id: int = None
kind: str = 'source'
input: list[int] = []
output: int = None
type: SourceType = None
class UploadedFileSource(Source):
"""Describes a sequence from a file uploaded by the user
"""
file_name: str
file_format: SequenceFileFormat
type: SourceType = SourceType('file')
index_in_file: int = None
class GenbankIdSource(Source):
"""Documents a request to GenBank
"""
genbank_id: str
type: SourceType = SourceType('genbank_id')
# TODO There is some abstract common thing between restriction and PCR, since
# they select a subset of the molecule, perhaps they can be merged in some way.
class SequenceSubsetSource(Source):
"""An abstract class for sources that select a subset of a sequence, such as PCR and digestion."""
# This can only take one input
input: conlist(int, min_items=1, max_items=1)
# Boundaries of a fragment (length should be either empty, or length = 2)
fragment_boundaries: list[int] = Field([], description='Edges of the fragment that will be taken:\n \
* For a PCR, these are the positions of the 3\' binding sites of the primers, such that sequence[start:end]\
would be the part of the sequence where primers don\'t align.\n\
* For restriction enzymes the extremes of the overhangs\n\
For both, 0-based indexing, [first,second)')
class RestrictionEnzymeDigestionSource(SequenceSubsetSource):
"""Documents a restriction enzyme digestion, and the selection of one of the fragments."""
type: SourceType = SourceType('restriction')
# The order of the enzymes in the list corresponds to the fragment_boundaries.
# For instance, if a fragment 5' is cut with EcoRI and the 3' with BamHI,
# restriction_enzymes = ['EcoRI', 'BamHI']
restriction_enzymes: conlist(str, min_items=1)
class PrimerAnnealingSettings(BaseModel):
"""Settings to find annealing sites for the primer"""
minimum_annealing: int = Field(..., description='The minimum number of \
overlaping basepairs for an annealing to be considered.')
class PCRSource(SequenceSubsetSource):
"""Documents a PCR, and the selection of one of the products."""
type: SourceType = SourceType('PCR')
primers: conlist(int, max_items=2) = Field([], description='id of\
the forward and reverse primer (in that order). If the reverse and forward is the same,\
the id should be submitted twice. It accepts a single input if primer_footprints is not set.')
primer_footprints: conlist(int, max_items=2) = Field([], description='The number of basepairs that are anealed\
in each primer (same order as in `primers`). Missmatch support should be added in the future.')
# TODO test this
primer_annealing_settings: PrimerAnnealingSettings = Field(None, description='This does not have\
to be specified if the primers and primer_footprints are provided.')
class StickyLigationSource(Source):
"""Documents a ligation with sticky ends. This might consist of \
a single fragment's circularisation"""
# TODO: this should support at some point specifying the order of the fragments
# of the assembly + whether there is circularization.
input: conlist(int, min_items=1)
type: SourceType = SourceType('sticky_ligation')
fragments_inverted: list[bool] = []
circularised: bool = None
# TODO include this
# @validator('fragments_inverted')
# def lists_have_equal_length(cls, v, values):
# assert len(v) == len(values['input']) or len(v) == 0, '`fragments_inverted` must\
# be either empty, or have the same length as `input`'
| [
"pydantic.Field",
"pydantic.constr",
"pydantic.types.conlist"
] | [((760, 954), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""Taken from pydna\'s `dseq::ovhg` An integer describing the length of the crick strand overhang in the 5\' of the molecule, or 3\' of the crick strand"""'}), '(0, description=\n "Taken from pydna\'s `dseq::ovhg` An integer describing the length of the crick strand overhang in the 5\' of the molecule, or 3\' of the crick strand"\n )\n', (765, 954), False, 'from pydantic import BaseModel, Field, constr\n'), ((986, 1094), 'pydantic.Field', 'Field', (['(0)'], {'description': '"""The equivalent of `overhang_crick_3prime` but for the watson strand"""'}), "(0, description=\n 'The equivalent of `overhang_crick_3prime` but for the watson strand'\n )\n", (991, 1094), False, 'from pydantic import BaseModel, Field, constr\n'), ((1400, 1429), 'pydantic.constr', 'constr', ([], {'regex': '"""^[acgtACGT]+$"""'}), "(regex='^[acgtACGT]+$')\n", (1406, 1429), False, 'from pydantic import BaseModel, Field, constr\n'), ((2987, 3025), 'pydantic.types.conlist', 'conlist', (['int'], {'min_items': '(1)', 'max_items': '(1)'}), '(int, min_items=1, max_items=1)\n', (2994, 3025), False, 'from pydantic.types import conlist\n'), ((3142, 3505), 'pydantic.Field', 'Field', (['[]'], {'description': '"""Edges of the fragment that will be taken:\n * For a PCR, these are the positions of the 3\' binding sites of the primers, such that sequence[start:end] would be the part of the sequence where primers don\'t align.\n * For restriction enzymes the extremes of the overhangs\n For both, 0-based indexing, [first,second)"""'}), '([], description=\n """Edges of the fragment that will be taken:\n * For a PCR, these are the positions of the 3\' binding sites of the primers, such that sequence[start:end] would be the part of the sequence where primers don\'t align.\n * For restriction enzymes the extremes of the overhangs\n For both, 0-based indexing, [first,second)"""\n )\n', (3147, 3505), False, 'from pydantic import BaseModel, Field, constr\n'), ((3948, 3973), 'pydantic.types.conlist', 'conlist', (['str'], {'min_items': '(1)'}), '(str, min_items=1)\n', (3955, 3973), False, 'from pydantic.types import conlist\n'), ((4105, 4222), 'pydantic.Field', 'Field', (['...'], {'description': '"""The minimum number of overlaping basepairs for an annealing to be considered."""'}), "(..., description=\n 'The minimum number of overlaping basepairs for an annealing to be considered.'\n )\n", (4110, 4222), False, 'from pydantic import BaseModel, Field, constr\n'), ((4381, 4406), 'pydantic.types.conlist', 'conlist', (['int'], {'max_items': '(2)'}), '(int, max_items=2)\n', (4388, 4406), False, 'from pydantic.types import conlist\n'), ((4409, 4644), 'pydantic.Field', 'Field', (['[]'], {'description': '"""id of the forward and reverse primer (in that order). If the reverse and forward is the same, the id should be submitted twice. It accepts a single input if primer_footprints is not set."""'}), "([], description=\n 'id of the forward and reverse primer (in that order). If the reverse and forward is the same, the id should be submitted twice. 
It accepts a single input if primer_footprints is not set.'\n )\n", (4414, 4644), False, 'from pydantic import BaseModel, Field, constr\n'), ((4663, 4688), 'pydantic.types.conlist', 'conlist', (['int'], {'max_items': '(2)'}), '(int, max_items=2)\n', (4670, 4688), False, 'from pydantic.types import conlist\n'), ((4691, 4863), 'pydantic.Field', 'Field', (['[]'], {'description': '"""The number of basepairs that are anealed in each primer (same order as in `primers`). Missmatch support should be added in the future."""'}), "([], description=\n 'The number of basepairs that are anealed in each primer (same order as in `primers`). Missmatch support should be added in the future.'\n )\n", (4696, 4863), False, 'from pydantic import BaseModel, Field, constr\n'), ((4935, 5064), 'pydantic.Field', 'Field', (['None'], {'description': '"""This does not have to be specified if the primers and primer_footprints are provided."""'}), "(None, description=\n 'This does not have to be specified if the primers and primer_footprints are provided.'\n )\n", (4940, 5064), False, 'from pydantic import BaseModel, Field, constr\n'), ((5362, 5387), 'pydantic.types.conlist', 'conlist', (['int'], {'min_items': '(1)'}), '(int, min_items=1)\n', (5369, 5387), False, 'from pydantic.types import conlist\n')] |
import datetime
import os
import json
import logging
import time
import traceback
from analysis import yfinance_client
from storage import sqlite
def TakeSnapshot(ticker, db: sqlite.ShardedSqliteStorage, db_raw: sqlite.ShardedSqliteStorage):
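  # Store today's serialized analysis/info snapshot for the ticker, plus the
  # raw JSON payloads when a raw-data store is configured.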
date_str = datetime.datetime.now().strftime('%Y-%m-%d')
client = yfinance_client.YFinanceClient(ticker)
analysis = client.GetAnalysis()
info = client.GetInfo()
db.Write(ticker, date_str, analysis.SerializeToString(), group='yf.analysis')
db.Write(ticker, date_str, info.SerializeToString(), group='yf.info')
if db_raw:
db_raw.Write(
ticker, date_str, client.ticker.analysis.to_json().encode('utf-8'), group='yf.analysis')
db_raw.Write(ticker, date_str, json.dumps(client.ticker.info).encode('utf-8'), group='yf.info')
def EnsureEnv(key: str):
val = os.getenv(key)
if not val:
raise Exception('Environment var must be set: %s' % key)
return val
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.ERROR,
datefmt='%Y-%m-%d %H:%M:%S')
# ONE-SHOT Mode
oneshot_mode = os.getenv('YF_SNAPSHOT_ONESHOT', 'TRUE')
# DB
db_path = EnsureEnv('YF_SNAPSHOT_DB_PATH')
db = sqlite.ShardedSqliteStorage(db_path)
# DB (Raw Data)
db_raw_path = os.getenv('YF_SNAPSHOT_DB_RAW_PATH', '')
# Tickers (with cache)
tickers_str = db.Get('YF_SNAPSHOT_TICKERS')
if not tickers_str:
tickers_str = EnsureEnv('YF_SNAPSHOT_TICKERS')
db.Set('YF_SNAPSHOT_TICKERS', tickers_str)
while True:
db = sqlite.ShardedSqliteStorage(db_path)
db_raw = sqlite.ShardedSqliteStorage(db_raw_path) if db_raw_path else None
errors = []
for t in tickers_str.split(','):
t = t.strip()
logging.info('Snapshotting %s' % t)
try:
TakeSnapshot(t, db, db_raw)
except:
errors.append(t)
logging.error(traceback.format_exc())
err_msg = 'Failed to snapshot: [%s]' % ','.join(errors)
if oneshot_mode == 'FALSE':
if errors:
raise Exception(err_msg)
else:
logging.info('Finished oneshot')
break
else:
if errors:
logging.error(err_msg)
logging.info('Sleeping for 7200s...')
time.sleep(7200)
| [
"analysis.yfinance_client.YFinanceClient",
"logging.basicConfig",
"traceback.format_exc",
"storage.sqlite.ShardedSqliteStorage",
"os.getenv",
"json.dumps",
"time.sleep",
"datetime.datetime.now",
"logging.info",
"logging.error"
] | [((313, 351), 'analysis.yfinance_client.YFinanceClient', 'yfinance_client.YFinanceClient', (['ticker'], {}), '(ticker)\n', (343, 351), False, 'from analysis import yfinance_client\n'), ((826, 840), 'os.getenv', 'os.getenv', (['key'], {}), '(key)\n', (835, 840), False, 'import os\n'), ((959, 1082), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)-8s %(message)s"""', 'level': 'logging.INFO', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(format='%(asctime)s %(levelname)-8s %(message)s', level\n =logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')\n", (978, 1082), False, 'import logging\n'), ((1099, 1223), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)-8s %(message)s"""', 'level': 'logging.ERROR', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(format='%(asctime)s %(levelname)-8s %(message)s', level\n =logging.ERROR, datefmt='%Y-%m-%d %H:%M:%S')\n", (1118, 1223), False, 'import logging\n'), ((1274, 1314), 'os.getenv', 'os.getenv', (['"""YF_SNAPSHOT_ONESHOT"""', '"""TRUE"""'], {}), "('YF_SNAPSHOT_ONESHOT', 'TRUE')\n", (1283, 1314), False, 'import os\n'), ((1375, 1411), 'storage.sqlite.ShardedSqliteStorage', 'sqlite.ShardedSqliteStorage', (['db_path'], {}), '(db_path)\n', (1402, 1411), False, 'from storage import sqlite\n'), ((1447, 1487), 'os.getenv', 'os.getenv', (['"""YF_SNAPSHOT_DB_RAW_PATH"""', '""""""'], {}), "('YF_SNAPSHOT_DB_RAW_PATH', '')\n", (1456, 1487), False, 'import os\n'), ((1704, 1740), 'storage.sqlite.ShardedSqliteStorage', 'sqlite.ShardedSqliteStorage', (['db_path'], {}), '(db_path)\n', (1731, 1740), False, 'from storage import sqlite\n'), ((257, 280), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (278, 280), False, 'import datetime\n'), ((1754, 1794), 'storage.sqlite.ShardedSqliteStorage', 'sqlite.ShardedSqliteStorage', (['db_raw_path'], {}), '(db_raw_path)\n', (1781, 1794), False, 'from storage import sqlite\n'), ((1900, 1935), 'logging.info', 'logging.info', (["('Snapshotting %s' % t)"], {}), "('Snapshotting %s' % t)\n", (1912, 1935), False, 'import logging\n'), ((2341, 2378), 'logging.info', 'logging.info', (['"""Sleeping for 7200s..."""'], {}), "('Sleeping for 7200s...')\n", (2353, 2378), False, 'import logging\n'), ((2385, 2401), 'time.sleep', 'time.sleep', (['(7200)'], {}), '(7200)\n', (2395, 2401), False, 'import time\n'), ((2232, 2264), 'logging.info', 'logging.info', (['"""Finished oneshot"""'], {}), "('Finished oneshot')\n", (2244, 2264), False, 'import logging\n'), ((2312, 2334), 'logging.error', 'logging.error', (['err_msg'], {}), '(err_msg)\n', (2325, 2334), False, 'import logging\n'), ((727, 757), 'json.dumps', 'json.dumps', (['client.ticker.info'], {}), '(client.ticker.info)\n', (737, 757), False, 'import json\n'), ((2044, 2066), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2064, 2066), False, 'import traceback\n')] |
#################################################################
# PREFERENCE WINDOW
#################################################################
# Load modules
import tkinter as tk
from tkinter import ttk
# Load personal modules
from stellapy.config import write_configurationFile
from stellapy.GUI.interface.PreferencesWindow.TabAbout import TabAbout
from stellapy.GUI.interface.PreferencesWindow.TabAppearance import TabAppearance
from stellapy.GUI.interface.PreferencesWindow.TabGeneral import TabGeneral
#================
# MENU CREATION
#================
class PreferenceWindow:
def __init__(self, root):
#===========
# VARIABLES
#===========
# Attach the root so we can carry it into the functions, get the tab for its dimensions
self.root = root
tab1 = root.tab1
tab1.update()
# Get the width and height of the root window + title bars and of simply the root window
self.height = root.winfo_height() # Height of the application minus the application title
self.width = root.winfo_width() # Width of the application
window_height = tab1.winfo_height() # Height of the window minus the tab header
window_width = tab1.winfo_width() # Width of the window
outerFrame_width = tab1.winfo_rootx() - tab1.winfo_x() # Top left x coordinate of the window excluding the outer-frame and including it
outerFrame_height = tab1.winfo_rooty() - tab1.winfo_y() # Top y coordinate of the window excluding the outer-frame and including it
header_width = self.width - window_width # Pixels between root window and notebook
header_height = self.height - window_height # Height of the header of the notebook
header_height = header_height - header_width # Height of the header of the notebook
#===========
# WIDGETS
#===========
# Pack the button in a frame so it's the same size as the notebook header
self.frame_openPreferences = ttk.Frame(self.root, height=header_height, width=100, style="dot.TFrame")
        self.frame_openPreferences.pack_propagate(0) # The size of the frame controls the size of the button rather than vice versa
self.frame_openPreferences.place(relx=1, rely=0, anchor="ne")
# Create a button that will open the preferences window
self.btn_openPreferences = ttk.Button(master=self.frame_openPreferences, text=" ... ", style='dot.TButton')
self.btn_openPreferences.config(command=lambda: self.open_preferencesWindow())
self.btn_openPreferences.pack(expand=1, fill=tk.BOTH)
#==============================
# Open the preferences window
#==============================
def open_preferencesWindow(self):
# Create the preferences window
self.window_preferences = tk.Toplevel(self.root)
self.window_preferences.title("Preferences")
# Center the new window in the screen
winx = 500; x = self.width/2 - winx/2
winy = 500; y = self.height/2 - winy/2
self.window_preferences.minsize(winx, winy)
self.window_preferences.geometry("+%d+%d" % (x, y))
# Create a tabbed view with the possible settings
self.tab_header = ttk.Notebook(self.window_preferences, style='header.TNotebook')
# Add frames to the tab_header which are the tab windows
self.tab_general = ttk.Frame(self.tab_header) # Text editor
self.tab_about = ttk.Frame(self.tab_header) # About me
self.tab_appearance = ttk.Frame(self.tab_header) # Fonts and colors
# Add the tabs to the tab header
self.tab_header.add(self.tab_general, text='General')
self.tab_header.add(self.tab_appearance, text='Appearance')
self.tab_header.add(self.tab_about, text='About stellapy')
self.tab_header.pack(expand=1, fill='both')
        # Attach the root so the classes can access them
self.tab_general.root = self.root
self.tab_about.root = self.root
self.tab_appearance.root = self.root
# Fill the tabs with widgets through classes
self.tabGeneral = TabGeneral(self.tab_general)
self.TabAbout = TabAbout(self.tab_about)
self.tabAppearance = TabAppearance(self.tab_appearance)
# Closing event: apply the changes, then close the window
def on_closing():
# Apply the changes
self.tabAppearance.apply_changesTheme()
self.tabGeneral.apply_changesTextEdiror()
self.tabGeneral.apply_changesPaths()
# Destroy the tkinter window
self.window_preferences.destroy()
self.window_preferences.protocol("WM_DELETE_WINDOW", on_closing)
| [
"tkinter.ttk.Button",
"tkinter.ttk.Frame",
"stellapy.GUI.interface.PreferencesWindow.TabGeneral.TabGeneral",
"tkinter.Toplevel",
"stellapy.GUI.interface.PreferencesWindow.TabAbout.TabAbout",
"stellapy.GUI.interface.PreferencesWindow.TabAppearance.TabAppearance",
"tkinter.ttk.Notebook"
] | [((2181, 2254), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.root'], {'height': 'header_height', 'width': '(100)', 'style': '"""dot.TFrame"""'}), "(self.root, height=header_height, width=100, style='dot.TFrame')\n", (2190, 2254), False, 'from tkinter import ttk\n'), ((2565, 2652), 'tkinter.ttk.Button', 'ttk.Button', ([], {'master': 'self.frame_openPreferences', 'text': '""" ... """', 'style': '"""dot.TButton"""'}), "(master=self.frame_openPreferences, text=' ... ', style=\n 'dot.TButton')\n", (2575, 2652), False, 'from tkinter import ttk\n'), ((3037, 3059), 'tkinter.Toplevel', 'tk.Toplevel', (['self.root'], {}), '(self.root)\n', (3048, 3059), True, 'import tkinter as tk\n'), ((3469, 3532), 'tkinter.ttk.Notebook', 'ttk.Notebook', (['self.window_preferences'], {'style': '"""header.TNotebook"""'}), "(self.window_preferences, style='header.TNotebook')\n", (3481, 3532), False, 'from tkinter import ttk\n'), ((3638, 3664), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.tab_header'], {}), '(self.tab_header)\n', (3647, 3664), False, 'from tkinter import ttk\n'), ((3711, 3737), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.tab_header'], {}), '(self.tab_header)\n', (3720, 3737), False, 'from tkinter import ttk\n'), ((3781, 3807), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.tab_header'], {}), '(self.tab_header)\n', (3790, 3807), False, 'from tkinter import ttk\n'), ((4412, 4440), 'stellapy.GUI.interface.PreferencesWindow.TabGeneral.TabGeneral', 'TabGeneral', (['self.tab_general'], {}), '(self.tab_general)\n', (4422, 4440), False, 'from stellapy.GUI.interface.PreferencesWindow.TabGeneral import TabGeneral\n'), ((4471, 4495), 'stellapy.GUI.interface.PreferencesWindow.TabAbout.TabAbout', 'TabAbout', (['self.tab_about'], {}), '(self.tab_about)\n', (4479, 4495), False, 'from stellapy.GUI.interface.PreferencesWindow.TabAbout import TabAbout\n'), ((4526, 4560), 'stellapy.GUI.interface.PreferencesWindow.TabAppearance.TabAppearance', 'TabAppearance', (['self.tab_appearance'], {}), '(self.tab_appearance)\n', (4539, 4560), False, 'from stellapy.GUI.interface.PreferencesWindow.TabAppearance import TabAppearance\n')] |
# Copyright 2021-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The nRF Connect SDK contains the Nordic-specific source code additions to open source
projects (Zephyr RTOS and MCUboot). It must be combined with nrfxlib and the repositories
that use the same naming convention to build the provided samples and to use the additional
subsystems and libraries.
https://github.com/nrfconnect/sdk-nrf
"""
from os.path import join
from SCons.Script import Import, SConscript
Import("env")
SConscript(
join(env.PioPlatform().get_package_dir("framework-nrfconnect"), "scripts",
"platformio", "platformio-build.py"), exports="env")
| [
"SCons.Script.Import"
] | [((1004, 1017), 'SCons.Script.Import', 'Import', (['"""env"""'], {}), "('env')\n", (1010, 1017), False, 'from SCons.Script import Import, SConscript\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20160309_0918'),
]
operations = [
migrations.CreateModel(
name='Salt_Module',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=20, verbose_name='Salt\u6a21\u5757')),
('info', models.TextField(max_length=200, verbose_name='\u6a21\u5757\u8bf4\u660e')),
],
options={
'verbose_name': 'Salt\u6a21\u5757',
'verbose_name_plural': 'Salt\u6a21\u5757\u5217\u8868',
},
),
]
| [
"django.db.models.TextField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((347, 440), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (363, 440), False, 'from django.db import migrations, models\n'), ((464, 531), 'django.db.models.CharField', 'models.CharField', ([], {'unique': '(True)', 'max_length': '(20)', 'verbose_name': '"""Salt模块"""'}), "(unique=True, max_length=20, verbose_name='Salt模块')\n", (480, 531), False, 'from django.db import migrations, models\n'), ((569, 622), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(200)', 'verbose_name': '"""模块说明"""'}), "(max_length=200, verbose_name='模块说明')\n", (585, 622), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import SocketServer
from RequestHandler import buildRequestHandler
from DirectSyncronizedSimulatorFacade import DirectSyncronizedSimulatorFacade
class Server:
def __init__(self, ip, port, simulator):
self.ip = ip
self.port = port
self.simulator = simulator
def start(self):
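        # Build a request handler bound to the simulator and serve TCP
        # requests until the server is stopped.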
RequestHandler = buildRequestHandler(self.simulator)
self.server = SocketServer.ThreadingTCPServer((self.ip, self.port), RequestHandler)
self.server.serve_forever()
self.server.server_close()
def stop(self):
self.server.server_close()
if __name__ == "__main__":
simulator = DirectSyncronizedSimulatorFacade()
server = Server("localhost", 8883, simulator)
server.start()
| [
"SocketServer.ThreadingTCPServer",
"RequestHandler.buildRequestHandler",
"DirectSyncronizedSimulatorFacade.DirectSyncronizedSimulatorFacade"
] | [((692, 726), 'DirectSyncronizedSimulatorFacade.DirectSyncronizedSimulatorFacade', 'DirectSyncronizedSimulatorFacade', ([], {}), '()\n', (724, 726), False, 'from DirectSyncronizedSimulatorFacade import DirectSyncronizedSimulatorFacade\n'), ((385, 420), 'RequestHandler.buildRequestHandler', 'buildRequestHandler', (['self.simulator'], {}), '(self.simulator)\n', (404, 420), False, 'from RequestHandler import buildRequestHandler\n'), ((443, 512), 'SocketServer.ThreadingTCPServer', 'SocketServer.ThreadingTCPServer', (['(self.ip, self.port)', 'RequestHandler'], {}), '((self.ip, self.port), RequestHandler)\n', (474, 512), False, 'import SocketServer\n')] |
import sys
import arbor
import matplotlib.pyplot as plt
class ring_recipe (arbor.recipe):
def __init__(self, n=4):
# The base C++ class constructor must be called first, to ensure that
# all memory in the C++ class is initialized correctly.
arbor.recipe.__init__(self)
self.ncells = n
self.params = arbor.cell_parameters()
# The num_cells method that returns the total number of cells in the model
# must be implemented.
def num_cells(self):
return self.ncells
# The cell_description method returns a cell
def cell_description(self, gid):
return arbor.make_cable_cell(gid, self.params)
def num_targets(self, gid):
return 1
def num_sources(self, gid):
return 1
# The kind method returns the type of cell with gid.
# Note: this must agree with the type returned by cell_description.
def cell_kind(self, gid):
return arbor.cell_kind.cable
# Make a ring network
def connections_on(self, gid):
src = (gid-1)%self.ncells
w = 0.01
d = 10
return [arbor.connection(arbor.cell_member(src,0), arbor.cell_member(gid,0), w, d)]
# Attach a generator to the first cell in the ring.
def event_generators(self, gid):
if gid==0:
sched = arbor.explicit_schedule([1])
return [arbor.event_generator(arbor.cell_member(0,0), 0.1, sched)]
return []
# Define one probe (for measuring voltage at the soma) on each cell.
def num_probes(self, gid):
return 1
def get_probe(self, id):
loc = arbor.location(0, 0) # at the soma
return arbor.cable_probe('voltage', id, loc)
context = arbor.context(threads=4, gpu_id=None)
print(context)
meters = arbor.meter_manager()
meters.start(context)
recipe = ring_recipe(10)
print(f'{recipe}')
meters.checkpoint('recipe-create', context)
decomp = arbor.partition_load_balance(recipe, context)
print(f'{decomp}')
hint = arbor.partition_hint()
hint.prefer_gpu = True
hint.gpu_group_size = 1000
print(f'{hint}')
hints = dict([(arbor.cell_kind.cable, hint)])
decomp = arbor.partition_load_balance(recipe, context, hints)
print(f'{decomp}')
meters.checkpoint('load-balance', context)
sim = arbor.simulation(recipe, decomp, context)
meters.checkpoint('simulation-init', context)
spike_recorder = arbor.attach_spike_recorder(sim)
pid = arbor.cell_member(0,0) # cell 0, probe 0
# Attach a sampler to the voltage probe on cell 0.
# Sample rate of 1 sample every ms.
sampler = arbor.attach_sampler(sim, 1, pid)
sim.run(100)
print(f'{sim} finished')
meters.checkpoint('simulation-run', context)
print(f'{arbor.meter_report(meters, context)}')
for sp in spike_recorder.spikes:
print(sp)
print('voltage samples for probe id ', end = '')
print(pid, end = '')
print(':')
time = []
value = []
for sa in sampler.samples(pid):
print(sa)
time.append(sa.time)
value.append(sa.value)
# plot the recorded voltages over time
fig, ax = plt.subplots()
ax.plot(time, value)
ax.set(xlabel='time (ms)', ylabel='voltage (mV)', title='ring demo')
ax.legend(['voltage'])
plt.xlim(0,100)
ax.grid()
fig.savefig("voltages.png", dpi=300)
| [
"arbor.meter_report",
"arbor.cell_parameters",
"arbor.context",
"matplotlib.pyplot.xlim",
"arbor.simulation",
"arbor.attach_spike_recorder",
"arbor.meter_manager",
"arbor.explicit_schedule",
"arbor.cell_member",
"arbor.partition_load_balance",
"arbor.recipe.__init__",
"arbor.cable_probe",
"arbor.attach_sampler",
"arbor.make_cable_cell",
"arbor.location",
"matplotlib.pyplot.subplots",
"arbor.partition_hint"
] | [((1711, 1748), 'arbor.context', 'arbor.context', ([], {'threads': '(4)', 'gpu_id': 'None'}), '(threads=4, gpu_id=None)\n', (1724, 1748), False, 'import arbor\n'), ((1774, 1795), 'arbor.meter_manager', 'arbor.meter_manager', ([], {}), '()\n', (1793, 1795), False, 'import arbor\n'), ((1918, 1963), 'arbor.partition_load_balance', 'arbor.partition_load_balance', (['recipe', 'context'], {}), '(recipe, context)\n', (1946, 1963), False, 'import arbor\n'), ((1991, 2013), 'arbor.partition_hint', 'arbor.partition_hint', ([], {}), '()\n', (2011, 2013), False, 'import arbor\n'), ((2137, 2189), 'arbor.partition_load_balance', 'arbor.partition_load_balance', (['recipe', 'context', 'hints'], {}), '(recipe, context, hints)\n', (2165, 2189), False, 'import arbor\n'), ((2260, 2301), 'arbor.simulation', 'arbor.simulation', (['recipe', 'decomp', 'context'], {}), '(recipe, decomp, context)\n', (2276, 2301), False, 'import arbor\n'), ((2367, 2399), 'arbor.attach_spike_recorder', 'arbor.attach_spike_recorder', (['sim'], {}), '(sim)\n', (2394, 2399), False, 'import arbor\n'), ((2407, 2430), 'arbor.cell_member', 'arbor.cell_member', (['(0)', '(0)'], {}), '(0, 0)\n', (2424, 2430), False, 'import arbor\n'), ((2545, 2578), 'arbor.attach_sampler', 'arbor.attach_sampler', (['sim', '(1)', 'pid'], {}), '(sim, 1, pid)\n', (2565, 2578), False, 'import arbor\n'), ((3013, 3027), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3025, 3027), True, 'import matplotlib.pyplot as plt\n'), ((3141, 3157), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (3149, 3157), True, 'import matplotlib.pyplot as plt\n'), ((271, 298), 'arbor.recipe.__init__', 'arbor.recipe.__init__', (['self'], {}), '(self)\n', (292, 298), False, 'import arbor\n'), ((345, 368), 'arbor.cell_parameters', 'arbor.cell_parameters', ([], {}), '()\n', (366, 368), False, 'import arbor\n'), ((630, 669), 'arbor.make_cable_cell', 'arbor.make_cable_cell', (['gid', 'self.params'], {}), '(gid, self.params)\n', (651, 669), False, 'import arbor\n'), ((1612, 1632), 'arbor.location', 'arbor.location', (['(0)', '(0)'], {}), '(0, 0)\n', (1626, 1632), False, 'import arbor\n'), ((1662, 1699), 'arbor.cable_probe', 'arbor.cable_probe', (['"""voltage"""', 'id', 'loc'], {}), "('voltage', id, loc)\n", (1679, 1699), False, 'import arbor\n'), ((1320, 1348), 'arbor.explicit_schedule', 'arbor.explicit_schedule', (['[1]'], {}), '([1])\n', (1343, 1348), False, 'import arbor\n'), ((2674, 2709), 'arbor.meter_report', 'arbor.meter_report', (['meters', 'context'], {}), '(meters, context)\n', (2692, 2709), False, 'import arbor\n'), ((1128, 1153), 'arbor.cell_member', 'arbor.cell_member', (['src', '(0)'], {}), '(src, 0)\n', (1145, 1153), False, 'import arbor\n'), ((1154, 1179), 'arbor.cell_member', 'arbor.cell_member', (['gid', '(0)'], {}), '(gid, 0)\n', (1171, 1179), False, 'import arbor\n'), ((1391, 1414), 'arbor.cell_member', 'arbor.cell_member', (['(0)', '(0)'], {}), '(0, 0)\n', (1408, 1414), False, 'import arbor\n')] |
# Generated by Django 2.0.4 on 2018-05-07 07:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0004_custompermission'),
]
operations = [
migrations.CreateModel(
name='IntegratedDose',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dose', models.DecimalField(decimal_places=7, max_digits=14, null=True)),
('gamma', models.DecimalField(decimal_places=7, max_digits=14, null=True)),
('neutron', models.DecimalField(decimal_places=7, max_digits=14, null=True)),
('datafile_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.DataFile')),
('flight_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Flight')),
],
),
]
| [
"django.db.models.DecimalField",
"django.db.models.AutoField",
"django.db.models.ForeignKey"
] | [((366, 459), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (382, 459), False, 'from django.db import migrations, models\n'), ((483, 546), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(7)', 'max_digits': '(14)', 'null': '(True)'}), '(decimal_places=7, max_digits=14, null=True)\n', (502, 546), False, 'from django.db import migrations, models\n'), ((575, 638), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(7)', 'max_digits': '(14)', 'null': '(True)'}), '(decimal_places=7, max_digits=14, null=True)\n', (594, 638), False, 'from django.db import migrations, models\n'), ((669, 732), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(7)', 'max_digits': '(14)', 'null': '(True)'}), '(decimal_places=7, max_digits=14, null=True)\n', (688, 732), False, 'from django.db import migrations, models\n'), ((767, 854), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""main.DataFile"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'main.DataFile')\n", (784, 854), False, 'from django.db import migrations, models\n'), ((882, 967), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""main.Flight"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='main.Flight'\n )\n", (899, 967), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 8 17:00:42 2020
@author: lilis
"""
import pygame
import sys
import os
import random
# to include images, we need the png sources in the working directory
# I'll use John's head as example - johnpic.jpg
_image_library = {}
image = pygame.image.load('johnpic.jpg')
_songs = ['stressclock.mp3', 'stressclock.mp3']#'stresspulse.mp3']
_currently_playing_song = None
def get_image(path):
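    # Cache images by (canonicalized) path so each file is only loaded once.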
global _image_library
image = _image_library.get(path)
if image == None:
canonicalized_path = path.replace('/', os.sep).replace('\\', os.sep)
image = pygame.image.load(canonicalized_path)
_image_library[path] = image
return image
def play_a_different_song():
global _currently_playing_song, _songs
next_song = random.choice(_songs)
while next_song == _currently_playing_song:
next_song = random.choice(_songs)
_currently_playing_song = next_song
pygame.mixer.music.load(next_song)
pygame.mixer.music.play()
_sound_library = {}
def play_sound(path):
global _sound_library
sound = _sound_library.get(path)
if sound == None:
cannonicalized_path = path.replace('/', os.sep).replace('\\', os.sep)
sound = pygame.mixer.Sound(cannonicalized_path)
_sound_library[path] = sound
sound.play()
pygame.init()
# Including music -- can play a song once or on a loop
pygame.mixer.music.load('stressclock.mp3')
pygame.mixer.music.play(-1) # the argument is a loop count: 0 plays the track once,
# -1 repeats it indefinitely
# pygame.mixer.music.stop() -- stops current song and also erases whole queue
# window of desired size, a surface obj
screen = pygame.display.set_mode((400,300))
done = False
is_blue = True
x = 30
y = 30
clock = pygame.time.Clock()
while not done:
    # pygame.event.get() clears the event queue. If we don't call it, the
    # window's messages will pile up and the game gets slow.
# EVENT PUMPING
for event in pygame.event.get():
# pygame.QUIT called when you hit
# x marker in corner
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
is_blue = not is_blue
pressed = pygame.key.get_pressed()
if pressed[pygame.K_UP]: y -= 3
if pressed[pygame.K_DOWN]: y += 3
if pressed[pygame.K_LEFT]: x -= 3
if pressed[pygame.K_RIGHT]: x += 3
if x < 0 or y < 0 or x>400 or y>300:
# pygame.mixer.music.load('stresspulse.mp3')
# pygame.mixer.music.play(0)
# this version plays our sound only once
effect = pygame.mixer.Sound('stresspulse.mp3')
effect.play(0)
# queues the next song to start after...
# pygame.mixer.music.queue('stressclock.mp3')
#interactivity from if statements in the event queue
if is_blue:
color = (0,128,255)
else:
color = (255,100,0)
# display some objects
# first, reset the screen before displaying things otherwise won't update right:
screen.fill((0,0,0))
# render the rect
pygame.draw.rect(screen, color, pygame.Rect(x,y,60,60))
# Create a surface object on which we will put an image.
#If no image, it's just a black square
# The SRCALPHA makes an empty transparent image, we can't see it
surface = pygame.Surface((100,100), pygame.SRCALPHA)
screen.blit(get_image("johnpic.jpg"), (400-x,300-y))
    # pygame is double buffered -- it keeps two buffers, a load buffer that is
    # drawn into and a display buffer that is shown; flip() displays the
    # latest frame.
pygame.display.flip()
# We first adjust the frame update rate to be reasonable
clock.tick(60) # wait 1/60s before executing this loop, 60fps
pygame.quit()
sys.exit() | [
"random.choice",
"sys.exit",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.Surface",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.mixer.Sound",
"pygame.key.get_pressed",
"pygame.mixer.music.load",
"pygame.time.Clock",
"pygame.image.load",
"pygame.mixer.music.play",
"pygame.Rect"
] | [((304, 336), 'pygame.image.load', 'pygame.image.load', (['"""johnpic.jpg"""'], {}), "('johnpic.jpg')\n", (321, 336), False, 'import pygame\n'), ((1386, 1399), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1397, 1399), False, 'import pygame\n'), ((1457, 1499), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['"""stressclock.mp3"""'], {}), "('stressclock.mp3')\n", (1480, 1499), False, 'import pygame\n'), ((1500, 1527), 'pygame.mixer.music.play', 'pygame.mixer.music.play', (['(-1)'], {}), '(-1)\n', (1523, 1527), False, 'import pygame\n'), ((1749, 1784), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(400, 300)'], {}), '((400, 300))\n', (1772, 1784), False, 'import pygame\n'), ((1839, 1858), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (1856, 1858), False, 'import pygame\n'), ((3892, 3905), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (3903, 3905), False, 'import pygame\n'), ((3906, 3916), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3914, 3916), False, 'import sys\n'), ((817, 838), 'random.choice', 'random.choice', (['_songs'], {}), '(_songs)\n', (830, 838), False, 'import random\n'), ((2055, 2073), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2071, 2073), False, 'import pygame\n'), ((2339, 2363), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (2361, 2363), False, 'import pygame\n'), ((3441, 3484), 'pygame.Surface', 'pygame.Surface', (['(100, 100)', 'pygame.SRCALPHA'], {}), '((100, 100), pygame.SRCALPHA)\n', (3455, 3484), False, 'import pygame\n'), ((3718, 3739), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (3737, 3739), False, 'import pygame\n'), ((636, 673), 'pygame.image.load', 'pygame.image.load', (['canonicalized_path'], {}), '(canonicalized_path)\n', (653, 673), False, 'import pygame\n'), ((907, 928), 'random.choice', 'random.choice', (['_songs'], {}), '(_songs)\n', (920, 928), False, 'import random\n'), ((981, 1015), 'pygame.mixer.music.load', 'pygame.mixer.music.load', (['next_song'], {}), '(next_song)\n', (1004, 1015), False, 'import pygame\n'), ((1024, 1049), 'pygame.mixer.music.play', 'pygame.mixer.music.play', ([], {}), '()\n', (1047, 1049), False, 'import pygame\n'), ((1280, 1319), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['cannonicalized_path'], {}), '(cannonicalized_path)\n', (1298, 1319), False, 'import pygame\n'), ((2715, 2752), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""stresspulse.mp3"""'], {}), "('stresspulse.mp3')\n", (2733, 2752), False, 'import pygame\n'), ((3219, 3244), 'pygame.Rect', 'pygame.Rect', (['x', 'y', '(60)', '(60)'], {}), '(x, y, 60, 60)\n', (3230, 3244), False, 'import pygame\n')] |
from typing import Dict, List
import numpy as np
import torch as t
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from transformers import PreTrainedTokenizer
from unify_eval.model.mixins.classification import Classifier
from unify_eval.model.types import Tensor
from unify_eval.utils.label_mapper import LabelMapper
class TransformerClassifier(t.nn.Module):
"""
A classifier that is composed of 2 components:
a. an attention-based encoder to produce meaningful embeddings -> can be finetuned
b. a decoder whose architecture can be
1. "mlp": feedforward layer(s)
2. "attention": multi-head self-attention
3. something else, e.g. "lstm"
"""
# currently the architecture is expected to be one of the following:
# "mlp", "attention", "lstm", "gru", "rnn"
def __init__(self, encoder: t.nn.Module, clf: t.nn.Module, finetuning=False, clf_architecture="mlp") -> None:
super().__init__()
self.encoder = encoder
self.clf = clf
self.finetuning = finetuning
self.clf_architecture = clf_architecture
def forward_encoder(self, token_indices: t.Tensor, attention_mask: t.Tensor, token_type_ids: t.Tensor = None) -> t.Tensor:
return self.encoder(token_indices, attention_mask=attention_mask)[0] if token_type_ids is None \
else self.encoder(token_indices, attention_mask=attention_mask, token_type_ids=token_type_ids)[0]
def forward_clf(self, embedded: t.Tensor, attention_mask: t.Tensor) -> t.Tensor:
if self.clf_architecture == "attention":
return self.clf(embedded, attention_mask, reconstruct_mask=True)
elif self.clf_architecture == "mlp":
return self.clf(embedded.mean(axis=-2))
else:
return self.clf(embedded, attention_mask)
def forward(self, token_indices: t.Tensor, attention_mask: t.Tensor, token_type_ids: t.Tensor = None) -> t.Tensor:
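        # Encode the tokens and classify them; when the encoder is not being
        # finetuned it runs under no_grad so only the classifier head
        # receives gradients.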
if not self.finetuning:
with t.no_grad():
embedded = self.forward_encoder(token_indices, attention_mask, token_type_ids)
with t.enable_grad():
return self.forward_clf(embedded, attention_mask)
else:
embedded = self.forward_encoder(token_indices, attention_mask, token_type_ids)
return self.forward_clf(embedded, attention_mask)
class TransformerClassificationModel(Classifier):
"""
A full-fledged model built around the TransformerClassifier class.
It takes care of any necessary data preparation steps and manages
the necessary information for the training procedure.
    Currently, gpt2 models from huggingface can be directly plugged into this class.
"""
def __init__(self, label_mapper: LabelMapper, transformer_classifier: TransformerClassifier,
tokenizer: PreTrainedTokenizer, lr: float = 0.001, weight_decay: float = 0.01):
super().__init__(label_mapper)
self.transformer_classifier = transformer_classifier
self.tokenizer = tokenizer
self.lr = lr
self.weight_decay = weight_decay
self._xent = CrossEntropyLoss()
trainable_params = list(self.transformer_classifier.clf.parameters())
if self.transformer_classifier.finetuning:
trainable_params = list(self.transformer_classifier.encoder.parameters()) + trainable_params
self._opt = t.optim.AdamW(params=trainable_params, lr=lr, weight_decay=weight_decay)
self._opt.zero_grad()
self.max_len = 512
def preprocess(self, texts: List[str]) -> Dict[str, t.Tensor]:
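        # Tokenize and truncate each text, right-pad the id sequences with 0
        # to the longest text in the batch, and build the matching attention mask.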
tokenized_texts = [self.tokenizer.tokenize(text)[:self.max_len] for text in texts]
# Convert token to vocabulary indices
max_len_found = max([len(text) for text in tokenized_texts])
indexed_texts = [self.tokenizer.convert_tokens_to_ids(text) + (max_len_found - len(text)) * [0] for text in
tokenized_texts]
attention_mask = [[1 if token != 0 else 0 for token in text] + (max_len_found - len(text)) * [0] for text in
tokenized_texts]
# Convert inputs to PyTorch tensors
token_indices = t.tensor(indexed_texts).to(self.current_device)
attention_mask = t.tensor(attention_mask).to(self.current_device)
return {
"token_indices": token_indices,
"attention_mask": attention_mask
}
def predict_label_probabilities(self, **kwargs) -> Tensor:
return F.softmax(self.get_logits(**kwargs), dim=-1).detach().cpu().numpy()
def get_logits(self, **kwargs) -> Tensor:
return self.transformer_classifier.forward(**self.preprocess(texts=kwargs["clauses"]))
def train(self, **kwargs) -> "TransformerClassificationModel":
loss = self.get_loss(as_tensor=True, **kwargs)["cross_entropy"]
loss.backward()
self._opt.step()
self._opt.zero_grad()
return self
def get_loss(self, as_tensor: bool = False, **kwargs) -> Dict[str, Tensor]:
logits = self.get_logits(**kwargs)
loss = self._xent.forward(input=logits,
target=t.from_numpy(self.label_mapper.map_to_indices(kwargs["labels"]))
.long().to(self.current_device))
if not as_tensor:
loss = loss.detach().cpu().item()
return {
"cross_entropy": loss
}
@staticmethod
def from_components(**kwargs) -> "TransformerClassificationModel":
return TransformerClassificationModel(**kwargs)
def get_components(self) -> dict:
return {
"transformer_classifier": self.transformer_classifier,
"tokenizer": self.tokenizer,
"label_mapper": self.label_mapper,
"lr": self.lr,
"weight_decay": self.weight_decay
}
def get_numpy_parameters(self) -> Dict[str, np.ndarray]:
return dict((n, p.detach().cpu().numpy()) for n, p in self.transformer_classifier.named_parameters())
def to_device(self, name: str) -> "TransformerClassificationModel":
super().to_device(name)
self.transformer_classifier.to(name)
return self
class BertClassificationModel(TransformerClassificationModel):
"""
Modifies the necessary methods in TransformerClassificationModel class
to make it usable for BERT-based models (incl. Distilbert)
"""
def __init__(self, label_mapper: LabelMapper, transformer_classifier: TransformerClassifier,
tokenizer: PreTrainedTokenizer, lr: float = 0.001, weight_decay: float = 0.01, distilling=False):
TransformerClassificationModel.__init__(self, label_mapper, transformer_classifier, tokenizer, lr, weight_decay)
self.distilling = distilling
def preprocess(self, texts: List[str]) -> Dict[str, t.Tensor]:
texts = [f"[CLS] {text} [SEP]" for text in texts]
# text = "[CLS] When does my package arrive ? [SEP]"
tokenized_texts = [self.tokenizer.tokenize(text)[:self.max_len] for text in texts]
max_len_found = max([len(text) for text in tokenized_texts])
indexed_texts = [self.tokenizer.convert_tokens_to_ids(text) + (max_len_found - len(text)) * [0] for text in
tokenized_texts]
attention_mask = [[1 if token != 0 else 0 for token in text] + (max_len_found - len(text)) * [0] for text in
tokenized_texts]
# Convert inputs to PyTorch tensors
token_indices = t.tensor(indexed_texts).to(self.current_device)
attention_mask = t.tensor(attention_mask).to(self.current_device)
tensor_dict = {
"token_indices": token_indices,
"attention_mask": attention_mask
}
if not self.distilling:
segments_ids = [[0] * len(text) for text in indexed_texts]
token_type_ids = t.tensor(segments_ids).to(self.current_device)
tensor_dict["token_type_ids"] = token_type_ids
return tensor_dict
class RobertaClassificationModel(TransformerClassificationModel):
"""
Modifies the necessary methods in TransformerClassificationModel class
to make it usable for Roberta
"""
def __init__(self, label_mapper: LabelMapper, transformer_classifier: TransformerClassifier,
tokenizer: PreTrainedTokenizer, lr: float = 0.001, weight_decay: float = 0.01):
TransformerClassificationModel.__init__(self, label_mapper, transformer_classifier, tokenizer, lr, weight_decay)
def preprocess(self, texts: List[str]) -> Dict[str, t.Tensor]:
texts = [f"<s> {text} </s>" for text in texts]
# text = "[CLS] When does my package arrive ? [SEP]"
tokenized_texts = [self.tokenizer.tokenize(text)[:self.max_len] for text in texts]
max_len_found = max([len(text) for text in tokenized_texts])
indexed_texts = [self.tokenizer.convert_tokens_to_ids(text) + (max_len_found - len(text)) * [0] for text in
tokenized_texts]
attention_mask = [[1 if token != 0 else 0 for token in text] + (max_len_found - len(text)) * [0] for text in
tokenized_texts]
# Convert inputs to PyTorch tensors
token_indices = t.tensor(indexed_texts).to(self.current_device)
attention_mask = t.tensor(attention_mask).to(self.current_device)
return {
"token_indices": token_indices,
"attention_mask": attention_mask
}
| [
"torch.enable_grad",
"torch.nn.CrossEntropyLoss",
"torch.tensor",
"torch.no_grad",
"torch.optim.AdamW"
] | [((3160, 3178), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (3176, 3178), False, 'from torch.nn import CrossEntropyLoss\n'), ((3433, 3505), 'torch.optim.AdamW', 't.optim.AdamW', ([], {'params': 'trainable_params', 'lr': 'lr', 'weight_decay': 'weight_decay'}), '(params=trainable_params, lr=lr, weight_decay=weight_decay)\n', (3446, 3505), True, 'import torch as t\n'), ((2022, 2033), 'torch.no_grad', 't.no_grad', ([], {}), '()\n', (2031, 2033), True, 'import torch as t\n'), ((2147, 2162), 'torch.enable_grad', 't.enable_grad', ([], {}), '()\n', (2160, 2162), True, 'import torch as t\n'), ((4225, 4248), 'torch.tensor', 't.tensor', (['indexed_texts'], {}), '(indexed_texts)\n', (4233, 4248), True, 'import torch as t\n'), ((4298, 4322), 'torch.tensor', 't.tensor', (['attention_mask'], {}), '(attention_mask)\n', (4306, 4322), True, 'import torch as t\n'), ((7577, 7600), 'torch.tensor', 't.tensor', (['indexed_texts'], {}), '(indexed_texts)\n', (7585, 7600), True, 'import torch as t\n'), ((7650, 7674), 'torch.tensor', 't.tensor', (['attention_mask'], {}), '(attention_mask)\n', (7658, 7674), True, 'import torch as t\n'), ((9342, 9365), 'torch.tensor', 't.tensor', (['indexed_texts'], {}), '(indexed_texts)\n', (9350, 9365), True, 'import torch as t\n'), ((9415, 9439), 'torch.tensor', 't.tensor', (['attention_mask'], {}), '(attention_mask)\n', (9423, 9439), True, 'import torch as t\n'), ((7956, 7978), 'torch.tensor', 't.tensor', (['segments_ids'], {}), '(segments_ids)\n', (7964, 7978), True, 'import torch as t\n')] |
from textgenrnn import textgenrnn
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--file", help="Input file containing text data", default="./allposts.txt")
ap.add_argument("-e", "--epochs", help="Number of epochs to train", default=3)
args = ap.parse_args()
args.epochs = int(args.epochs)
generator = textgenrnn()
generator.train_from_file(args.file, num_epochs=int(args.epochs)) | [
"textgenrnn.textgenrnn",
"argparse.ArgumentParser"
] | [((56, 81), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (79, 81), False, 'import argparse\n'), ((327, 339), 'textgenrnn.textgenrnn', 'textgenrnn', ([], {}), '()\n', (337, 339), False, 'from textgenrnn import textgenrnn\n')] |
from google.appengine.ext import ndb
from protorpc import messages
from google.appengine.ext.ndb import msgprop
from csvmodel import CsvModel
class FeedInfo(CsvModel):
_csv_file = 'feed_info.txt'
feed_publisher_name = ndb.StringProperty(required=True)
feed_publisher_url = ndb.StringProperty(required=True)
feed_lang = ndb.StringProperty(required=True)
feed_start_date = ndb.DateProperty()
feed_end_date = ndb.DateProperty()
feed_version = ndb.StringProperty()
| [
"google.appengine.ext.ndb.DateProperty",
"google.appengine.ext.ndb.StringProperty"
] | [((227, 260), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'required': '(True)'}), '(required=True)\n', (245, 260), False, 'from google.appengine.ext import ndb\n'), ((286, 319), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'required': '(True)'}), '(required=True)\n', (304, 319), False, 'from google.appengine.ext import ndb\n'), ((336, 369), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'required': '(True)'}), '(required=True)\n', (354, 369), False, 'from google.appengine.ext import ndb\n'), ((392, 410), 'google.appengine.ext.ndb.DateProperty', 'ndb.DateProperty', ([], {}), '()\n', (408, 410), False, 'from google.appengine.ext import ndb\n'), ((431, 449), 'google.appengine.ext.ndb.DateProperty', 'ndb.DateProperty', ([], {}), '()\n', (447, 449), False, 'from google.appengine.ext import ndb\n'), ((469, 489), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (487, 489), False, 'from google.appengine.ext import ndb\n')] |
# Generated by Django 3.2.8 on 2022-01-06 08:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organization', '0001_initial'),
('students', '0008_auto_20211028_1226'),
]
operations = [
migrations.RemoveField(
model_name='institute',
name='under_campus',
),
migrations.AlterField(
model_name='graduates',
name='under_campus',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='organization.campus'),
),
migrations.AlterField(
model_name='graduates',
name='under_institute',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='organization.institute'),
),
migrations.DeleteModel(name='Campus', ),
migrations.DeleteModel(name='Institute', ),
]
| [
"django.db.migrations.DeleteModel",
"django.db.migrations.RemoveField",
"django.db.models.ForeignKey"
] | [((311, 378), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""institute"""', 'name': '"""under_campus"""'}), "(model_name='institute', name='under_campus')\n", (333, 378), False, 'from django.db import migrations, models\n'), ((933, 970), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Campus"""'}), "(name='Campus')\n", (955, 970), False, 'from django.db import migrations, models\n'), ((982, 1022), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Institute"""'}), "(name='Institute')\n", (1004, 1022), False, 'from django.db import migrations, models\n'), ((533, 626), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""organization.campus"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'organization.campus')\n", (550, 626), False, 'from django.db import migrations, models\n'), ((788, 884), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""organization.institute"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'organization.institute')\n", (805, 884), False, 'from django.db import migrations, models\n')] |
import sys
import pyvisa
from pathlib import Path
from serial import Serial
from calibration_log import CalibrationLog
from calibration_process import CalibrationProcess
from calibration_results_loader import CalibrationResultsLoader
from calibrator import Calibrator
from cluck2sesame_device import Cluck2SesameDevice
from sdm3065x_frequency_counter import Sdm3065xFrequencyCounter
from tf930_frequency_counter import Tf930FrequencyCounter
calibration_dir = Path('../calibrated')
def calibrate_devices():
# with Serial('/dev/ttyWHATEVER', baudrate=115200, timeout=30, xonxoff=True) as frequency_counter_port:
# frequency_counter = Tf930FrequencyCounter(frequency_counter_port)
global calibration_dir
resources = pyvisa.ResourceManager('@py')
with resources.open_resource('USB0::62700::60984::SDM36GBQ5R0755::0::INSTR') as resource:
frequency_counter = Sdm3065xFrequencyCounter(resource)
calibrator = CalibrationProcess(
lambda: Calibrator(
CalibrationLog(calibration_dir),
frequency_counter,
lambda: Cluck2SesameDevice(lambda: Serial('/dev/ttyS0', baudrate=9600, timeout=30)),
vdd_volts_range=[2.7, 4.5]))
calibrator.calibrate()
def recalculate_results_for(device_id):
global calibration_dir
loader = CalibrationResultsLoader(Path(calibration_dir, device_id))
results = loader.load()
log = CalibrationLog(calibration_dir).for_device(device_id)
log.add_calibration_results(results)
log.add_calibrated_nvm_settings(results.calibrated_nvm_settings)
if __name__ == '__main__':
if len(sys.argv) == 1:
calibrate_devices()
else:
recalculate_results_for(sys.argv[1])
| [
"pathlib.Path",
"calibration_log.CalibrationLog",
"sdm3065x_frequency_counter.Sdm3065xFrequencyCounter",
"pyvisa.ResourceManager",
"serial.Serial"
] | [((462, 483), 'pathlib.Path', 'Path', (['"""../calibrated"""'], {}), "('../calibrated')\n", (466, 483), False, 'from pathlib import Path\n'), ((721, 750), 'pyvisa.ResourceManager', 'pyvisa.ResourceManager', (['"""@py"""'], {}), "('@py')\n", (743, 750), False, 'import pyvisa\n'), ((864, 898), 'sdm3065x_frequency_counter.Sdm3065xFrequencyCounter', 'Sdm3065xFrequencyCounter', (['resource'], {}), '(resource)\n', (888, 898), False, 'from sdm3065x_frequency_counter import Sdm3065xFrequencyCounter\n'), ((1265, 1297), 'pathlib.Path', 'Path', (['calibration_dir', 'device_id'], {}), '(calibration_dir, device_id)\n', (1269, 1297), False, 'from pathlib import Path\n'), ((1331, 1362), 'calibration_log.CalibrationLog', 'CalibrationLog', (['calibration_dir'], {}), '(calibration_dir)\n', (1345, 1362), False, 'from calibration_log import CalibrationLog\n'), ((961, 992), 'calibration_log.CalibrationLog', 'CalibrationLog', (['calibration_dir'], {}), '(calibration_dir)\n', (975, 992), False, 'from calibration_log import CalibrationLog\n'), ((1056, 1103), 'serial.Serial', 'Serial', (['"""/dev/ttyS0"""'], {'baudrate': '(9600)', 'timeout': '(30)'}), "('/dev/ttyS0', baudrate=9600, timeout=30)\n", (1062, 1103), False, 'from serial import Serial\n')] |
import tensorflow as tf
import transformers
def prediction(features, params, mode):
"""
TODO: enable pretrained model
- add [cls], [sep] tokens
- add a embedding
:param features:
:param params:
:param mode:
:return:
"""
words = features['words'] # [batch_size, doc_len]
with tf.variable_scope('base_transformer'):
tparams = transformers.base_transformer_params(params)
base_transformer = transformers.create(params, tparams, mode)
# if pretrained, add leading [cls] and trailing [sep]
if params.pretrained:
# prepend [CLS] token
words = tf.concat([tf.ones([words.shape[0], 1], tf.int64) * params.word2id['[CLS]'], words], axis=1)[:, :-1]
# append [SEP] token to end of each doc
word_len = seqlen(words)
onehot = tf.one_hot(word_len, params.max_doc_len, dtype=tf.int64) # [batch, doc_len]
zerohot = tf.to_int64(tf.equal(onehot, 0))
words = (zerohot * words) + (onehot * params.word2id['[SEP]'])
# apply A sentence embedding
attn_bias = tf.get_variable('a_emb', shape=[1, 1, params.embedding_size])
else:
attn_bias = None if params.transformer_type == 'adaptive' else transformers.model_utils.get_padding_bias(words)
base_word_encoding = base_transformer.encode(words, attn_bias)
if params.transformer_type == 'adaptive':
base_word_encoding, (features['ponder_times'], features['remainders']) = base_word_encoding
# print('ACT Graph nodes:')
# for t in tf.get_default_graph().as_graph_def().node:
# if 'n_updates' in t.name:
# print(t.name)
logits = tf.layers.dense(base_word_encoding, len(params.boundary2id), name='cb_logits')
if mode == tf.estimator.ModeKeys.TRAIN:
logits = tf.layers.dropout(logits, rate=params.drop_prob)
return logits, base_word_encoding
def loss(features, labels, logits, params):
if 'concept' in params.modules:
boundary_eval_mask = tf.sequence_mask(tf.squeeze(features['doclen']), maxlen=params.max_doc_len, dtype=tf.float32)
# weight non-O classes x10
class_weights = (tf.to_float(tf.not_equal(labels['boundary_labels'], params.boundary2id['O'])) + 1.) * 10.
boundary_eval_mask *= class_weights
boundary_loss = tf.losses.sparse_softmax_cross_entropy(labels['boundary_labels'],
logits,
weights=boundary_eval_mask,
scope='boundary')
tf.summary.scalar('boundary_loss', boundary_loss)
if params.transformer_type == 'adaptive':
act_loss = tf.reduce_mean(features['ponder_times'] + features['remainders'])
tf.summary.scalar('boundary_act_loss', act_loss)
boundary_loss += params.act_loss_param * act_loss
return boundary_loss
else:
return 0.
def evaluation(features, labels, predictions, params, eval_metric_ops):
boundary_eval_mask = tf.sequence_mask(tf.squeeze(features['doclen']), maxlen=params.max_doc_len, dtype=tf.float32)
f1s = []
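  # compute precision/recall/F1 per boundary class over non-padded positions;
  # the macro-averaged F1 reported below excludes the 'O' (outside) class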
  for name, i in params.boundary2id.items():
labels_i = tf.equal(labels['boundary_labels'], i)
predictions_i = tf.equal(predictions, i)
r = tf.metrics.recall(labels_i, predictions_i, weights=boundary_eval_mask)
p = tf.metrics.precision(labels_i, predictions_i, weights=boundary_eval_mask)
f1 = f1_metric(p, r)
if name != "O":
f1s.append(f1[0])
eval_metric_ops['%s-f1' % name] = f1
if params.verbose_eval:
eval_metric_ops['%s-acc' % name] = tf.metrics.accuracy(labels_i, predictions_i, weights=boundary_eval_mask)
eval_metric_ops['%s-recall' % name] = r
eval_metric_ops['%s-prec' % name] = p
macro_avg_f1 = tf.reduce_mean(tf.stack(f1s))
eval_metric_ops['boundary-f1'] = macro_avg_f1, macro_avg_f1 # tf.group(f1_update_ops)
def boundary_predictions_to_labels(boundary_predictions, features, params):
# TODO: fill features['activity_labels'] and features['other_labels'] with predictions
raise NotImplementedError()
def f1_metric(prec_metric, recall_metric):
p, r = prec_metric[0], recall_metric[0]
f1 = 2 * p * r / (p + r + 1e-5)
return f1, tf.group(prec_metric[1], recall_metric[1])
def seqlen(idxs, dtype=tf.int32):
return tf.reduce_sum(tf.cast(tf.greater(idxs, 0), dtype), axis=-1)
| [
"tensorflow.equal",
"tensorflow.get_variable",
"tensorflow.group",
"tensorflow.reduce_mean",
"transformers.base_transformer_params",
"transformers.create",
"tensorflow.metrics.accuracy",
"tensorflow.not_equal",
"tensorflow.layers.dropout",
"tensorflow.greater",
"tensorflow.summary.scalar",
"transformers.model_utils.get_padding_bias",
"tensorflow.stack",
"tensorflow.one_hot",
"tensorflow.variable_scope",
"tensorflow.metrics.recall",
"tensorflow.ones",
"tensorflow.metrics.precision",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.squeeze"
] | [((301, 338), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""base_transformer"""'], {}), "('base_transformer')\n", (318, 338), True, 'import tensorflow as tf\n'), ((354, 398), 'transformers.base_transformer_params', 'transformers.base_transformer_params', (['params'], {}), '(params)\n', (390, 398), False, 'import transformers\n'), ((422, 464), 'transformers.create', 'transformers.create', (['params', 'tparams', 'mode'], {}), '(params, tparams, mode)\n', (441, 464), False, 'import transformers\n'), ((1723, 1771), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['logits'], {'rate': 'params.drop_prob'}), '(logits, rate=params.drop_prob)\n', (1740, 1771), True, 'import tensorflow as tf\n'), ((2210, 2333), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', (["labels['boundary_labels']", 'logits'], {'weights': 'boundary_eval_mask', 'scope': '"""boundary"""'}), "(labels['boundary_labels'], logits,\n weights=boundary_eval_mask, scope='boundary')\n", (2248, 2333), True, 'import tensorflow as tf\n'), ((2511, 2560), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""boundary_loss"""', 'boundary_loss'], {}), "('boundary_loss', boundary_loss)\n", (2528, 2560), True, 'import tensorflow as tf\n'), ((2962, 2992), 'tensorflow.squeeze', 'tf.squeeze', (["features['doclen']"], {}), "(features['doclen'])\n", (2972, 2992), True, 'import tensorflow as tf\n'), ((3114, 3152), 'tensorflow.equal', 'tf.equal', (["labels['boundary_labels']", 'i'], {}), "(labels['boundary_labels'], i)\n", (3122, 3152), True, 'import tensorflow as tf\n'), ((3173, 3197), 'tensorflow.equal', 'tf.equal', (['predictions', 'i'], {}), '(predictions, i)\n', (3181, 3197), True, 'import tensorflow as tf\n'), ((3206, 3276), 'tensorflow.metrics.recall', 'tf.metrics.recall', (['labels_i', 'predictions_i'], {'weights': 'boundary_eval_mask'}), '(labels_i, predictions_i, weights=boundary_eval_mask)\n', (3223, 3276), True, 'import tensorflow as tf\n'), ((3285, 3358), 'tensorflow.metrics.precision', 'tf.metrics.precision', (['labels_i', 'predictions_i'], {'weights': 'boundary_eval_mask'}), '(labels_i, predictions_i, weights=boundary_eval_mask)\n', (3305, 3358), True, 'import tensorflow as tf\n'), ((3734, 3747), 'tensorflow.stack', 'tf.stack', (['f1s'], {}), '(f1s)\n', (3742, 3747), True, 'import tensorflow as tf\n'), ((4169, 4211), 'tensorflow.group', 'tf.group', (['prec_metric[1]', 'recall_metric[1]'], {}), '(prec_metric[1], recall_metric[1])\n', (4177, 4211), True, 'import tensorflow as tf\n'), ((785, 841), 'tensorflow.one_hot', 'tf.one_hot', (['word_len', 'params.max_doc_len'], {'dtype': 'tf.int64'}), '(word_len, params.max_doc_len, dtype=tf.int64)\n', (795, 841), True, 'import tensorflow as tf\n'), ((1033, 1094), 'tensorflow.get_variable', 'tf.get_variable', (['"""a_emb"""'], {'shape': '[1, 1, params.embedding_size]'}), "('a_emb', shape=[1, 1, params.embedding_size])\n", (1048, 1094), True, 'import tensorflow as tf\n'), ((1931, 1961), 'tensorflow.squeeze', 'tf.squeeze', (["features['doclen']"], {}), "(features['doclen'])\n", (1941, 1961), True, 'import tensorflow as tf\n'), ((2624, 2689), 'tensorflow.reduce_mean', 'tf.reduce_mean', (["(features['ponder_times'] + features['remainders'])"], {}), "(features['ponder_times'] + features['remainders'])\n", (2638, 2689), True, 'import tensorflow as tf\n'), ((2696, 2744), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""boundary_act_loss"""', 'act_loss'], {}), "('boundary_act_loss', act_loss)\n", (2713, 2744), True, 'import tensorflow as 
tf\n'), ((3539, 3611), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', (['labels_i', 'predictions_i'], {'weights': 'boundary_eval_mask'}), '(labels_i, predictions_i, weights=boundary_eval_mask)\n', (3558, 3611), True, 'import tensorflow as tf\n'), ((4279, 4298), 'tensorflow.greater', 'tf.greater', (['idxs', '(0)'], {}), '(idxs, 0)\n', (4289, 4298), True, 'import tensorflow as tf\n'), ((890, 909), 'tensorflow.equal', 'tf.equal', (['onehot', '(0)'], {}), '(onehot, 0)\n', (898, 909), True, 'import tensorflow as tf\n'), ((1174, 1222), 'transformers.model_utils.get_padding_bias', 'transformers.model_utils.get_padding_bias', (['words'], {}), '(words)\n', (1215, 1222), False, 'import transformers\n'), ((2072, 2136), 'tensorflow.not_equal', 'tf.not_equal', (["labels['boundary_labels']", "params.boundary2id['O']"], {}), "(labels['boundary_labels'], params.boundary2id['O'])\n", (2084, 2136), True, 'import tensorflow as tf\n'), ((603, 641), 'tensorflow.ones', 'tf.ones', (['[words.shape[0], 1]', 'tf.int64'], {}), '([words.shape[0], 1], tf.int64)\n', (610, 641), True, 'import tensorflow as tf\n')] |
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import os
import re
import defusedxml.ElementTree as ET
import gcs_async
from github import models
import log_parser
import view_base
def parse_junit(xml, filename):
"""Generate failed tests as a series of (name, duration, text, filename) tuples."""
tree = ET.fromstring(xml)
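    # JUnit XML may use either a single <testsuite> root or a <testsuites> wrapper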
if tree.tag == 'testsuite':
for child in tree:
name = child.attrib['name']
time = float(child.attrib['time'])
for param in child.findall('failure'):
yield name, time, param.text, filename
elif tree.tag == 'testsuites':
for testsuite in tree:
suite_name = testsuite.attrib['name']
for child in testsuite.findall('testcase'):
name = '%s %s' % (suite_name, child.attrib['name'])
time = float(child.attrib['time'])
for param in child.findall('failure'):
yield name, time, param.text, filename
else:
logging.error('unable to find failures, unexpected tag %s', tree.tag)
@view_base.memcache_memoize('build-details://', expires=60 * 60 * 4)
def build_details(build_dir):
"""
Collect information from a build directory.
Args:
build_dir: GCS path containing a build's results.
Returns:
started: value from started.json {'version': ..., 'timestamp': ...}
finished: value from finished.json {'timestamp': ..., 'result': ...}
failures: list of (name, duration, text) tuples
      build_log: a highlighted portion of errors in the build log. May be None.
"""
started_fut = gcs_async.read(build_dir + '/started.json')
finished = gcs_async.read(build_dir + '/finished.json').get_result()
started = started_fut.get_result()
if finished and not started:
started = 'null'
if started and not finished:
finished = 'null'
elif not (started and finished):
return
started = json.loads(started)
finished = json.loads(finished)
failures = []
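    # find every junit_*.xml artifact for this build and fetch them asynchronously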
junit_paths = [f.filename for f in view_base.gcs_ls('%s/artifacts' % build_dir)
if re.match(r'junit_.*\.xml', os.path.basename(f.filename))]
junit_futures = {}
for f in junit_paths:
junit_futures[gcs_async.read(f)] = f
for future in junit_futures:
junit = future.get_result()
if junit is None:
continue
failures.extend(parse_junit(junit, junit_futures[future]))
failures.sort()
build_log = None
if finished and finished.get('result') != 'SUCCESS' and len(failures) == 0:
build_log = gcs_async.read(build_dir + '/build-log.txt').get_result()
if build_log:
build_log = log_parser.digest(build_log.decode('utf8', 'replace'))
logging.info('fallback log parser emitted %d lines',
build_log.count('\n'))
return started, finished, failures, build_log
class BuildHandler(view_base.BaseHandler):
"""Show information about a Build and its failing tests."""
def get(self, prefix, job, build):
self.check_bucket(prefix)
job_dir = '/%s/%s/' % (prefix, job)
build_dir = job_dir + build
details = build_details(build_dir)
if not details:
logging.warning('unable to load %s', build_dir)
self.render('build_404.html', {"build_dir": build_dir})
self.response.set_status(404)
return
started, finished, failures, build_log = details
if started:
commit = started['version'].split('+')[-1]
else:
commit = None
pr = None
pr_digest = None
if prefix.startswith(view_base.PR_PREFIX):
pr = os.path.basename(prefix)
pr_digest = models.GHIssueDigest.get('kubernetes/kubernetes', pr)
self.render('build.html', dict(
job_dir=job_dir, build_dir=build_dir, job=job, build=build,
commit=commit, started=started, finished=finished,
failures=failures, build_log=build_log, pr=pr, pr_digest=pr_digest))
class BuildListHandler(view_base.BaseHandler):
"""Show a list of Builds for a Job."""
def get(self, prefix, job):
self.check_bucket(prefix)
job_dir = '/%s/%s/' % (prefix, job)
fstats = view_base.gcs_ls(job_dir)
fstats.sort(key=lambda f: view_base.pad_numbers(f.filename),
reverse=True)
self.render('build_list.html',
dict(job=job, job_dir=job_dir, fstats=fstats))
class JobListHandler(view_base.BaseHandler):
"""Show a list of Jobs in a directory."""
def get(self, prefix):
self.check_bucket(prefix)
jobs_dir = '/%s' % prefix
fstats = view_base.gcs_ls(jobs_dir)
fstats.sort()
self.render('job_list.html', dict(jobs_dir=jobs_dir, fstats=fstats))
| [
"json.loads",
"defusedxml.ElementTree.fromstring",
"view_base.memcache_memoize",
"logging.warning",
"view_base.gcs_ls",
"os.path.basename",
"github.models.GHIssueDigest.get",
"logging.error",
"gcs_async.read",
"view_base.pad_numbers"
] | [((1672, 1739), 'view_base.memcache_memoize', 'view_base.memcache_memoize', (['"""build-details://"""'], {'expires': '(60 * 60 * 4)'}), "('build-details://', expires=60 * 60 * 4)\n", (1698, 1739), False, 'import view_base\n'), ((905, 923), 'defusedxml.ElementTree.fromstring', 'ET.fromstring', (['xml'], {}), '(xml)\n', (918, 923), True, 'import defusedxml.ElementTree as ET\n'), ((2223, 2266), 'gcs_async.read', 'gcs_async.read', (["(build_dir + '/started.json')"], {}), "(build_dir + '/started.json')\n", (2237, 2266), False, 'import gcs_async\n'), ((2562, 2581), 'json.loads', 'json.loads', (['started'], {}), '(started)\n', (2572, 2581), False, 'import json\n'), ((2597, 2617), 'json.loads', 'json.loads', (['finished'], {}), '(finished)\n', (2607, 2617), False, 'import json\n'), ((4925, 4950), 'view_base.gcs_ls', 'view_base.gcs_ls', (['job_dir'], {}), '(job_dir)\n', (4941, 4950), False, 'import view_base\n'), ((5365, 5391), 'view_base.gcs_ls', 'view_base.gcs_ls', (['jobs_dir'], {}), '(jobs_dir)\n', (5381, 5391), False, 'import view_base\n'), ((1599, 1668), 'logging.error', 'logging.error', (['"""unable to find failures, unexpected tag %s"""', 'tree.tag'], {}), "('unable to find failures, unexpected tag %s', tree.tag)\n", (1612, 1668), False, 'import logging\n'), ((2282, 2326), 'gcs_async.read', 'gcs_async.read', (["(build_dir + '/finished.json')"], {}), "(build_dir + '/finished.json')\n", (2296, 2326), False, 'import gcs_async\n'), ((2676, 2720), 'view_base.gcs_ls', 'view_base.gcs_ls', (["('%s/artifacts' % build_dir)"], {}), "('%s/artifacts' % build_dir)\n", (2692, 2720), False, 'import view_base\n'), ((2873, 2890), 'gcs_async.read', 'gcs_async.read', (['f'], {}), '(f)\n', (2887, 2890), False, 'import gcs_async\n'), ((3885, 3932), 'logging.warning', 'logging.warning', (['"""unable to load %s"""', 'build_dir'], {}), "('unable to load %s', build_dir)\n", (3900, 3932), False, 'import logging\n'), ((4347, 4371), 'os.path.basename', 'os.path.basename', (['prefix'], {}), '(prefix)\n', (4363, 4371), False, 'import os\n'), ((4396, 4449), 'github.models.GHIssueDigest.get', 'models.GHIssueDigest.get', (['"""kubernetes/kubernetes"""', 'pr'], {}), "('kubernetes/kubernetes', pr)\n", (4420, 4449), False, 'from github import models\n'), ((2770, 2798), 'os.path.basename', 'os.path.basename', (['f.filename'], {}), '(f.filename)\n', (2786, 2798), False, 'import os\n'), ((3222, 3266), 'gcs_async.read', 'gcs_async.read', (["(build_dir + '/build-log.txt')"], {}), "(build_dir + '/build-log.txt')\n", (3236, 3266), False, 'import gcs_async\n'), ((4985, 5018), 'view_base.pad_numbers', 'view_base.pad_numbers', (['f.filename'], {}), '(f.filename)\n', (5006, 5018), False, 'import view_base\n')] |
"""django_py_grader URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from py_grader import views
from py_grader.handler import add_grading_methods_to_db
add_grading_methods_to_db()
urlpatterns = [
# site utils
path('admin/', admin.site.urls),
# student pages
path('', views.index, name='index'),
path('submit/', views.submit, name='submit'),
path('submit_get/', views.submit_get, name='submit_get'),
path('submit/<str:assignment_name>/', views.submit_assignment, name='submit_assignment'),
path('view_submission_result/', views.view_any_submission_result, name='view_any_submission_result'),
path('view_submission_result_get/', views.view_any_submission_result_get, name='view_any_submission_result_get'),
path('view_submission_result/<int:submission_id>/', views.view_submission_result, name='view_submission_result'),
# grader pages
path('grader/', views.grader_index, name='grader_index'),
path('grader/test_submit/', views.test_submit, name='test_submit'),
path('grader/test_submit_get/', views.test_submit_get, name='test_submit_get'),
path('grader/test_submit/<str:assignment_name>/', views.test_submit_assignment, name='test_submit_assignment'),
path('grader/view_assignment_results/', views.view_results, name='view_results'),
path('grader/view_assignment_results_get/', views.view_results_get, name='view_results_get'),
path('grader/view_assignment_results/<str:assignment_name>/', views.view_assignment_results,
name='view_assignment_results'),
path('grader/create_assignment/', views.create_assignment, name='create_assignment'),
path('grader/assignments/', views.assignments, name='assignments'),
path('grader/add_test_case/', views.add_any_test_case, name='add_any_test_case'),
path('grader/add_any_test_case_get/', views.add_any_test_case_get, name='add_any_test_case_get'),
path('grader/add_test_case/<str:assignment_name>', views.add_test_case, name='add_test_case'),
path('grader/manage_net_ids/', views.manage_net_ids, name='manage_net_ids'),
path('grader/manage_net_ids/add_net_id/', views.add_net_id, name='add_net_id'),
path('grader/manage_net_ids/remove_net_id/', views.remove_net_id, name='remove_net_id'),
path('grader/manage_net_ids/upload_net_id_csv/', views.upload_net_id_csv, name='upload_net_id_csv'),
path('grader/manage_net_ids/clear_net_id/', views.clear_net_id, name='clear_net_id'),
path('accounts/', include('django.contrib.auth.urls'))
]
| [
"py_grader.handler.add_grading_methods_to_db",
"django.urls.path",
"django.urls.include"
] | [((797, 824), 'py_grader.handler.add_grading_methods_to_db', 'add_grading_methods_to_db', ([], {}), '()\n', (822, 824), False, 'from py_grader.handler import add_grading_methods_to_db\n'), ((857, 888), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (861, 888), False, 'from django.urls import include, path\n'), ((909, 944), 'django.urls.path', 'path', (['""""""', 'views.index'], {'name': '"""index"""'}), "('', views.index, name='index')\n", (913, 944), False, 'from django.urls import include, path\n'), ((948, 992), 'django.urls.path', 'path', (['"""submit/"""', 'views.submit'], {'name': '"""submit"""'}), "('submit/', views.submit, name='submit')\n", (952, 992), False, 'from django.urls import include, path\n'), ((995, 1051), 'django.urls.path', 'path', (['"""submit_get/"""', 'views.submit_get'], {'name': '"""submit_get"""'}), "('submit_get/', views.submit_get, name='submit_get')\n", (999, 1051), False, 'from django.urls import include, path\n'), ((1054, 1147), 'django.urls.path', 'path', (['"""submit/<str:assignment_name>/"""', 'views.submit_assignment'], {'name': '"""submit_assignment"""'}), "('submit/<str:assignment_name>/', views.submit_assignment, name=\n 'submit_assignment')\n", (1058, 1147), False, 'from django.urls import include, path\n'), ((1146, 1251), 'django.urls.path', 'path', (['"""view_submission_result/"""', 'views.view_any_submission_result'], {'name': '"""view_any_submission_result"""'}), "('view_submission_result/', views.view_any_submission_result, name=\n 'view_any_submission_result')\n", (1150, 1251), False, 'from django.urls import include, path\n'), ((1249, 1365), 'django.urls.path', 'path', (['"""view_submission_result_get/"""', 'views.view_any_submission_result_get'], {'name': '"""view_any_submission_result_get"""'}), "('view_submission_result_get/', views.view_any_submission_result_get,\n name='view_any_submission_result_get')\n", (1253, 1365), False, 'from django.urls import include, path\n'), ((1364, 1481), 'django.urls.path', 'path', (['"""view_submission_result/<int:submission_id>/"""', 'views.view_submission_result'], {'name': '"""view_submission_result"""'}), "('view_submission_result/<int:submission_id>/', views.\n view_submission_result, name='view_submission_result')\n", (1368, 1481), False, 'from django.urls import include, path\n'), ((1496, 1552), 'django.urls.path', 'path', (['"""grader/"""', 'views.grader_index'], {'name': '"""grader_index"""'}), "('grader/', views.grader_index, name='grader_index')\n", (1500, 1552), False, 'from django.urls import include, path\n'), ((1556, 1622), 'django.urls.path', 'path', (['"""grader/test_submit/"""', 'views.test_submit'], {'name': '"""test_submit"""'}), "('grader/test_submit/', views.test_submit, name='test_submit')\n", (1560, 1622), False, 'from django.urls import include, path\n'), ((1625, 1703), 'django.urls.path', 'path', (['"""grader/test_submit_get/"""', 'views.test_submit_get'], {'name': '"""test_submit_get"""'}), "('grader/test_submit_get/', views.test_submit_get, name='test_submit_get')\n", (1629, 1703), False, 'from django.urls import include, path\n'), ((1706, 1821), 'django.urls.path', 'path', (['"""grader/test_submit/<str:assignment_name>/"""', 'views.test_submit_assignment'], {'name': '"""test_submit_assignment"""'}), "('grader/test_submit/<str:assignment_name>/', views.\n test_submit_assignment, name='test_submit_assignment')\n", (1710, 1821), False, 'from django.urls import include, path\n'), ((1820, 1905), 'django.urls.path', 'path', 
(['"""grader/view_assignment_results/"""', 'views.view_results'], {'name': '"""view_results"""'}), "('grader/view_assignment_results/', views.view_results, name='view_results'\n )\n", (1824, 1905), False, 'from django.urls import include, path\n'), ((1903, 2000), 'django.urls.path', 'path', (['"""grader/view_assignment_results_get/"""', 'views.view_results_get'], {'name': '"""view_results_get"""'}), "('grader/view_assignment_results_get/', views.view_results_get, name=\n 'view_results_get')\n", (1907, 2000), False, 'from django.urls import include, path\n'), ((1998, 2127), 'django.urls.path', 'path', (['"""grader/view_assignment_results/<str:assignment_name>/"""', 'views.view_assignment_results'], {'name': '"""view_assignment_results"""'}), "('grader/view_assignment_results/<str:assignment_name>/', views.\n view_assignment_results, name='view_assignment_results')\n", (2002, 2127), False, 'from django.urls import include, path\n'), ((2132, 2221), 'django.urls.path', 'path', (['"""grader/create_assignment/"""', 'views.create_assignment'], {'name': '"""create_assignment"""'}), "('grader/create_assignment/', views.create_assignment, name=\n 'create_assignment')\n", (2136, 2221), False, 'from django.urls import include, path\n'), ((2219, 2285), 'django.urls.path', 'path', (['"""grader/assignments/"""', 'views.assignments'], {'name': '"""assignments"""'}), "('grader/assignments/', views.assignments, name='assignments')\n", (2223, 2285), False, 'from django.urls import include, path\n'), ((2288, 2373), 'django.urls.path', 'path', (['"""grader/add_test_case/"""', 'views.add_any_test_case'], {'name': '"""add_any_test_case"""'}), "('grader/add_test_case/', views.add_any_test_case, name='add_any_test_case'\n )\n", (2292, 2373), False, 'from django.urls import include, path\n'), ((2371, 2472), 'django.urls.path', 'path', (['"""grader/add_any_test_case_get/"""', 'views.add_any_test_case_get'], {'name': '"""add_any_test_case_get"""'}), "('grader/add_any_test_case_get/', views.add_any_test_case_get, name=\n 'add_any_test_case_get')\n", (2375, 2472), False, 'from django.urls import include, path\n'), ((2470, 2567), 'django.urls.path', 'path', (['"""grader/add_test_case/<str:assignment_name>"""', 'views.add_test_case'], {'name': '"""add_test_case"""'}), "('grader/add_test_case/<str:assignment_name>', views.add_test_case,\n name='add_test_case')\n", (2474, 2567), False, 'from django.urls import include, path\n'), ((2567, 2642), 'django.urls.path', 'path', (['"""grader/manage_net_ids/"""', 'views.manage_net_ids'], {'name': '"""manage_net_ids"""'}), "('grader/manage_net_ids/', views.manage_net_ids, name='manage_net_ids')\n", (2571, 2642), False, 'from django.urls import include, path\n'), ((2645, 2723), 'django.urls.path', 'path', (['"""grader/manage_net_ids/add_net_id/"""', 'views.add_net_id'], {'name': '"""add_net_id"""'}), "('grader/manage_net_ids/add_net_id/', views.add_net_id, name='add_net_id')\n", (2649, 2723), False, 'from django.urls import include, path\n'), ((2726, 2818), 'django.urls.path', 'path', (['"""grader/manage_net_ids/remove_net_id/"""', 'views.remove_net_id'], {'name': '"""remove_net_id"""'}), "('grader/manage_net_ids/remove_net_id/', views.remove_net_id, name=\n 'remove_net_id')\n", (2730, 2818), False, 'from django.urls import include, path\n'), ((2816, 2919), 'django.urls.path', 'path', (['"""grader/manage_net_ids/upload_net_id_csv/"""', 'views.upload_net_id_csv'], {'name': '"""upload_net_id_csv"""'}), "('grader/manage_net_ids/upload_net_id_csv/', views.upload_net_id_csv,\n 
name='upload_net_id_csv')\n", (2820, 2919), False, 'from django.urls import include, path\n'), ((2918, 3007), 'django.urls.path', 'path', (['"""grader/manage_net_ids/clear_net_id/"""', 'views.clear_net_id'], {'name': '"""clear_net_id"""'}), "('grader/manage_net_ids/clear_net_id/', views.clear_net_id, name=\n 'clear_net_id')\n", (2922, 3007), False, 'from django.urls import include, path\n'), ((3024, 3059), 'django.urls.include', 'include', (['"""django.contrib.auth.urls"""'], {}), "('django.contrib.auth.urls')\n", (3031, 3059), False, 'from django.urls import include, path\n')] |
'''
XData.py
Data object for holding a dense matrix X of real 64-bit floating point numbers.
Each row of X represents a single observation.
Example
--------
>> import numpy as np
>> from bnpy.data import XData
>> X = np.random.randn(1000, 3) # Create 1000x3 matrix
>> myData = XData(X)
>> print myData.nObs
1000
>> print myData.D
3
>> print myData.X.shape
(1000,3)
'''
import numpy as np
from .DataObj import DataObj
from .MinibatchIterator import MinibatchIterator
class XData(DataObj):
@classmethod
def read_from_mat(cls, matfilepath, nObsTotal=None, **kwargs):
''' Static Constructor for building an instance of XData from disk
'''
import scipy.io
InDict = scipy.io.loadmat( matfilepath, **kwargs)
if 'X' not in InDict:
raise KeyError('Stored matfile needs to have data in field named X')
return cls( InDict['X'], nObsTotal )
def __init__(self, X, nObsTotal=None, TrueZ=None):
    ''' Create an instance of XData given an array.
        Reallocation of memory may occur to ensure that
X is a 2D numpy array with proper byteorder, contiguity, and ownership.
'''
X = np.asarray(X)
if X.ndim < 2:
X = X[np.newaxis,:]
self.X = np.float64(X.newbyteorder('=').copy())
self.set_dependent_params(nObsTotal=nObsTotal)
self.check_dims()
if TrueZ is not None:
self.addTrueLabels(TrueZ)
def addTrueLabels(self, TrueZ):
''' Adds a "true" discrete segmentation of this data,
          so that each of the nObs items has a single label
'''
assert self.nObs == TrueZ.size
self.TrueLabels = TrueZ
def to_minibatch_iterator(self, **kwargs):
return MinibatchIterator(self, **kwargs)
######################################################### internal methods
#########################################################
def set_dependent_params( self, nObsTotal=None):
self.nObs = self.X.shape[0]
self.dim = self.X.shape[1]
if nObsTotal is None:
self.nObsTotal = self.nObs
else:
self.nObsTotal = nObsTotal
def check_dims( self ):
assert self.X.ndim == 2
assert self.X.flags.c_contiguous
assert self.X.flags.owndata
assert self.X.flags.aligned
assert self.X.flags.writeable
######################################################### DataObj operations
#########################################################
def select_subset_by_mask(self, mask, doTrackFullSize=True):
''' Creates new XData object by selecting certain rows (observations)
If doTrackFullSize is True,
ensure nObsTotal attribute is the same as the full dataset.
'''
if doTrackFullSize:
return XData(self.X[mask], nObsTotal=self.nObsTotal)
return XData(self.X[mask])
def add_data(self, XDataObj):
''' Updates (in-place) this object by adding new data
'''
if not self.dim == XDataObj.dim:
raise ValueError("Dimensions must match!")
self.nObs += XDataObj.nObs
self.nObsTotal += XDataObj.nObsTotal
self.X = np.vstack([self.X, XDataObj.X])
def get_random_sample(self, nObs, randstate=np.random):
nObs = np.minimum(nObs, self.nObs)
mask = randstate.permutation(self.nObs)[:nObs]
Data = self.select_subset_by_mask(mask, doTrackFullSize=False)
return Data
######################################################### I/O methods
#########################################################
def __str__(self):
np.set_printoptions(precision=5)
return self.X.__str__()
def summarize_num_observations(self):
return ' num obs: %d' % (self.nObsTotal)
| [
"numpy.set_printoptions",
"numpy.asarray",
"numpy.vstack",
"numpy.minimum"
] | [((1133, 1146), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (1143, 1146), True, 'import numpy as np\n'), ((3037, 3068), 'numpy.vstack', 'np.vstack', (['[self.X, XDataObj.X]'], {}), '([self.X, XDataObj.X])\n', (3046, 3068), True, 'import numpy as np\n'), ((3139, 3166), 'numpy.minimum', 'np.minimum', (['nObs', 'self.nObs'], {}), '(nObs, self.nObs)\n', (3149, 3166), True, 'import numpy as np\n'), ((3461, 3493), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(5)'}), '(precision=5)\n', (3480, 3493), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.conf.urls import include, url
from rest_framework.routers import DefaultRouter
from dataflow.batch.views import (
custom_calculates_views,
custom_jobs_views,
data_makeup_views,
debug_views,
hdfs_views,
interactive_servers_views,
job_views,
processings_views,
views,
yarn_views,
)
router = DefaultRouter(trailing_slash=True)
router.register(r"hdfs/namenode", hdfs_views.NameNodeViewSet, basename="namenode")
router.register(r"hdfs/(?P<hdfs_ns_id>\w+)", hdfs_views.HDFSUtilViewSet, basename="hdfs_liststatus")
router.register(
r"hdfs/result_tables/(?P<result_table_id>\w+)",
hdfs_views.ResultTableViewSet,
basename="hdfs_result_table",
)
router.register(
r"yarn/(?P<yarn_id>\w+)/resourcemanager",
yarn_views.ResourceManagerViewSet,
basename="resourcemanager",
)
router.register(r"jobs", job_views.JobViewSet, basename="batch_job")
router.register(
r"jobs/(?P<job_id>\w+)/processings",
job_views.JobProcessingsViewSet,
basename="batch_job_processings",
)
router.register(r"processings", processings_views.ProcessingsViewSet, basename="batch_processings")
# router.register(r'adhoc', adhoc_views.ADHocViewSet, basename='batch_adhoc')
router.register(r"debugs", debug_views.DebugViewSet, basename="batch_debug")
router.register(
r"custom_calculates",
custom_calculates_views.CustomCalculatesViewSet,
basename="batch_recalculates",
)
router.register(r"cluster_config", views.ConfigViewSet, basename="batch_cluster_config")
router.register(r"data_makeup", data_makeup_views.DataMakeupViewSet, basename="batch_data_makeup")
router.register(
r"interactive_servers",
interactive_servers_views.InteractiveServersViewSet,
basename="batch_interactive_server",
)
router.register(
r"interactive_servers/(?P<server_id>\w+)/codes",
interactive_servers_views.InteractiveServersCodeViewSet,
basename="batch_interactive_server",
)
router.register(r"custom_jobs", custom_jobs_views.CustomJobsViewSet, basename="batch_custom_jobs")
urlpatterns = [
url(r"^", include(router.urls)),
url(
r"^healthz/$",
views.HealthCheckView.as_view({"get": "healthz"}),
name="batch_healthz",
),
url(r"^deploy/$", views.DeployView.as_view({"get": "deploy"}), name="batch_deploy"),
url(
r"^errorcodes/$",
views.BatchErrorCodesView.as_view({"get": "errorcodes"}),
name="batch_errorcodes",
),
]
| [
"dataflow.batch.views.views.HealthCheckView.as_view",
"dataflow.batch.views.views.BatchErrorCodesView.as_view",
"django.conf.urls.include",
"dataflow.batch.views.views.DeployView.as_view",
"rest_framework.routers.DefaultRouter"
] | [((1717, 1751), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {'trailing_slash': '(True)'}), '(trailing_slash=True)\n', (1730, 1751), False, 'from rest_framework.routers import DefaultRouter\n'), ((3442, 3462), 'django.conf.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (3449, 3462), False, 'from django.conf.urls import include, url\n'), ((3505, 3554), 'dataflow.batch.views.views.HealthCheckView.as_view', 'views.HealthCheckView.as_view', (["{'get': 'healthz'}"], {}), "({'get': 'healthz'})\n", (3534, 3554), False, 'from dataflow.batch.views import custom_calculates_views, custom_jobs_views, data_makeup_views, debug_views, hdfs_views, interactive_servers_views, job_views, processings_views, views, yarn_views\n'), ((3615, 3658), 'dataflow.batch.views.views.DeployView.as_view', 'views.DeployView.as_view', (["{'get': 'deploy'}"], {}), "({'get': 'deploy'})\n", (3639, 3658), False, 'from dataflow.batch.views import custom_calculates_views, custom_jobs_views, data_makeup_views, debug_views, hdfs_views, interactive_servers_views, job_views, processings_views, views, yarn_views\n'), ((3725, 3781), 'dataflow.batch.views.views.BatchErrorCodesView.as_view', 'views.BatchErrorCodesView.as_view', (["{'get': 'errorcodes'}"], {}), "({'get': 'errorcodes'})\n", (3758, 3781), False, 'from dataflow.batch.views import custom_calculates_views, custom_jobs_views, data_makeup_views, debug_views, hdfs_views, interactive_servers_views, job_views, processings_views, views, yarn_views\n')] |
"""
Pydoctor API Runner
---------------------
Using pkg_resources, we attempt to see if pydoctor is installed,
if so, we use its cli program to compile the documents
"""
try:
import sys
import os
import shutil
import pkg_resources
pkg_resources.require("pydoctor")
from pydoctor.driver import main
sys.argv = '''pydoctor.py --quiet
--project-name=Pymodbus3
--project-url=http://uzumaxy.github.io/pymodbus3/
--add-package=../../../pymodbus3
--html-output=html
--html-write-function-pages --make-html'''.split()
print("Building Pydoctor API Documentation")
main(sys.argv[1:])
if os.path.exists('../../../build'):
shutil.move("html", "../../../build/pydoctor")
except:
print("Pydoctor unavailable...not building")
| [
"os.path.exists",
"pkg_resources.require",
"pydoctor.driver.main",
"shutil.move"
] | [((252, 285), 'pkg_resources.require', 'pkg_resources.require', (['"""pydoctor"""'], {}), "('pydoctor')\n", (273, 285), False, 'import pkg_resources\n'), ((634, 652), 'pydoctor.driver.main', 'main', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (638, 652), False, 'from pydoctor.driver import main\n'), ((661, 693), 'os.path.exists', 'os.path.exists', (['"""../../../build"""'], {}), "('../../../build')\n", (675, 693), False, 'import os\n'), ((703, 749), 'shutil.move', 'shutil.move', (['"""html"""', '"""../../../build/pydoctor"""'], {}), "('html', '../../../build/pydoctor')\n", (714, 749), False, 'import shutil\n')] |
# made by <NAME>
# <EMAIL>
from discordwebhook.app import main
if __name__ == '__main__':
main().main_loop()
| [
"discordwebhook.app.main"
] | [((96, 102), 'discordwebhook.app.main', 'main', ([], {}), '()\n', (100, 102), False, 'from discordwebhook.app import main\n')] |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php or see LICENSE file.
# Copyright 2007-2008 <NAME> <<EMAIL>>
""" Device-side action class used for implementing UPnP actions.
"""
from brisa.upnp.base_action import BaseAction, BaseArgument
from brisa.core import log
class InvalidActionOutput(Exception):
pass
class InvalidActionInput(Exception):
pass
class Argument(BaseArgument):
pass
class Action(BaseAction):
def __init__(self, service, name, arguments = []):
BaseAction.__init__(self, service, name, arguments)
self.run_function = self.run
def add_argument(self, argument):
""" Adds an argument to the action.
@param argument: the argument
@type argument: ArgumentDevice
"""
if argument:
self.arguments.append(argument)
def get_in_argument(self, name):
""" Returns the in argument with the given name.
@param name: argument name
@type name: string
@rtype: Argument
"""
for arg in self.arguments:
if arg.direction == Argument.IN and arg.name == name:
return arg
return None
def get_out_argument(self, name):
""" Returns the out argument with the given name.
@param name: argument name
@type name: string
@rtype: Argument
"""
for arg in self.arguments:
if arg.direction == Argument.OUT and arg.name == name:
return arg
return None
def __call__(self, *args, **kwargs):
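        """ Validates the given in arguments against the action definition,
        runs the action implementation and wraps the validated out arguments
        in a '<action name>Response' dictionary.
        """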
log.debug('Entering at action %s __call__' % self.name)
# Update in arguments
in_kwargs = {}
log.debug('Updating IN variables')
for arg_name, arg_value in list(kwargs.items()):
if arg_name != '__header__':
arg = self.get_in_argument(arg_name)
if not arg:
log.error('Input argument "%s" not' \
' present on action definition.' \
% arg_name)
raise InvalidActionInput('Input argument "%s" not' \
' present on action definition.' \
% arg_name)
if arg.state_var:
arg.state_var.update(arg_value)
in_kwargs[arg_name] = arg_value
log.debug('Calling run function')
returned_kwargs = self.run_function(*(), **in_kwargs)
if not isinstance(returned_kwargs, dict):
msg = 'returned value from service function is not a dict.'
log.error(msg)
raise InvalidActionOutput(msg)
# Update out arguments
out_kwargs = {}
log.debug('Updating OUT variables')
for arg_name, arg_value in list(returned_kwargs.items()):
if arg_name != '__header__':
arg = self.get_out_argument(arg_name)
if not arg:
log.error('output contains argument "%s" not'\
' present on action definition' % \
arg_name)
raise InvalidActionOutput('output contains argument "%s" not'\
' present on action definition' % \
arg_name)
if arg.state_var:
arg.state_var.update(arg_value)
out_kwargs[arg_name] = arg_value
log.debug('Returning from action %s __call__' % self.name)
if "__header__" in out_kwargs:
header = out_kwargs.pop('__header__', {})
return {self.name + "Response": out_kwargs,
"__header__": header}
else:
return {self.name + "Response": out_kwargs}
def run(self, *args, **kwargs):
return {}
| [
"brisa.upnp.base_action.BaseAction.__init__",
"brisa.core.log.debug",
"brisa.core.log.error"
] | [((531, 582), 'brisa.upnp.base_action.BaseAction.__init__', 'BaseAction.__init__', (['self', 'service', 'name', 'arguments'], {}), '(self, service, name, arguments)\n', (550, 582), False, 'from brisa.upnp.base_action import BaseAction, BaseArgument\n'), ((1663, 1718), 'brisa.core.log.debug', 'log.debug', (["('Entering at action %s __call__' % self.name)"], {}), "('Entering at action %s __call__' % self.name)\n", (1672, 1718), False, 'from brisa.core import log\n'), ((1781, 1815), 'brisa.core.log.debug', 'log.debug', (['"""Updating IN variables"""'], {}), "('Updating IN variables')\n", (1790, 1815), False, 'from brisa.core import log\n'), ((2509, 2542), 'brisa.core.log.debug', 'log.debug', (['"""Calling run function"""'], {}), "('Calling run function')\n", (2518, 2542), False, 'from brisa.core import log\n'), ((2863, 2898), 'brisa.core.log.debug', 'log.debug', (['"""Updating OUT variables"""'], {}), "('Updating OUT variables')\n", (2872, 2898), False, 'from brisa.core import log\n'), ((3627, 3685), 'brisa.core.log.debug', 'log.debug', (["('Returning from action %s __call__' % self.name)"], {}), "('Returning from action %s __call__' % self.name)\n", (3636, 3685), False, 'from brisa.core import log\n'), ((2740, 2754), 'brisa.core.log.error', 'log.error', (['msg'], {}), '(msg)\n', (2749, 2754), False, 'from brisa.core import log\n'), ((2015, 2092), 'brisa.core.log.error', 'log.error', (['(\'Input argument "%s" not present on action definition.\' % arg_name)'], {}), '(\'Input argument "%s" not present on action definition.\' % arg_name)\n', (2024, 2092), False, 'from brisa.core import log\n'), ((3108, 3198), 'brisa.core.log.error', 'log.error', (['(\'output contains argument "%s" not present on action definition\' % arg_name)'], {}), '(\'output contains argument "%s" not present on action definition\' %\n arg_name)\n', (3117, 3198), False, 'from brisa.core import log\n')] |
from ctypes import c_void_p
from OpenGL.GL import (
GL_FLOAT, GL_FALSE,
glGenVertexArrays, glBindVertexArray, glDeleteVertexArrays,
glEnableVertexAttribArray, glVertexAttribPointer
)
class VertexArray():
def __init__(self):
self.id = glGenVertexArrays(1)
self.vertexBuffer = None
self.elementBuffer = None
def cleanup(self):
self.vertexBuffer.cleanup()
if self.elementBuffer:
self.elementBuffer.cleanup()
glDeleteVertexArrays(1, [self.id])
def bind(self):
glBindVertexArray(self.id)
def unbind(self):
glBindVertexArray(0)
def set_vertex_buffer(self, vertexBuffer):
self.bind()
vertexBuffer.bind()
#Iterate through layout's vertex buffer elements
stride = vertexBuffer.layout.stride
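        # enable one attribute slot per layout element and point it at that
        # element's byte offset within the interleaved vertex buffer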
for i, element in enumerate(vertexBuffer.layout.vbElements):
glEnableVertexAttribArray(i)
glVertexAttribPointer(
i,
element.dataType,
GL_FLOAT,
GL_FALSE,
stride,
c_void_p(element.offset))
self.vertexBuffer = vertexBuffer
def set_element_buffer(self, elementBuffer):
self.bind()
elementBuffer.bind()
self.elementBuffer = elementBuffer
| [
"OpenGL.GL.glGenVertexArrays",
"OpenGL.GL.glBindVertexArray",
"OpenGL.GL.glEnableVertexAttribArray",
"ctypes.c_void_p",
"OpenGL.GL.glDeleteVertexArrays"
] | [((241, 261), 'OpenGL.GL.glGenVertexArrays', 'glGenVertexArrays', (['(1)'], {}), '(1)\n', (258, 261), False, 'from OpenGL.GL import GL_FLOAT, GL_FALSE, glGenVertexArrays, glBindVertexArray, glDeleteVertexArrays, glEnableVertexAttribArray, glVertexAttribPointer\n'), ((427, 461), 'OpenGL.GL.glDeleteVertexArrays', 'glDeleteVertexArrays', (['(1)', '[self.id]'], {}), '(1, [self.id])\n', (447, 461), False, 'from OpenGL.GL import GL_FLOAT, GL_FALSE, glGenVertexArrays, glBindVertexArray, glDeleteVertexArrays, glEnableVertexAttribArray, glVertexAttribPointer\n'), ((482, 508), 'OpenGL.GL.glBindVertexArray', 'glBindVertexArray', (['self.id'], {}), '(self.id)\n', (499, 508), False, 'from OpenGL.GL import GL_FLOAT, GL_FALSE, glGenVertexArrays, glBindVertexArray, glDeleteVertexArrays, glEnableVertexAttribArray, glVertexAttribPointer\n'), ((531, 551), 'OpenGL.GL.glBindVertexArray', 'glBindVertexArray', (['(0)'], {}), '(0)\n', (548, 551), False, 'from OpenGL.GL import GL_FLOAT, GL_FALSE, glGenVertexArrays, glBindVertexArray, glDeleteVertexArrays, glEnableVertexAttribArray, glVertexAttribPointer\n'), ((789, 817), 'OpenGL.GL.glEnableVertexAttribArray', 'glEnableVertexAttribArray', (['i'], {}), '(i)\n', (814, 817), False, 'from OpenGL.GL import GL_FLOAT, GL_FALSE, glGenVertexArrays, glBindVertexArray, glDeleteVertexArrays, glEnableVertexAttribArray, glVertexAttribPointer\n'), ((918, 942), 'ctypes.c_void_p', 'c_void_p', (['element.offset'], {}), '(element.offset)\n', (926, 942), False, 'from ctypes import c_void_p\n')] |
import os, django
import sys
path = os.path.dirname(os.path.abspath(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(path)
# print(sys.path)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
django.setup()
import requests
import datetime
from state import models
import json
from urllib.request import Request, urlopen
import pandas as pd
def state():
url = "https://api.covidactnow.org/v2/states.timeseries.json?apiKey=<KEY>"
firefox_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
request = requests.get(url = url, headers = firefox_headers)
json_data = request.json()
json_list = []
for i in json_data:
json_dict = {}
json_dict["state"] = i['state']
json_dict["cases"] = i['actuals']['cases']
json_dict["cases_day"] = i['actualsTimeseries'][-4]['newCases']
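        # sum newCases/newDeaths over the preceding 7 daily entries, treating missing values as 0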
cases_week = 0
deaths_week = 0
j = len(i['actualsTimeseries']) - 5
count = 0
while count < 7:
if i['actualsTimeseries'][j]['newCases'] == None:
cases_week = cases_week + 0
else:
cases_week = cases_week + i['actualsTimeseries'][j]['newCases']
if i['actualsTimeseries'][j]['newDeaths'] == None:
deaths_week = deaths_week + 0
else:
deaths_week = deaths_week + i['actualsTimeseries'][j]['newDeaths']
count = count + 1
j = j - 1
json_dict["cases_week"] = cases_week
json_dict["deaths"] = i['actuals']['deaths']
json_dict["deaths_day"] = i['actualsTimeseries'][-4]['newDeaths']
json_dict["deaths_week"] = deaths_week
json_dict["vac_adm"] = i['actuals']['vaccinesAdministered']
json_dict["vac_first"] = i['actuals']['vaccinationsInitiated']
json_dict["vac_second"] = i['actuals']['vaccinationsCompleted']
json_list.append(json_dict)
for i in json_list:
if i['deaths_day'] == None:
i['deaths_day'] = 0
if i['cases_day'] == None:
i['cases_day'] = 0
data = pd.DataFrame(json_list)
data = data.fillna(0)
for i in range(0, len(data)):
if models.State.objects.filter(state = data['state'][i]):
models.State.objects.filter(state = data['state'][i]).update(
cases = data['cases'][i],
cases_day = data['cases_day'][i],
cases_week = data['cases_week'][i],
deaths = data['deaths'][i],
deaths_day = data['deaths_day'][i],
deaths_week = data['deaths_week'][i],
vac_adm = data['vac_adm'][i],
vac_first = data['vac_first'][i],
vac_second = data['vac_second'][i]
)
else:
models.State.objects.create(
state = data['state'][i],
cases = data['cases'][i],
cases_day = data['cases_day'][i],
cases_week = data['cases_week'][i],
deaths = data['deaths'][i],
deaths_day = data['deaths_day'][i],
deaths_week = data['deaths_week'][i],
vac_adm = data['vac_adm'][i],
vac_first = data['vac_first'][i],
vac_second = data['vac_second'][i]
)
def main():
url = 'https://api.covidactnow.org/v2/states.timeseries.json?apiKey={}'.format('181793668aad431a9b54ccbb61004edb')
html = requests.get(url=url,verify=False)
for resu in html.json():
state = resu['state'] #State name
        print(state)
actualsTimeseries = resu['actualsTimeseries'] #get time series data
for actuals in actualsTimeseries:
newCases = actuals['newCases']
if not newCases:
newCases = 0
newDeaths = actuals['newDeaths']
if not newDeaths:
newDeaths = 0
detester = actuals['date']
date = datetime.datetime.strptime(detester,'%Y-%m-%d')
# print(date)
# print(models.Covid.objects.all())
if not models.Covid.objects.filter(date=date).filter(state=state): #Check if the data is already in the db
models.Covid.objects.create( #append data to db
date=date,
state=state,
Cases=newCases,
Deaths=newDeaths
)
if __name__ == '__main__':
main()
state()
| [
"os.environ.setdefault",
"state.models.State.objects.filter",
"django.setup",
"state.models.Covid.objects.filter",
"datetime.datetime.strptime",
"os.path.abspath",
"requests.get",
"state.models.State.objects.create",
"state.models.Covid.objects.create",
"pandas.DataFrame",
"sys.path.append"
] | [((116, 137), 'sys.path.append', 'sys.path.append', (['path'], {}), '(path)\n', (131, 137), False, 'import sys\n'), ((160, 227), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""project.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'project.settings')\n", (181, 227), False, 'import os, django\n'), ((229, 243), 'django.setup', 'django.setup', ([], {}), '()\n', (241, 243), False, 'import os, django\n'), ((631, 677), 'requests.get', 'requests.get', ([], {'url': 'url', 'headers': 'firefox_headers'}), '(url=url, headers=firefox_headers)\n', (643, 677), False, 'import requests\n'), ((2273, 2296), 'pandas.DataFrame', 'pd.DataFrame', (['json_list'], {}), '(json_list)\n', (2285, 2296), True, 'import pandas as pd\n'), ((3714, 3749), 'requests.get', 'requests.get', ([], {'url': 'url', 'verify': '(False)'}), '(url=url, verify=False)\n', (3726, 3749), False, 'import requests\n'), ((2381, 2432), 'state.models.State.objects.filter', 'models.State.objects.filter', ([], {'state': "data['state'][i]"}), "(state=data['state'][i])\n", (2408, 2432), False, 'from state import models\n'), ((86, 111), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'import os, django\n'), ((3004, 3353), 'state.models.State.objects.create', 'models.State.objects.create', ([], {'state': "data['state'][i]", 'cases': "data['cases'][i]", 'cases_day': "data['cases_day'][i]", 'cases_week': "data['cases_week'][i]", 'deaths': "data['deaths'][i]", 'deaths_day': "data['deaths_day'][i]", 'deaths_week': "data['deaths_week'][i]", 'vac_adm': "data['vac_adm'][i]", 'vac_first': "data['vac_first'][i]", 'vac_second': "data['vac_second'][i]"}), "(state=data['state'][i], cases=data['cases'][i],\n cases_day=data['cases_day'][i], cases_week=data['cases_week'][i],\n deaths=data['deaths'][i], deaths_day=data['deaths_day'][i], deaths_week\n =data['deaths_week'][i], vac_adm=data['vac_adm'][i], vac_first=data[\n 'vac_first'][i], vac_second=data['vac_second'][i])\n", (3031, 3353), False, 'from state import models\n'), ((4257, 4305), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['detester', '"""%Y-%m-%d"""'], {}), "(detester, '%Y-%m-%d')\n", (4283, 4305), False, 'import datetime\n'), ((4520, 4610), 'state.models.Covid.objects.create', 'models.Covid.objects.create', ([], {'date': 'date', 'state': 'state', 'Cases': 'newCases', 'Deaths': 'newDeaths'}), '(date=date, state=state, Cases=newCases, Deaths=\n newDeaths)\n', (4547, 4610), False, 'from state import models\n'), ((2449, 2500), 'state.models.State.objects.filter', 'models.State.objects.filter', ([], {'state': "data['state'][i]"}), "(state=data['state'][i])\n", (2476, 2500), False, 'from state import models\n'), ((4401, 4439), 'state.models.Covid.objects.filter', 'models.Covid.objects.filter', ([], {'date': 'date'}), '(date=date)\n', (4428, 4439), False, 'from state import models\n')] |
import os
import logging
import math
from pyvivado import builder, interface, signal, project
from rfgnocchi import config, noc, ettus
logger = logging.getLogger(__name__)
if __name__ == '__main__':
config.setup_logging(logging.DEBUG)
    # NB: this assignment shadows the `builder` module imported from pyvivado
    builder = ettus.get_builder('e300')
directory = os.path.abspath('proj_qa_e300')
p = project.BuilderProject.create_or_update(
design_builders=[builder],
simulation_builders=[],
part='xc7z020clg484-1',
parameters={'factory_name': 'e300'},
directory=directory,
)
| [
"logging.getLogger",
"rfgnocchi.ettus.get_builder",
"rfgnocchi.config.setup_logging",
"os.path.abspath",
"pyvivado.project.BuilderProject.create_or_update"
] | [((147, 174), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (164, 174), False, 'import logging\n'), ((207, 242), 'rfgnocchi.config.setup_logging', 'config.setup_logging', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (227, 242), False, 'from rfgnocchi import config, noc, ettus\n'), ((257, 282), 'rfgnocchi.ettus.get_builder', 'ettus.get_builder', (['"""e300"""'], {}), "('e300')\n", (274, 282), False, 'from rfgnocchi import config, noc, ettus\n'), ((299, 330), 'os.path.abspath', 'os.path.abspath', (['"""proj_qa_e300"""'], {}), "('proj_qa_e300')\n", (314, 330), False, 'import os\n'), ((339, 520), 'pyvivado.project.BuilderProject.create_or_update', 'project.BuilderProject.create_or_update', ([], {'design_builders': '[builder]', 'simulation_builders': '[]', 'part': '"""xc7z020clg484-1"""', 'parameters': "{'factory_name': 'e300'}", 'directory': 'directory'}), "(design_builders=[builder],\n simulation_builders=[], part='xc7z020clg484-1', parameters={\n 'factory_name': 'e300'}, directory=directory)\n", (378, 520), False, 'from pyvivado import builder, interface, signal, project\n')] |
import argparse
import json
import logging
import os
from time import sleep, time
from typing import Any, Dict
import gym
import pybulletgym
from .gym_simulator import GymSimulator
log = logging.getLogger("PyBulletSimulator")
log.setLevel(level='INFO')
class PyBulletSimulator(GymSimulator):
""" GymSimulator class
End users should subclass GymSimulator to interface OpenAI Gym
environments to the Bonsai platform. The derived class should provide
the mapping between Bonsai and OpenAI environment's action and states and
specify the name of the OpenAI environemnt
"""
environment_name = '' # name of the OpenAI Gym environment specified in derived class
def __init__(self, iteration_limit=200, skip_frame=1):
""" Initializes the PyBulletSimulator object
"""
super().__init__(iteration_limit, skip_frame)
def make_environment(self, headless):
log.debug("Making PyBullet environment {}...".format(self.environment_name))
self._env = gym.make(self.environment_name)
if not headless:
self._env.render()
self._env.reset()
| [
"logging.getLogger",
"gym.make"
] | [((189, 227), 'logging.getLogger', 'logging.getLogger', (['"""PyBulletSimulator"""'], {}), "('PyBulletSimulator')\n", (206, 227), False, 'import logging\n'), ((1034, 1065), 'gym.make', 'gym.make', (['self.environment_name'], {}), '(self.environment_name)\n', (1042, 1065), False, 'import gym\n')] |
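The PyBulletSimulator docstring above says end users subclass it, name the OpenAI Gym environment, and supply the state/action mapping. A hedged sketch of that subclassing pattern follows; the gym_to_state/action_to_gym method names and the environment id are assumptions, since the GymSimulator base interface is not shown in this row.

class InvertedPendulumSimulator(PyBulletSimulator):
    # pybulletgym registers the PyBullet environment ids on import
    environment_name = 'InvertedPendulumPyBulletEnv-v0'

    def gym_to_state(self, observation):
        # map the raw gym observation vector to named state fields
        return {'obs_{}'.format(i): float(v) for i, v in enumerate(observation)}

    def action_to_gym(self, action):
        # map the platform action dictionary back to the gym action space
        return [action['command']]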
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-10 11:13
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('distances', '0011_auto_20170602_1044'),
]
operations = [
migrations.AddField(
model_name='exercise',
name='sub_sport',
field=models.CharField(default='', max_length=25),
),
migrations.AlterField(
model_name='dates',
name='startDate',
field=models.DateField(default=datetime.datetime(2017, 6, 3, 14, 13, 55, 386022)),
),
migrations.AlterField(
model_name='exercise',
name='sport',
field=models.CharField(choices=[('Running', 'Running'), ('Skiing', 'Skiing'), ('Cycling', 'Cycling'), ('Walking', 'Walking'), ('Swimming', 'Swimming'), ('Other', 'Other')], default='Running', max_length=20),
),
]
| [
"datetime.datetime",
"django.db.models.CharField"
] | [((420, 463), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(25)'}), "(default='', max_length=25)\n", (436, 463), False, 'from django.db import migrations, models\n'), ((785, 994), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('Running', 'Running'), ('Skiing', 'Skiing'), ('Cycling', 'Cycling'), (\n 'Walking', 'Walking'), ('Swimming', 'Swimming'), ('Other', 'Other')]", 'default': '"""Running"""', 'max_length': '(20)'}), "(choices=[('Running', 'Running'), ('Skiing', 'Skiing'), (\n 'Cycling', 'Cycling'), ('Walking', 'Walking'), ('Swimming', 'Swimming'),\n ('Other', 'Other')], default='Running', max_length=20)\n", (801, 994), False, 'from django.db import migrations, models\n'), ((612, 661), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(6)', '(3)', '(14)', '(13)', '(55)', '(386022)'], {}), '(2017, 6, 3, 14, 13, 55, 386022)\n', (629, 661), False, 'import datetime\n')] |
import os
import importlib
from avalon import api, lib, style
from pype.api import Logger, resources
from pype.lib import (
ApplictionExecutableNotFound,
ApplicationLaunchFailed
)
from Qt import QtWidgets, QtGui
class ProjectManagerAction(api.Action):
name = "projectmanager"
label = "Project Manager"
icon = "gear"
order = 999 # at the end
def is_compatible(self, session):
return "AVALON_PROJECT" in session
def process(self, session, **kwargs):
return lib.launch(
executable="python",
args=[
"-u", "-m", "avalon.tools.projectmanager",
session['AVALON_PROJECT']
]
)
class LoaderAction(api.Action):
name = "loader"
label = "Loader"
icon = "cloud-download"
order = 998
def is_compatible(self, session):
return "AVALON_PROJECT" in session
def process(self, session, **kwargs):
return lib.launch(
executable="python",
args=[
"-u", "-m", "avalon.tools.loader", session['AVALON_PROJECT']
]
)
class LoaderLibrary(api.Action):
name = "loader_os"
label = "Library Loader"
icon = "book"
order = 997 # at the end
def is_compatible(self, session):
return True
def process(self, session, **kwargs):
return lib.launch(
executable="python",
args=["-u", "-m", "avalon.tools.libraryloader"]
)
def register_default_actions():
"""Register default actions for Launcher"""
api.register_plugin(api.Action, ProjectManagerAction)
api.register_plugin(api.Action, LoaderAction)
api.register_plugin(api.Action, LoaderLibrary)
def register_config_actions():
"""Register actions from the configuration for Launcher"""
module_name = os.environ["AVALON_CONFIG"]
config = importlib.import_module(module_name)
if not hasattr(config, "register_launcher_actions"):
print(
"Current configuration `%s` has no 'register_launcher_actions'"
% config.__name__
)
return
config.register_launcher_actions()
def register_actions_from_paths(paths):
if not paths:
return
for path in paths:
if not path:
continue
if path.startswith("."):
print((
"BUG: Relative paths are not allowed for security reasons. {}"
).format(path))
continue
if not os.path.exists(path):
print("Path was not found: {}".format(path))
continue
api.register_plugin_path(api.Action, path)
def register_environment_actions():
"""Register actions from AVALON_ACTIONS for Launcher."""
paths_str = os.environ.get("AVALON_ACTIONS") or ""
register_actions_from_paths(paths_str.split(os.pathsep))
class ApplicationAction(api.Action):
"""Pype's application launcher
Application action based on pype's ApplicationManager system.
"""
# Application object
application = None
# Action attributes
name = None
label = None
label_variant = None
group = None
icon = None
color = None
order = 0
_log = None
required_session_keys = (
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK"
)
@property
def log(self):
if self._log is None:
self._log = Logger().get_logger(self.__class__.__name__)
return self._log
def is_compatible(self, session):
for key in self.required_session_keys:
if key not in session:
return False
return True
def _show_message_box(self, title, message, details=None):
dialog = QtWidgets.QMessageBox()
icon = QtGui.QIcon(resources.pype_icon_filepath())
dialog.setWindowIcon(icon)
dialog.setStyleSheet(style.load_stylesheet())
dialog.setWindowTitle(title)
dialog.setText(message)
if details:
dialog.setDetailedText(details)
dialog.exec_()
def process(self, session, **kwargs):
"""Process the full Application action"""
project_name = session["AVALON_PROJECT"]
asset_name = session["AVALON_ASSET"]
task_name = session["AVALON_TASK"]
try:
self.application.launch(
project_name=project_name,
asset_name=asset_name,
task_name=task_name
)
except ApplictionExecutableNotFound as exc:
details = exc.details
msg = exc.msg
log_msg = str(msg)
if details:
log_msg += "\n" + details
self.log.warning(log_msg)
self._show_message_box(
"Application executable not found", msg, details
)
except ApplicationLaunchFailed as exc:
msg = str(exc)
self.log.warning(msg, exc_info=True)
self._show_message_box("Application launch failed", msg)
| [
"os.path.exists",
"avalon.lib.launch",
"importlib.import_module",
"os.environ.get",
"avalon.api.register_plugin_path",
"pype.api.Logger",
"Qt.QtWidgets.QMessageBox",
"pype.api.resources.pype_icon_filepath",
"avalon.style.load_stylesheet",
"avalon.api.register_plugin"
] | [((1581, 1634), 'avalon.api.register_plugin', 'api.register_plugin', (['api.Action', 'ProjectManagerAction'], {}), '(api.Action, ProjectManagerAction)\n', (1600, 1634), False, 'from avalon import api, lib, style\n'), ((1639, 1684), 'avalon.api.register_plugin', 'api.register_plugin', (['api.Action', 'LoaderAction'], {}), '(api.Action, LoaderAction)\n', (1658, 1684), False, 'from avalon import api, lib, style\n'), ((1689, 1735), 'avalon.api.register_plugin', 'api.register_plugin', (['api.Action', 'LoaderLibrary'], {}), '(api.Action, LoaderLibrary)\n', (1708, 1735), False, 'from avalon import api, lib, style\n'), ((1892, 1928), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (1915, 1928), False, 'import importlib\n'), ((512, 624), 'avalon.lib.launch', 'lib.launch', ([], {'executable': '"""python"""', 'args': "['-u', '-m', 'avalon.tools.projectmanager', session['AVALON_PROJECT']]"}), "(executable='python', args=['-u', '-m',\n 'avalon.tools.projectmanager', session['AVALON_PROJECT']])\n", (522, 624), False, 'from avalon import api, lib, style\n'), ((960, 1064), 'avalon.lib.launch', 'lib.launch', ([], {'executable': '"""python"""', 'args': "['-u', '-m', 'avalon.tools.loader', session['AVALON_PROJECT']]"}), "(executable='python', args=['-u', '-m', 'avalon.tools.loader',\n session['AVALON_PROJECT']])\n", (970, 1064), False, 'from avalon import api, lib, style\n'), ((1380, 1465), 'avalon.lib.launch', 'lib.launch', ([], {'executable': '"""python"""', 'args': "['-u', '-m', 'avalon.tools.libraryloader']"}), "(executable='python', args=['-u', '-m', 'avalon.tools.libraryloader']\n )\n", (1390, 1465), False, 'from avalon import api, lib, style\n'), ((2620, 2662), 'avalon.api.register_plugin_path', 'api.register_plugin_path', (['api.Action', 'path'], {}), '(api.Action, path)\n', (2644, 2662), False, 'from avalon import api, lib, style\n'), ((2779, 2811), 'os.environ.get', 'os.environ.get', (['"""AVALON_ACTIONS"""'], {}), "('AVALON_ACTIONS')\n", (2793, 2811), False, 'import os\n'), ((3757, 3780), 'Qt.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', ([], {}), '()\n', (3778, 3780), False, 'from Qt import QtWidgets, QtGui\n'), ((2511, 2531), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2525, 2531), False, 'import os\n'), ((3808, 3838), 'pype.api.resources.pype_icon_filepath', 'resources.pype_icon_filepath', ([], {}), '()\n', (3836, 3838), False, 'from pype.api import Logger, resources\n'), ((3904, 3927), 'avalon.style.load_stylesheet', 'style.load_stylesheet', ([], {}), '()\n', (3925, 3927), False, 'from avalon import api, lib, style\n'), ((3436, 3444), 'pype.api.Logger', 'Logger', ([], {}), '()\n', (3442, 3444), False, 'from pype.api import Logger, resources\n')] |
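The launcher module above exposes three registration helpers plus a path-based variant. A minimal sketch of wiring them together at launcher start-up; the wrapper function name is an assumption.

def register_all_actions():
    # built-in actions first, then actions declared by the active config,
    # then anything pointed to by the AVALON_ACTIONS environment variable
    # (relative paths are rejected by register_actions_from_paths)
    register_default_actions()
    register_config_actions()
    register_environment_actions()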
import copy
import pickle
# from networkx.algorithms.centrality.current_flow_betweenness import edge_current_flow_betweenness_centrality
from model.entity_quotes import *
from model.complete_quote import *
from model.utils import *
from itertools import *
import configparser
config = configparser.ConfigParser()
config.read("config.ini")
ground_truth_path = config.get("Paths","ground_truth_path")
our_languages = ["en", "it", "de"]
our_languages.reverse()
folder = "data/ground_truth"
corpus_filename = "corpus_evaluation_subset.pkl"
ground_truth = dict()
clusters = dict()
create_subcorpus = False
ground_truth_entities = {"Q105167": "<NAME>", "Q57661": "<NAME>", "Q13424289": "<NAME>",
"Q7251": "<NAME>", "Q47365": "<NAME>", "Q7304": "<NAME>",
"Q8409": "<NAME>", "Q7186": "<NAME>"}
# collect pairs of aligned and pairs of unaligned quotations from ground truth
for filename in os.listdir(folder):
if filename.endswith(".tsv"):
wikidata_id = filename.replace(".tsv", "")
print("Wikidata ID", wikidata_id)
ground_truth[wikidata_id] = dict()
clusters[wikidata_id] = dict()
file = open(folder + "/" + filename)
all_texts = set()
ground_truth_tps = set()
ground_truth_tns = set()
ground_truth[wikidata_id]["tps"] = ground_truth_tps
ground_truth[wikidata_id]["tns"] = ground_truth_tns
ground_truth[wikidata_id]["all"] = all_texts
clusters[wikidata_id]["unclustered"] = set()
clusters[wikidata_id]["clustered"] = set()
clusters[wikidata_id]["all"] = set()
for line in file:
parts = line.split("\t")
parts = list(map(lambda s: s.strip(), parts))
for part in parts:
part = part.strip()
if part:
all_texts.add(part)
for text1 in parts:
text1 = text1.strip()
if not text1:
continue
for text2 in parts:
text2 = text2.strip()
if not text2:
continue
if text1 != text2:
# collect all pairs of quotations that are aligned
pair = frozenset((text1, text2))
ground_truth_tps.add(pair)
# collect all pairs of quotations that are not aligned
for text1 in all_texts:
for text2 in all_texts:
if text1 != text2:
pair = frozenset((text1, text2))
if pair not in ground_truth_tps:
ground_truth_tns.add(pair)
for tp in ground_truth[wikidata_id]["tps"]:
print("---")
for quote in tp:
print(quote)
print("Load corpus", flush=True)
with open(corpus_filename, "rb") as f:
corpus = pickle.load(f)
print("Corpus loaded.", flush=True)
completeQuotesInGT = dict()
# load quotation clusters from our data
for completeQuote in corpus.completeQuotes.values():
# completeEntity = completeQuote.entity
# wikidata_id = completeEntity.wikidata_id
wikidata_id = completeQuote.id.split("_")[0]
if wikidata_id not in ground_truth.keys():
continue
completeQuotesInGT.update({completeQuote.id: completeQuote})
quote_texts = set()
if "en" in completeQuote.quotes.keys() or "de" in completeQuote.quotes.keys() or "it" in completeQuote.quotes.keys():
print("###")
if len(completeQuote.quotes.keys()) > 1:
for lang, quotes in completeQuote.quotes.items():
for quote in quotes:
if hasattr(quote, "quote"):
print(lang,quote.quote)
elif hasattr(quote, "translation"):
print(lang, quote.translation.text)
else:
print(lang, quote.original.text)
print("###")
for lang, quotes in completeQuote.quotes.items():
for quote in quotes:
if lang not in our_languages:
continue
if hasattr(quote, "quote"):
quote_text = lang + ": " + quote.quote
elif hasattr(quote, "translation"):
quote_text = lang + ": " + quote.translation.text
            else:
                quote_text = lang + ": " + quote.original.text
quote_text = quote_text.replace("\n", "")
if quote_text in ground_truth[wikidata_id]["all"]:
if quote_text not in clusters[wikidata_id]["all"]:
clusters[wikidata_id]["all"].add(quote_text)
quote_texts.add(quote_text)
else:
#print("$$$")
print("(", wikidata_id, ") Missing in ground truth:", quote_text)
for text1 in quote_texts:
for text2 in quote_texts:
if text1 == text2:
continue
pair = frozenset((text1, text2))
clusters[wikidata_id]["clustered"].add(pair)
if create_subcorpus:
corpus.completeQuotes = completeQuotesInGT
with open("corpus_evaluation_subset.pkl", "wb") as f:
pickle.dump(corpus, f)
# collect pairs of quotations that are not within the same cluster
for wikidata_id in clusters.keys():
for text1 in clusters[wikidata_id]["all"]:
for text2 in clusters[wikidata_id]["all"]:
if text1 != text2:
pair = frozenset((text1, text2))
if pair not in clusters[wikidata_id]["clustered"]:
clusters[wikidata_id]["unclustered"].add(pair)
# remove non-existing texts from ground truth (because we evaluate the clustering here, not the selection of quotations)
for wikidata_id in ground_truth.keys():
for text in ground_truth[wikidata_id]["all"]:
if text not in clusters[wikidata_id]["all"]:
print("(", wikidata_id, ") Missing in corpus:", text)
pairs_to_remove = set()
for pair in ground_truth[wikidata_id]["tps"]:
if text in pair:
pairs_to_remove.add(pair)
ground_truth[wikidata_id]["tps"].difference_update(pairs_to_remove)
pairs_to_remove = set()
for pair in ground_truth[wikidata_id]["tns"]:
if text in pair:
pairs_to_remove.add(pair)
ground_truth[wikidata_id]["tns"].difference_update(pairs_to_remove)
# evaluation
average_f_score = 0
average_p = 0
average_r = 0
tp_total = 0
tn_total = 0
fp_total = 0
fn_total = 0
for wikidata_id in ground_truth.keys():
print("wikidata_id:", wikidata_id)
tps = ground_truth[wikidata_id]["tps"]
tns = ground_truth[wikidata_id]["tns"]
clustered = clusters[wikidata_id]["clustered"]
unclustered = clusters[wikidata_id]["unclustered"]
print("Ground truth, tps:", len(tps))
print("Ground truth, tns:", len(tns))
print("Clustered:", len(clustered))
print("Unclustered:", len(unclustered))
print("tps+tns:", len(tps) + len(tns))
print("clustered+unclustered:", len(unclustered) + len(clustered))
# TP: How many pairs that are in tps are also in clustered?
tp = len(tps.intersection(clustered))
print("TP:", tp)
tp_total += tp
tn = len(tns.intersection(unclustered))
print("TN:", tn)
tn_total += tn
fp = len(clustered.difference(tps))
for cluster in clustered.difference(tps):
print("FP:",cluster)
print("FP:", fp)
fp_total += fp
fn = len(unclustered.difference(tns))
print("FN:", fn)
fn_total += fn
for cluster in unclustered.difference(tns):
print("FN:",cluster)
print("Sum", tp + fp + tn + fn)
if fp == 0:
p = 1
else:
p = tp / (tp + fp)
print("P", p)
average_p += p
r = tp / (tp + fn)
print("R", r)
average_r += r
if tp == 0 and (fn > 0 or fp > 0):
f = 0
else:
beta = 1
f = ((beta ** 2 + 1) * p * r) / (beta ** 2 * p + r)
average_f_score += f
# LaTeX output
print(ground_truth_entities[wikidata_id], "&", tp, "&", tn, "&", fp, "&", fn, "&", p, "&", r, "&", f)
print("")
average_p = average_p / len(ground_truth.keys())
average_r = average_r / len(ground_truth.keys())
average_f_score = average_f_score / len(ground_truth.keys())
print("Average F score: ", average_f_score)
print("Average precision: ", average_p)
print("Average recall: ", average_r)
print("TP, total: ", tp_total)
print("TN, total: ", tn_total)
print("FP, total: ", fp_total)
print("FN, total: ", fn_total)
# LaTeX output
print(tp_total, "&", tn_total, "&", fp_total, "&", fn_total, "&", average_p, "&", average_r, "&", average_f_score)
| [
"pickle.dump",
"pickle.load",
"configparser.ConfigParser"
] | [((287, 314), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (312, 314), False, 'import configparser\n'), ((2929, 2943), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2940, 2943), False, 'import pickle\n'), ((5235, 5257), 'pickle.dump', 'pickle.dump', (['corpus', 'f'], {}), '(corpus, f)\n', (5246, 5257), False, 'import pickle\n')] |
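The evaluation loop above scores each entity by comparing quotation pairs from the ground truth against the clustered pairs. The per-entity arithmetic can be restated as a small self-contained helper; the function name is an assumption and the set arguments mirror the tps/tns/clustered/unclustered variables used in the script.

def pairwise_scores(tps, tns, clustered, unclustered, beta=1.0):
    # true/false positives and negatives over quotation pairs
    tp = len(tps & clustered)
    tn = len(tns & unclustered)
    fp = len(clustered - tps)
    fn = len(unclustered - tns)
    p = 1.0 if fp == 0 else tp / (tp + fp)
    r = tp / (tp + fn) if (tp + fn) else 0.0
    if tp == 0 and (fp > 0 or fn > 0):
        f = 0.0
    else:
        # F-beta as in the script, with beta defaulting to 1
        f = ((beta ** 2 + 1) * p * r) / (beta ** 2 * p + r)
    return tp, tn, fp, fn, p, r, f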