{
"source": "jpegbert/text2vec",
"score": 3
}
#### File: text2vec/tests/emb_w2v_test.py
```python
import text2vec
from text2vec.embeddings.word_embedding import WordEmbedding
def test_word_emb():
b = WordEmbedding()
data1 = '你 好 啊'.split(' ')
r = b.embed([data1], True)
print(r)
print(r.shape)
def test_oov_emb():
char = ','
result = text2vec.encode(char)
print(char, result)
char = '特价机票'
result = text2vec.encode(char)
print(char, result)
char = '特价'
result = text2vec.encode(char)
print(char, result)
char = '机票'
result = text2vec.encode(char)
print(char, result)
def test_oov_sim():
from text2vec import Similarity
sim = Similarity()
a = ','
b = '花'
s = sim.get_score(a, b)
print(a, b, s)
a = ',画画'
b = '花画画'
s = sim.get_score(a, b)
print(a, b, s)
a = ','
b = '花画画'
s = sim.get_score(a, b)
print(a, b, s)
a = ',机票'
b = '特价机票'
s = sim.get_score(a, b)
print(a, b, s)
a = '机票'
b = '特价机票'
s = sim.get_score(a, b)
print(a, b, s)
a = '机票'
b = '特价的机票'
s = sim.get_score(a, b)
print(a, b, s)
def test_sentence_emb():
char = '你'
result = text2vec.encode(char)
print(char, result)
char = '好'
result = text2vec.encode(char)
print(char, result)
char = '吗'
result = text2vec.encode(char)
print(char, result)
char = '你好'
result = text2vec.encode(char)
print(char, result)
char = '你好吗'
result = text2vec.encode(char)
print(char, result)
import numpy as np
emb = [text2vec.encode('你好'), text2vec.encode('吗')]
average = np.array(emb).sum(axis=0) / 2.0
print('average:', average)
act = text2vec.encode('你好吗')
if np.allclose(np.array(act), average):
print("same")
else:
print('diff')
```
#### File: text2vec/text2vec/vector.py
```python
import os
pwd_path = os.path.abspath(os.path.dirname(__file__))
class EmbType(object):
BERT = 'bert'
W2V = 'w2v'
class Vector(object):
def __init__(self, embedding_type=EmbType.W2V,
w2v_path='',
w2v_kwargs=None,
sequence_length=128,
processor=None,
trainable=False,
bert_model_folder='',
bert_layer_nums=4):
self.embedding_type = embedding_type
self.w2v_path = w2v_path
self.w2v_kwargs = w2v_kwargs # default: {binary:False}
self.sequence_length = sequence_length
self.processor = processor
self.trainable = trainable
self.bert_model_folder = bert_model_folder
self.bert_layer_nums = bert_layer_nums
self.model = None
self.stopwords_file = os.path.join(pwd_path, 'data/stopwords.txt')
def load_model(self):
if not self.model:
if self.embedding_type == EmbType.BERT:
from text2vec.embeddings.bert_embedding import BERTEmbedding
self.model = BERTEmbedding(model_folder=self.bert_model_folder,
layer_nums=self.bert_layer_nums,
trainable=self.trainable,
sequence_length=self.sequence_length,
processor=self.processor)
elif self.embedding_type == EmbType.W2V:
from text2vec.embeddings.word_embedding import WordEmbedding
self.model = WordEmbedding(w2v_path=self.w2v_path,
w2v_kwargs=self.w2v_kwargs,
sequence_length=self.sequence_length,
processor=self.processor,
trainable=self.trainable,
stopwords_file=self.stopwords_file)
else:
raise ValueError('Unsupported embedding type: %s' % self.embedding_type)
def tokenize(self, text):
if not text.strip():
return []
self.load_model()
return self.model.tokenizer.tokenize(text.lower().strip())
def encode(self, tokens):
ret = 0.0
if not tokens:
return ret
self.load_model()
if isinstance(tokens, str):
tokens = self.tokenize(tokens)
return self.model.embed_one(tokens)
def set_stopwords_file(self, stopwords_file_path):
self.stopwords_file = stopwords_file_path
self.load_model()
```
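A minimal usage sketch of the `Vector` wrapper above, assuming the package layout shown (`text2vec/text2vec/vector.py`) and a local word2vec text file; the `./w2v.txt` path is a placeholder, not a file shipped with the repo. The embedding model is loaded lazily on the first call, and `encode` returns the averaged embedding of the token list.
```python
# Minimal sketch, assuming ./w2v.txt is a locally available word2vec text file (placeholder path).
from text2vec.vector import Vector, EmbType

vec = Vector(embedding_type=EmbType.W2V, w2v_path='./w2v.txt')
tokens = vec.tokenize('特价机票')    # first call lazily loads the embedding model, then tokenizes
emb = vec.encode(tokens)             # averaged embedding for the token list (0.0 if tokens is empty)
print(tokens, getattr(emb, 'shape', None))
```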
{
"source": "jpeirce21/api",
"score": 2
}
#### File: api/handlers/event.py
```python
from _main_.utils.route_handler import RouteHandler
from _main_.utils.common import get_request_contents, parse_list, parse_bool, check_length, parse_date, parse_int, parse_location
from api.services.event import EventService
from _main_.utils.massenergize_response import MassenergizeResponse
from types import FunctionType as function
from _main_.utils.context import Context
from _main_.utils.validator import Validator
from api.decorators import admins_only, super_admins_only, login_required
class EventHandler(RouteHandler):
def __init__(self):
super().__init__()
self.service = EventService()
self.registerRoutes()
def registerRoutes(self) -> None:
self.add("/events.info", self.info)
self.add("/events.create", self.create)
self.add("/events.add", self.create)
self.add("/events.copy", self.copy)
self.add("/events.list", self.list)
self.add("/events.update", self.update)
self.add("/events.delete", self.delete)
self.add("/events.remove", self.delete)
self.add("/events.rank", self.rank)
self.add("/events.rsvp", self.rsvp)
self.add("/events.rsvp.update", self.rsvp_update)
self.add("/events.rsvp.remove", self.rsvp_remove)
self.add("/events.todo", self.save_for_later)
#admin routes
self.add("/events.listForCommunityAdmin", self.community_admin_list)
self.add("/events.listForSuperAdmin", self.super_admin_list)
def info(self, request):
context: Context = request.context
args: dict = context.args
self.validator.expect("event_id", is_required=True)
args, err = self.validator.verify(args, strict=True)
if err:
return err
event_info, err = self.service.get_event_info(context, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
@admins_only
def copy(self, request):
context: Context = request.context
args: dict = context.args
self.validator.expect("event_id", is_required=True)
args, err = self.validator.verify(args, strict=True)
if err:
return err
event_info, err = self.service.copy_event(context, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
@login_required
def rsvp(self, request):
context: Context = request.context
args: dict = context.args
self.validator.expect("event_id", is_required=True)
args, err = self.validator.verify(args, strict=True)
if err:
return err
event_info, err = self.service.rsvp(context, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
@login_required
def rsvp_update(self, request):
context: Context = request.context
args: dict = context.args
self.validator.expect("event_id", is_required=True)
self.validator.expect("status", is_required=False)
args, err = self.validator.verify(args, strict=True)
if err:
return err
event_info, err = self.service.rsvp_update(context, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
@login_required
def rsvp_remove(self, request):
context: Context = request.context
args: dict = context.args
self.validator.expect("rsvp_id", is_required=True)
args, err = self.validator.verify(args, strict=True)
if err:
return err
event_info, err = self.service.rsvp_remove(context, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
@login_required
def save_for_later(self, request):
context: Context = request.context
args: dict = context.args
self.validator.expect("event_id", is_required=True)
args, err = self.validator.verify(args, strict=True)
if err:
return err
event_info, err = self.service.get_event_info(context, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
# TODO implement validator
@admins_only
def create(self, request):
context: Context = request.context
args: dict = context.args
ok, err = check_length(args, 'name', min_length=5, max_length=100)
if not ok:
return MassenergizeResponse(error=str(err), status=err.status)
args['tags'] = parse_list(args.get('tags', []))
args['is_global'] = parse_bool(args.pop('is_global', None))
args['archive'] = parse_bool(args.pop('archive', None))
args['is_published'] = parse_bool(args.pop('is_published', None))
args['have_address'] = parse_bool(args.pop('have_address', False))
args = parse_location(args)
event_info, err = self.service.create_event(context, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
def list(self, request):
context: Context = request.context
args: dict = context.args
self.validator.expect("community_id", is_required=False)
self.validator.expect("subdomain", is_required=False)
self.validator.expect("user_id", is_required=False)
args, err = self.validator.verify(args, strict=True)
if err:
return err
event_info, err = self.service.list_events(context, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
# TODO implement validator
@admins_only
def update(self, request):
context: Context = request.context
args: dict = context.args
event_id = args.pop('event_id', None)
ok, err = check_length(args, 'name', min_length=5, max_length=100)
if not ok:
return MassenergizeResponse(error=str(err), status=err.status)
args['tags'] = parse_list(args.get('tags', []))
args['is_global'] = parse_bool(args.pop('is_global', None))
args['archive'] = parse_bool(args.pop('archive', None))
args['is_published'] = parse_bool(args.pop('is_published', None))
args['have_address'] = parse_bool(args.pop('have_address', False))
args = parse_location(args)
event_info, err = self.service.update_event(context, event_id, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
@admins_only
def rank(self, request):
context: Context = request.context
args: dict = context.args
self.validator.expect('id', int, is_required=True)
self.validator.expect('rank', int, is_required=True)
self.validator.rename('event_id', 'id')
args, err = self.validator.verify(args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
event_info, err = self.service.rank_event(args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
@admins_only
def delete(self, request):
context: Context = request.context
args: dict = context.args
event_id = args.get("event_id", None)
event_info, err = self.service.delete_event(context, event_id)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=event_info)
@admins_only
def community_admin_list(self, request):
context: Context = request.context
args: dict = context.args
self.validator.expect("community_id", is_required=True)
args, err = self.validator.verify(args)
if err:
return err
events, err = self.service.list_events_for_community_admin(context, args)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=events)
@super_admins_only
def super_admin_list(self, request):
context: Context = request.context
events, err = self.service.list_events_for_super_admin(context)
if err:
return MassenergizeResponse(error=str(err), status=err.status)
return MassenergizeResponse(data=events)
```
#### File: api/handlers/page_settings__events.py
```python
from database.models import EventsPageSettings
from api.handlers.page_settings import PageSettingsHandler
class EventsPageSettingsHandler(PageSettingsHandler):
def __init__(self):
super().__init__('events', EventsPageSettings)
```
#### File: api/store/event.py
```python
from database.models import Event, UserProfile, EventAttendee, Media, Community
from _main_.utils.massenergize_errors import MassEnergizeAPIError, InvalidResourceError, ServerError, CustomMassenergizeError, NotAuthorizedError
from _main_.utils.massenergize_response import MassenergizeResponse
from django.db.models import Q
from _main_.utils.context import Context
from sentry_sdk import capture_message
from .utils import get_user_or_die
class EventStore:
def __init__(self):
self.name = "Event Store/DB"
def get_event_info(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
event_id = args.pop("event_id")
events_selected = Event.objects.select_related('image', 'community').prefetch_related('tags', 'invited_communities').filter(id=event_id)
event = events_selected.first()
if not event:
return None, InvalidResourceError()
return event, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def copy_event(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
event_id = args.pop("event_id")
events_selected = Event.objects.select_related('image', 'community').prefetch_related('tags', 'invited_communities').filter(id=event_id)
event_to_copy: Event = events_selected.first()
if not event_to_copy:
return None, InvalidResourceError()
old_tags = event_to_copy.tags.all()
event_to_copy.pk = None
new_event = event_to_copy
new_event.name = event_to_copy.name + "-Copy"
new_event.is_published=False
new_event.start_date_and_time = event_to_copy.start_date_and_time
new_event.end_date_and_time = event_to_copy.end_date_and_time
new_event.description = event_to_copy.description
new_event.featured_summary = event_to_copy.featured_summary
new_event.location = event_to_copy.location
new_event.save()
#copy tags over
for t in old_tags:
new_event.tags.add(t)
return new_event, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def list_events(self, context: Context, args) -> (list, MassEnergizeAPIError):
community_id = args.pop("community_id", None)
subdomain = args.pop("subdomain", None)
user_id = args.pop("user_id", None)
if community_id:
#TODO: also account for communities who are added as invited_communities
query = Q(community__id=community_id)
events = Event.objects.select_related('image', 'community').prefetch_related('tags', 'invited_communities').filter(query)
elif subdomain:
query = Q(community__subdomain=subdomain)
events = Event.objects.select_related('image', 'community').prefetch_related('tags', 'invited_communities').filter(query)
elif user_id:
events = EventAttendee.objects.filter(attendee=user_id)
else:
events = []
if not context.is_sandbox and events:
events = events.filter(is_published=True)
return events, None
def create_event(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
image = args.pop('image', None)
tags = args.pop('tags', [])
community = args.pop("community_id", None)
have_address = args.pop('have_address', False)
if not have_address:
args['location'] = None
if community:
community = Community.objects.get(pk=community)
if not community:
return None, CustomMassenergizeError("Please provide a valid community_id")
new_event: Event = Event.objects.create(**args)
if community:
new_event.community = community
if image:
media = Media.objects.create(file=image, name=f"ImageFor{args.get('name', '')}Event")
new_event.image = media
new_event.save()
if tags:
new_event.tags.set(tags)
return new_event, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def update_event(self, context: Context, event_id, args) -> (dict, MassEnergizeAPIError):
try:
image = args.pop('image', None)
tags = args.pop('tags', [])
events = Event.objects.filter(id=event_id)
have_address = args.pop('have_address', False)
if not have_address:
args['location'] = None
community = args.pop("community_id", None)
if community:
community = Community.objects.filter(pk=community).first()
event: Event = events.first()
if not event:
return None, CustomMassenergizeError(f"No event with id: {event_id}")
events.update(**args)
if image:
media = Media.objects.create(file=image, name=f"ImageFor{args.get('name', '')}Event")
event.image = media
if community:
event.community = community
else:
event.community = None
event.save()
if tags:
event.tags.set(tags)
return event, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def rank_event(self, args) -> (dict, MassEnergizeAPIError):
try:
id = args.get('id', None)
rank = args.get('rank', None)
if id and rank:
events = Event.objects.filter(id=id)
events.update(rank=rank)
return events.first(), None
else:
raise Exception("Rank and ID not provided to events.rank")
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def delete_event(self, context: Context, event_id) -> (dict, MassEnergizeAPIError):
try:
events = Event.objects.filter(id=event_id)
if not events:
return None, InvalidResourceError()
if len(events) > 1:
return None, CustomMassenergizeError("Deleting multiple events not supported")
event = events.first()
events.delete()
return event, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def list_events_for_community_admin(self, context: Context, args) -> (list, MassEnergizeAPIError):
try:
community_id = args.pop("community_id", None)
if context.user_is_super_admin:
return self.list_events_for_super_admin(context)
elif not context.user_is_community_admin:
return None, NotAuthorizedError()
# community_id coming from admin portal is 'undefined'
if not community_id or community_id=='undefined':
user = UserProfile.objects.get(pk=context.user_id)
admin_groups = user.communityadmingroup_set.all()
comm_ids = [ag.community.id for ag in admin_groups]
events = Event.objects.filter(Q(community__id__in = comm_ids) | Q(is_global=True), is_deleted=False).select_related('image', 'community').prefetch_related('tags')
return events, None
events = Event.objects.filter(Q(community__id = community_id) | Q(is_global=True), is_deleted=False).select_related('image', 'community').prefetch_related('tags')
return events, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def list_events_for_super_admin(self, context: Context):
try:
if not context.user_is_super_admin:
return None, NotAuthorizedError()
events = Event.objects.filter(is_deleted=False).select_related('image', 'community').prefetch_related('tags')
return events, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def rsvp(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
event_id = args.pop("event_id", None)
args: dict = context.args
user = get_user_or_die(context, args)
event = Event.objects.filter(pk=event_id).first()
if not event:
return None, InvalidResourceError()
event_attendee = EventAttendee.objects.create(
event=event, attendee=user, status="RSVP")
return event_attendee, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def rsvp_update(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
event_id = args.pop("event_id", None)
status = args.pop("status", "SAVE")
args: dict = context.args
user = get_user_or_die(context, args)
event = Event.objects.filter(pk=event_id).first()
if not event:
return None, InvalidResourceError()
event_attendee = EventAttendee.objects.filter(
event=event, attendee=user).update(status=status)
return event_attendee, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def rsvp_remove(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
rsvp_id = args.pop("rsvp_id", None)
result = EventAttendee.objects.filter(pk=rsvp_id).delete()
return result, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
```
#### File: api/store/userprofile.py
```python
from database.models import UserProfile, CommunityMember, EventAttendee, RealEstateUnit, Location, UserActionRel, Vendor, Action, Data, Community
from _main_.utils.massenergize_errors import MassEnergizeAPIError, InvalidResourceError, ServerError, CustomMassenergizeError, NotAuthorizedError
from _main_.utils.massenergize_response import MassenergizeResponse
from _main_.utils.context import Context
from django.db.models import F
from sentry_sdk import capture_message
from .utils import get_community, get_user, get_user_or_die, get_community_or_die, get_admin_communities, remove_dups, find_reu_community, split_location_string, check_location
import json
def _get_or_create_reu_location(args, user=None):
unit_type=args.pop('unit_type', None)
location=args.pop('location', None)
# this address location now will contain the parsed address
address = args.pop('address', None)
if address:
# address passed as a JSON string
address = json.loads(address)
street = address.get('street', '')
unit_number = address.get('unit_number', '')
zipcode = address.get('zipcode', '')
city = address.get('city', '')
county = address.get('county', '')
state = address.get('state', '')
country = address.get('country','US')
else:
# Legacy: get address from location string
loc_parts = split_location_string(location)
street = unit_number = city = county = state = zipcode = None
country = 'US'
if len(loc_parts)>= 4:
street = loc_parts[0]
unit_number = ''
city = loc_parts[1]
county = ''
state = loc_parts[2]
zipcode = loc_parts[3]
country = 'US'
# check location is valid
location_type, valid = check_location(street, unit_number, city, state, zipcode, county, country)
if not valid:
print(location_type)
raise Exception(location_type)
reuloc, created = Location.objects.get_or_create(
location_type = location_type,
street = street,
unit_number = unit_number,
zipcode = zipcode,
city = city,
county = county,
state = state,
country = country
)
if created:
print("Location with zipcode " + str(zipcode) + " created for user " + user.preferred_name)
else:
print("Location with zipcode " + str(zipcode) + " found for user " + user.preferred_name)
return reuloc
class UserStore:
def __init__(self):
self.name = "UserProfile Store/DB"
def _has_access(self, context: Context, user_id=None, email=None):
"""
Checks to make sure if the user has access to the user profile they want to
access
"""
if (not user_id and not email):
return False
if not context.user_is_logged_in:
return False
if context.user_is_admin():
# TODO: update this to only super admins. Do specific checks for
# community admins to make sure user is in their community first
return True
if user_id and (context.user_id == user_id):
return True
if email and (context.user_email == email):
return True
return False
def get_user_info(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
#email = args.get('email', None)
#user_id = args.get('user_id', None)
# if not self._has_access(context, user_id, email):
# return None, CustomMassenergizeError("permission_denied")
user = get_user_or_die(context, args)
return user, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def remove_household(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
household_id = args.get('household_id', None)
if not household_id:
return None, CustomMassenergizeError("Please provide household_id")
return RealEstateUnit.objects.get(pk=household_id).delete(), None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def add_household(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
user = get_user_or_die(context, args)
name = args.pop('name', None)
unit_type=args.pop('unit_type', None)
reuloc = _get_or_create_reu_location(args, user)
reu = RealEstateUnit.objects.create(name=name, unit_type=unit_type)
reu.address = reuloc
community = find_reu_community(reu)
if community: reu.community = community
reu.save()
user.real_estate_units.add(reu)
user.save()
return reu, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def edit_household(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
user = get_user_or_die(context, args)
name = args.pop('name', None)
unit_type = args.pop('unit_type', None)
household_id = args.get('household_id', None)
if not household_id:
return None, CustomMassenergizeError("Please provide household_id")
reuloc = _get_or_create_reu_location(args, user)
reu = RealEstateUnit.objects.get(pk=household_id)
reu.name = name
reu.unit_type = unit_type
reu.address = reuloc
verbose = False
community = find_reu_community(reu, verbose)
if community:
if verbose: print("Updating the REU with zipcode " + str(reuloc.zipcode) + " to the community " + community.name)
reu.community = community
reu.save()
return reu, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def list_households(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
user = get_user_or_die(context, args)
return user.real_estate_units.all(), None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def list_users(self, community_id) -> (list, MassEnergizeAPIError):
community,err = get_community(community_id)
if not community:
print(err)
return [], None
return community.userprofile_set.all(), None
def list_events_for_user(self, context: Context, args) -> (list, MassEnergizeAPIError):
try:
user = get_user_or_die(context, args)
if not user:
return [], None
return EventAttendee.objects.filter(attendee=user), None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def create_user(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
email = args.get('email', None)
community = get_community_or_die(context, args)
# allow home address to be passed in
location = args.pop('location', '')
if not email:
return None, CustomMassenergizeError("email required for sign up")
user = UserProfile.objects.filter(email=email).first()
if not user:
new_user: UserProfile = UserProfile.objects.create(
full_name = args.get('full_name'),
preferred_name = args.get('preferred_name', None),
email = args.get('email'),
is_vendor = args.get('is_vendor', False),
accepts_terms_and_conditions = args.pop('accepts_terms_and_conditions', False)
)
else:
new_user: UserProfile = user
community_member_exists = CommunityMember.objects.filter(user=new_user, community=community).exists()
if not community_member_exists:
# add them as a member to community
CommunityMember.objects.create(user=new_user, community=community)
#create their first household
household = RealEstateUnit.objects.create(name="Home", unit_type="residential", community=community, location=location)
new_user.real_estate_units.add(household)
res = {
"user": new_user,
"community": community
}
return res, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def update_user(self, context: Context, user_id, args) -> (dict, MassEnergizeAPIError):
try:
email = args.get('email', None)
# user_id = args.get('user_id', None)
if not self._has_access(context, user_id, email):
return None, CustomMassenergizeError("permission_denied")
if context.user_is_logged_in and ((context.user_id == user_id) or (context.user_is_admin())):
user = UserProfile.objects.filter(id=user_id)
if not user:
return None, InvalidResourceError()
user.update(**args)
return user.first(), None
else:
return None, CustomMassenergizeError('permission_denied')
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def delete_user(self, context: Context, user_id) -> (dict, MassEnergizeAPIError):
try:
if not user_id:
return None, InvalidResourceError()
#check to make sure the one deleting is an admin
if not context.user_is_admin():
# if they are not an admin make sure they can only delete themselves
if context.user_id != user_id:
return None, NotAuthorizedError()
users = UserProfile.objects.filter(id=user_id)
users.update(is_deleted=True)
return users.first(), None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def list_users_for_community_admin(self, context: Context, community_id) -> (list, MassEnergizeAPIError):
try:
if context.user_is_super_admin:
return self.list_users_for_super_admin(context)
elif not context.user_is_community_admin:
return None, NotAuthorizedError()
community, err = get_community(community_id)
if not community and context.user_id:
communities, err = get_admin_communities(context)
comm_ids = [c.id for c in communities]
users = [cm.user for cm in CommunityMember.objects.filter(community_id__in=comm_ids, user__is_deleted=False)]
#now remove all duplicates
users = remove_dups(users)
return users, None
elif not community:
print(err)
return [], None
users = [cm.user for cm in CommunityMember.objects.filter(community=community, is_deleted=False, user__is_deleted=False)]
users = remove_dups(users)
return users, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def list_users_for_super_admin(self, context: Context):
try:
if not context.user_is_super_admin:
return None, NotAuthorizedError()
users = UserProfile.objects.filter(is_deleted=False)
return users, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def add_action_todo(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
user = get_user_or_die(context, args)
action_id = args.get("action_id", None)
household_id = args.get("household_id", None)
vendor_id = args.get("vendor_id", None)
if not user:
return None, CustomMassenergizeError("sign_in_required / provide user_id or user_email")
action: Action = Action.objects.get(id=action_id)
if not action:
return None, CustomMassenergizeError("Please provide a valid action_id")
if household_id:
household: RealEstateUnit = RealEstateUnit.objects.get(id=household_id)
else:
household = user.real_estate_units.all().first()
if not household:
household = RealEstateUnit(name=f"{user.preferred_name}'s Home'")
household.save()
user.real_estate_units.add(household)
if vendor_id:
vendor = Vendor.objects.get(id=vendor_id) #not required
#if this already exists as a todo just move it over
completed = UserActionRel.objects.filter(user=user, real_estate_unit=household, action=action)
if completed:
#TODO: update action stats
completed.update(status="TODO")
return completed.first(), None
# create a new one since we didn't find it existed before
new_user_action_rel = UserActionRel(user=user, action=action, real_estate_unit=household, status="TODO")
if vendor_id:
new_user_action_rel.vendor = vendor
new_user_action_rel.save()
return new_user_action_rel, None
except Exception as e:
capture_message(str(e), level="error")
import traceback
traceback.print_exc()
return None, CustomMassenergizeError(str(e))
def add_action_completed(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
user_id = context.user_id or args.get('user_id')
user_email = context.user_email or args.get('user_email')
action_id = args.get("action_id", None)
household_id = args.get("household_id", None)
vendor_id = args.get("vendor_id", None)
date_completed = args.get("date_completed", None)
# future use
carbon_impact = args.get("carbon_impact", 0)
user = None
if user_id:
user = UserProfile.objects.get(id=user_id)
elif user_email:
user = UserProfile.objects.get(email=user_email)
if not user:
return None, CustomMassenergizeError("sign_in_required / Provide user_id")
action = Action.objects.get(id=action_id)
if not action:
return None, CustomMassenergizeError("Please provide an action_id")
household = RealEstateUnit.objects.get(id=household_id)
if not household:
return None, CustomMassenergizeError("Please provide a household_id")
# update all data points
for t in action.tags.all():
data = Data.objects.filter(community=action.community, tag=t)
if data:
data.update(value=F("value") + 1)
else:
#data for this community, action does not exist so create one
d = Data(tag=t, community=action.community, value=1, name=f"{t.name}")
d.save()
#if this already exists as a todo just move it over
completed = UserActionRel.objects.filter(user=user, real_estate_unit=household, action=action)
if completed:
completed.update(
status="DONE",
date_completed=date_completed,
carbon_impact=carbon_impact
)
completed = completed.first()
if vendor_id:
vendor = Vendor.objects.get(id=vendor_id) #not required
completed.vendor = vendor
return completed, None
# create a new one since we didn't find it existed before
new_user_action_rel = UserActionRel(
user=user,
action=action,
real_estate_unit=household,
status="DONE",
date_completed=date_completed,
carbon_impact=carbon_impact
)
if vendor_id:
vendor = Vendor.objects.get(id=vendor_id) #not required
new_user_action_rel.vendor = vendor
new_user_action_rel.save()
return new_user_action_rel, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def list_todo_actions(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
if not context.user_is_logged_in:
return [], CustomMassenergizeError("sign_in_required")
user = get_user_or_die(context, args)
household_id = args.get("household_id", None)
if household_id:
todo = UserActionRel.objects.filter(status="TODO", user=user, real_state_unit__id=household_id)
else:
todo = UserActionRel.objects.filter(status="TODO", user=user)
return todo, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def list_completed_actions(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
if not context.user_is_logged_in:
return [], CustomMassenergizeError("sign_in_required")
user = get_user_or_die(context, args)
household_id = args.get("household_id", None)
if household_id:
todo = UserActionRel.objects.filter(status="DONE", user=user, real_state_unit__id=household_id)
else:
todo = UserActionRel.objects.filter(status="DONE", user=user)
return todo, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
def remove_user_action(self, context: Context, user_action_id) -> (dict, MassEnergizeAPIError):
try:
if not context.user_is_logged_in:
return [], CustomMassenergizeError("sign_in_required")
user_action = UserActionRel.objects.get(pk=user_action_id)
result = user_action.delete()
return result, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
```
#### File: api/store/vendor.py
```python
from database.models import Vendor, UserProfile, Media, Community
from _main_.utils.massenergize_errors import MassEnergizeAPIError, NotAuthorizedError, InvalidResourceError, ServerError, CustomMassenergizeError
from _main_.utils.massenergize_response import MassenergizeResponse
from django.utils.text import slugify
from _main_.utils.context import Context
from django.db.models import Q
from .utils import get_community_or_die, get_admin_communities
from _main_.utils.context import Context
from sentry_sdk import capture_message
class VendorStore:
def __init__(self):
self.name = "Vendor Store/DB"
def get_vendor_info(self, context, args) -> (dict, MassEnergizeAPIError):
try:
vendor_id = args.pop('vendor_id', None) or args.pop('id', None)
if not vendor_id:
return None, InvalidResourceError()
vendor = Vendor.objects.filter(pk=vendor_id).first()
if not vendor:
return None, InvalidResourceError()
return vendor, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def list_vendors(self, context: Context, args) -> (list, MassEnergizeAPIError):
try:
subdomain = args.pop('subdomain', None)
community_id = args.pop('community_id', None)
if community_id and community_id!='undefined':
community = Community.objects.get(pk=community_id)
elif subdomain:
community = Community.objects.get(subdomain=subdomain)
else:
community = None
if not community:
return [], None
vendors = community.vendor_set.filter(is_deleted=False)
if not context.is_sandbox:
vendors = vendors.filter(is_published=True)
return vendors, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def create_vendor(self, ctx: Context, args) -> (Vendor, MassEnergizeAPIError):
try:
tags = args.pop('tags', [])
communities = args.pop('communities', [])
image = args.pop('image', None)
onboarding_contact_email = args.pop('onboarding_contact_email', None)
key_contact_full_name = args.pop('key_contact_full_name', None)
key_contact_email = args.pop('key_contact_email', None)
website = args.pop('website', None)
args["key_contact"] = {
"name": key_contact_full_name,
"email": key_contact_email
}
have_address = args.pop('have_address', False)
if not have_address:
args['location'] = None
new_vendor = Vendor.objects.create(**args)
if image:
logo = Media(name=f"Logo-{slugify(new_vendor.name)}", file=image)
logo.save()
new_vendor.logo = logo
if onboarding_contact_email:
onboarding_contact = UserProfile.objects.filter(email=onboarding_contact_email).first()
if onboarding_contact:
new_vendor.onboarding_contact = onboarding_contact
if website:
new_vendor.more_info = {'website': website}
new_vendor.save()
if communities:
new_vendor.communities.set(communities)
if tags:
new_vendor.tags.set(tags)
new_vendor.save()
return new_vendor, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def update_vendor(self, context: Context, args) -> (dict, MassEnergizeAPIError):
try:
vendor_id = args.pop('vendor_id', None)
vendor = Vendor.objects.get(id=vendor_id)
if not vendor:
return None, InvalidResourceError()
have_address = args.pop('have_address', False)
if not have_address:
args['location'] = None
communities = args.pop('communities', [])
if communities:
vendor.communities.set(communities)
onboarding_contact_email = args.pop('onboarding_contact_email', None)
if onboarding_contact_email:
vendor.onboarding_contact_email = onboarding_contact_email
website = args.pop('website', None)
key_contact = args.pop('key_contact', {})
if key_contact:
if vendor.key_contact:
vendor.key_contact.update(key_contact)
else:
vendor.key_contact = args.pop('key_contact', key_contact)
image = args.pop('image', None)
if image:
logo = Media(name=f"Logo-{slugify(vendor.name)}", file=image)
logo.save()
vendor.logo = logo
if onboarding_contact_email:
onboarding_contact = UserProfile.objects.filter(email=onboarding_contact_email).first()
if onboarding_contact:
vendor.onboarding_contact = onboarding_contact
tags = args.pop('tags', [])
if tags:
vendor.tags.set(tags)
if website:
vendor.more_info = {'website': website}
vendor.save()
updated = Vendor.objects.filter(id=vendor_id).update(**args)
return vendor, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def rank_vendor(self, args) -> (dict, MassEnergizeAPIError):
try:
id = args.get("id", None)
rank = args.get("rank", None)
if id and rank:
vendors = Vendor.objects.filter(id=id)
vendors.update(rank=rank)
return vendors.first(), None
else:
raise Exception("Rank and ID not provided to vendors.rank")
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def delete_vendor(self, vendor_id) -> (dict, MassEnergizeAPIError):
try:
vendors = Vendor.objects.filter(id=vendor_id)
vendors.update(is_deleted=True)
#TODO: also remove it from all places that it was ever set in many to many or foreign key
return vendors.first(), None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def copy_vendor(self, vendor_id) -> (Vendor, MassEnergizeAPIError):
try:
vendor: Vendor = Vendor.objects.get(id=vendor_id)
if not vendor:
return None, CustomMassenergizeError(f"No vendor with id {vendor_id}")
vendor.pk = None
vendor.is_published = False
vendor.is_verified = False
vendor.name = vendor.name + "-Copy"
vendor.save()
return vendor, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def list_vendors_for_community_admin(self, context: Context, community_id) -> (list, MassEnergizeAPIError):
try:
if context.user_is_super_admin:
return self.list_vendors_for_super_admin(context)
elif not context.user_is_community_admin:
return None, NotAuthorizedError()
# community_id coming from admin portal as "null"
if not community_id or community_id=='undefined' or community_id=='null':
# different code in action.py/event.py
#user = UserProfile.objects.get(pk=context.user_id)
#admin_groups = user.communityadmingroup_set.all()
#comm_ids = [ag.community.id for ag in admin_groups]
#vendors = Vendor.objects.filter(community__id__in = comm_ids, is_deleted=False).select_related('logo', 'community')
communities, err = get_admin_communities(context)
vendors = None
for c in communities:
if vendors is not None:
vendors |= c.vendor_set.filter(is_deleted=False).select_related('logo').prefetch_related('communities', 'tags')
else:
vendors = c.vendor_set.filter(is_deleted=False).select_related('logo').prefetch_related('communities', 'tags')
return vendors.distinct(), None
community = get_community_or_die(context, {'community_id': community_id})
vendors = community.vendor_set.filter(is_deleted=False).select_related('logo').prefetch_related('communities', 'tags')
return vendors, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(e)
def list_vendors_for_super_admin(self, context: Context):
try:
vendors = Vendor.objects.filter(is_deleted=False).select_related('logo').prefetch_related('communities', 'tags')
return vendors, None
except Exception as e:
capture_message(str(e), level="error")
return None, CustomMassenergizeError(str(e))
```
#### File: api/tests/test_graphs.py
```python
from django.test import TestCase, Client
from database.models import Graph, Action, Community, Data, Tag, TagCollection, UserActionRel, Team, TeamMember
class GraphsTester(TestCase):
def setUp(self):
self.client = Client()
def test_info(self):
pass
def test_create(self):
pass
def test_add(self):
pass
def test_list(self):
pass
def test_actions_completed(self):
pass
def test_actions_completed_team(self):
pass
def test_community_impact(self):
pass
def test_update(self): # same as data update
pass
def test_delete(self): # same as remove
pass
def test_list_CAdmin(self):
pass
def test_list_SAdmin(self):
pass
```
#### File: carbon_calculator/tests/tests.py
```python
from django.test import TestCase, Client
from carbon_calculator.models import Event, Station, Action, Group, Question
from carbon_calculator.views import importcsv
from database.models import UserProfile
from django.db.models import Count
import json
import jsons
import requests
import os
import pprint, sys
from django.utils import timezone #For keeping track of when the consistency was last checked
from api.tests.common import signinAs, setupCC
OUTPUTS_FILE = "carbon_calculator/tests/expected_outputs.txt"
INPUTS_FILE = "carbon_calculator/tests/allPossibleInputs.txt"
VALUE_DIFF = "Value difference"
IMPORT_SUCCESS = {"status": True}
# Create your tests here.
class CarbonCalculatorTest(TestCase):
@classmethod
def setUpClass(self):
self.client = Client()
self.USER = UserProfile.objects.create(**{
'full_name': "<NAME>",
'email': '<EMAIL>',
})
self.CADMIN = UserProfile.objects.create(**{
'full_name': "Community Admin",
'email': '<EMAIL>',
'is_community_admin': True
})
self.SADMIN = UserProfile.objects.create(**{
'full_name': "<NAME>",
'email': '<EMAIL>',
'is_super_admin': True
})
signinAs(self.client, self.SADMIN)
setupCC(self.client)
generate_inputs = eval(os.getenv("GENERATE_INPUTS"))
if generate_inputs > 0:
print("Generating Carbon Calculator input files")
populate_inputs_file()
self.input_data = []
else:
infile = os.getenv("TEST_INPUTS",default=INPUTS_FILE)
print("Using input file: "+infile)
self.input_data = self.read_inputs(self,infile)
self.output_data = []
self.differences = []
@classmethod
def tearDownClass(self):
pass
#print("tearDownClass")
def test_info_actions(self):
# test routes function
# test there are actions
# test that one action has the average_points
response = self.client.get('/cc/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/cc/info/')
self.assertEqual(response.status_code, 200)
#for some reason this URL doesn't want the trailing slash
response = self.client.get('/cc/info/actions')
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.decode('utf8'))
data = data.get("data",data)
self.assertGreaterEqual(len(data["actions"]),37)
name= data["actions"][0]["name"]
self.assertEqual(name,"energy_fair")
points = data["actions"][0]["average_points"]
self.assertEqual(points,15)
def test_consistency(self):
"""
Test if the results of all estimation calls match those of the last run.
Get the inputs to each method from the INPUTS_FILE, as well as the
previous outputs from the OUTPUTS_FILE. Call all methods of the carbon
calculator with the inputs retrieved earlier, and compare the results
with the results of the last run. Finally, pretty print the differences
between this test run and the last one. Don't return anything.
"""
#Check for required data
if len(self.input_data) <= 0:
return
self.output_data = self.eval_all_actions(self.input_data)
#Compare
if len(self.input_data) != len(self.output_data):
msg = "Consistency test: vector length mismatch, input = %d, output = %d" % (len(self.input_data), len(self.output_data))
print(msg)
self.differences = self.compare(self.input_data, self.output_data)
self.pretty_print_diffs(
self.differences,
self.input_timestamp)
numDifferences = len(self.differences)
if numDifferences > 0:
self.write_outputs(os.getenv("TEST_OUTPUTS",default=OUTPUTS_FILE))
self.assertEqual(numDifferences,0)
def read_inputs(self,filename):
try:
f = open(filename, 'r')
header = eval(f.readline().strip())
self.input_timestamp = header["Timestamp"]
inputs = [eval(i.strip()) for i in f.readlines()]
f.close()
except Exception as e:
inputs = []
print("Exception from read_inputs: " + str(e))
return inputs
def write_outputs(self, filename):
data = {"Timestamp": self.output_timestamp}
outputLine(data,filename,True)
for line in self.output_data:
outputLine(line,filename,False)
def eval_all_actions(self, inputs):
"""Run the estimate method of all the actions of the Carbon Calculator."""
self.output_timestamp = timezone.now().isoformat(" ") #Time of last test
output_data = []
for aip in inputs: #aip = action inputs pair
try:
outdata = jsons.loads( #Response of estimate in dict form
self.client.post(
"/cc/estimate/{}".format(aip['Action']), aip["inputs"]
).content)
outdata = outdata.get("data", outdata)
output_data.append(
{ "Action" : aip['Action'],
"inputs" : aip['inputs'],
'outputs' : outdata })
except Exception as e: #Some may throw errors w/o inputs
print('eval_all_inputs exception')
print(e)
print(aip)
return output_data
def compare(self, old, new):
"""
Compare the old set of results with the new set.
Populate a list of differences (tuples) according to the following rules:
For a new action (action found in new results aggregate but not old)
("New action", ACTION_NAME)
For a removed action (action found in old results aggregate but not new)
("Removed action", ACTION_NAME)
For a differing value between the two aggregates
("Value difference", NEW_VALUE, OLD_VALUE)
"""
differences = []
for i in range(len(old)):
action = old[i]["Action"]
inputs = old[i]["inputs"]
outputs_old = old[i]["outputs"]
outputs_new = new[i]["outputs"]
for key in ["status", "carbon_points", "cost", "savings"]:
if not key in outputs_old:
print("outputs_old error:")
print(old[i])
elif not key in outputs_new:
print("outputs_new error, key = "+key)
print(new[i])
elif not outputs_new[key] == outputs_old[key]:
differences.append((action, inputs,
key,
outputs_old[key],
outputs_new[key]))
return differences
def pretty_print_diffs(self, diffs, oldtime):
if len(diffs) > 0:
hdr = "\ncarbon_calculator results inconsistent with input data from "+str(oldtime) + "\n# differences: %d\n======================================================" % len(diffs)
print(hdr)
for diff in diffs:
print(str(diff)) #Not pretty yet
print("\n")
else:
print("carbon_calculator results consistent with input data from "+str(oldtime))
def test_info_events(self):
''' Tests /cc/info/events url and returns a json of events. '''
event_list = self.client.get("/cc/info/events", {}) # status code = 200
event_list_json = jsons.loads(event_list.content) # loads into json
#print(event_list_json) # uncomment this to see json
# 2 I believe this one works... a little unsure
def test_info_all_events(self):
'''tests /cc/info/event/~eventName~ and should return all data about that event '''
#print("2")
obj = Event.objects.first() # getting the first object in model
field_object = Event._meta.get_field('name') # this and next is getting the name
#print("field object")
#print(field_object)
field_value = field_object.value_from_object(obj) # this returns actual name (as str)
#print("field value")
#print(field_value)
# UPDATE WORKS !!! Well pretty sure, gives me all of the info sooooo
event_url = "/cc/info/event/" + field_value
#print(event_url)
event_info = self.client.get(event_url, {})
event_json = jsons.loads(event_info.content)
#print(event_json)
# 4 !!!!!!! WORKS !!!!!!!
def test_info_impact_event(self):
#print("test info impact event")
obj = Event.objects.first() # getting the first object in model
field_object = Event._meta.get_field('name') # this and next is getting the name
field_value = field_object.value_from_object(obj) # this returns actual name (as str)
#print(field_value)
event_url = "/cc/info/impact/" + field_value
#print(event_url)
event_info = self.client.get(event_url, {})
event_json = jsons.loads(event_info.content)
#print(event_json)
# 6 !!!!!!! WORKS !!!!!!!
def test_info_on_one_group(self):
obj = Group.objects.first()
field_object = Group._meta.get_field('name')
field_value = field_object.value_from_object(obj)
#print(field_value)
event_url = "/cc/info/group/" + field_value
#print(event_url)
event_info = self.client.get(event_url, {})
event_json = jsons.loads(event_info.content)
#print(event_json)
# 8 !!!!!!! WORKS !!!!!!!
def test_info_stations_one_station(self):
obj = Station.objects.first()
field_object = Station._meta.get_field('name')
field_value = field_object.value_from_object(obj)
#print(field_value)
event_url = "/cc/info/station/" + field_value
#print(event_url)
event_info = self.client.get(event_url, {})
event_json = jsons.loads(event_info.content)
#print(event_json)
#extra
def test_get_action_list(self):
impact_info = self.client.get("/cc/info/actions", {})
#print("actions:")
#print(jsons.loads(impact_info.content))
# 12 !!!!!! WORKS !!!!!!!
def test_estimate_actions(self):
obj = Action.objects.first() # getting the first object in model
field_object = Action._meta.get_field('name') # this and next is getting the name
field_value = field_object.value_from_object(obj) # this returns actual name (as str)
event_url = '/cc/estimate/' + field_value
response = self.client.post(event_url, {})
self.assertEqual(response.status_code, 200)
# event_json = jsons.loads(event_info.content)
# print(event_json)
#test_action = self.client.post('/cc/estimate/')
# 13 !!!!!! WORKS !!!!!!
def test_undo_actions(self):
obj = Action.objects.first()
field_object = Action._meta.get_field('name')
field_value = field_object.value_from_object(obj)
#print(field_value)
event_url = '/cc/undo/' + field_value
response = self.client.post(event_url, {})
#print(response)
# 3 !!!!!! Works !!!!!!
def test_impact_url(self):
impact_info = self.client.get("/cc/info/impact", {})
#print("test impact url")
#print(jsons.loads(impact_info.content))
# 5 !!!!!! WORKS !!!!!!!
def test_info_group_url(self):
#print("test info group url")
group_info = self.client.get("/cc/info/groups", {})
#print("test info group url")
#print(jsons.loads(group_info.content))
# 7 !!!! Works !!!!
def test_info_stations_url(self):
station_info = self.client.get("/cc/info/stations", {})
#print("test info stations url")
#print(jsons.loads(station_info.content))
# 9 !!! Works, but there are no users; I even checked by running the server
def test_info_users_url(self):
user_url = "/cc/info/users"
user_info = self.client.get(user_url, {})
#print("test info users url")
#print(jsons.loads(user_info.content))
# 11 !!!!! WORKS !!!!
def test_create_user(self):
response = self.client.post('/cc/users', {
'id':1,
'email':'<EMAIL>'
})
data = jsons.loads(response.content)
#print(data)
# 10 DOES NOT WORK because there are no users
def test_getting_user(self):
response = self.client.get("/cc/info/users")
#print(jsons.loads(response.content))
# honestly no idea if this works; it returns a response saying it is exporting, but that is unverified
def test_exporting_csv(self):
self.client.post('/cc/export',
{
"Defaults": "carbon_calculator/content/exportdefaults.csv"
})
def outputLine(data, filename, new=False):
tag = "a"
if new:
tag = "w"
f = open(filename, tag)
f.write(str(data) + "\n")
f.close()
def outputInputs(data):
f = open("carbon_calculator/tests/Inputs.txt", "a")
f.write(str(data) + "\n")
f.close()
def populate_inputs_file():
client = Client()
response = client.get("/cc/info/actions")
data = jsons.loads(response.content)["actions"]
names = [i["name"] for i in data]
filename_all = "carbon_calculator/tests/" + "allPossibleInputs.txt"
data = {"Timestamp" : timezone.now().isoformat(" "), "Contents" : "All Possible Calculator Inputs"}
outputLine(data, filename_all, True)
filename_def = "carbon_calculator/tests/" + "defaultInputs.txt"
data = {"Timestamp" : timezone.now().isoformat(" "), "Contents" : "Default Calculator Inputs"}
outputLine(data, filename_def, True)
np = 0
for name in names:
# get info on the action to find allowed parameter values
#print("URL: /cc/info/action/{}".format(name))
response = client.get("/cc/info/action/{}".format(name))
data = response.json() #jsons.loads(response.content, {})
actionName = data["action"]["name"]
questions = data["action"]["questionInfo"]
qTot = []
qInd = []
for question in questions:
qType = question["questionType"]
qInd.append(0)
if qType == "Choice":
qTot.append(len(question["responses"]))
else:
if qType == "Number":
qTot.append(1)
else:
qTot.append(0)
nq = len(questions)
qr = range(nq)
done = False
ni = 0
while not done:
actionPars = {"Action": actionName, "inputs":{}}
q = 0
for q in qr:
question = questions[q]
if qTot[q] > 1:
actionPars["inputs"][question["name"]] = question["responses"][qInd[q]]["text"]
else:
if qTot[q] == 1:
val = 0.
typ = question["numeric_values"].get("typical_value",-999)
if typ > 0:
val = typ
actionPars["inputs"][question["name"]] = val
outputs = client.get("/cc/estimate/{}".format(actionPars['Action']), actionPars["inputs"]).content.decode("utf-8")
actionPars["outputs"] = eval(outputs)
outputLine(actionPars, filename_all)
np += 1
ni += 1
# update the response indices, increment one by one to get each combination
for q in qr:
if qTot[q]>0:
qInd[q] += 1
if qInd[q] == qTot[q]:
qInd[q] = 0
else:
break
if q == nq-1:
done = True
msg = "Action '%s', %d possible inputs" % (actionName, ni)
print(msg)
msg = "Number possible calculator inputs with all choices = %d" % np
print(msg)
```
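For reference, a sketch of the file format the consistency test above reads and writes, inferred from `outputLine`, `read_inputs`, and `populate_inputs_file`: a header dict on the first line, then one Python-literal dict per line. The action name, inputs, and output values below are illustrative, not real calculator data.
```python
# Sketch of the inputs/outputs round trip used by the consistency test (illustrative values only).
header = {"Timestamp": "2021-01-01 00:00:00", "Contents": "All Possible Calculator Inputs"}
record = {"Action": "energy_fair",
          "inputs": {"attend_fair": "Yes"},
          "outputs": {"status": True, "carbon_points": 15, "cost": 0, "savings": 0}}

with open("allPossibleInputs.txt", "w") as f:   # same one-dict-per-line format outputLine() writes
    f.write(str(header) + "\n")
    f.write(str(record) + "\n")

with open("allPossibleInputs.txt") as f:        # same parsing read_inputs() performs
    hdr = eval(f.readline().strip())
    rows = [eval(line.strip()) for line in f]
print(hdr["Timestamp"], rows[0]["Action"])
```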
#### File: _main_/utils/utils.py
```python
import json
import django.db.models.base as Base
import inspect
from django.db.models.fields.related import ManyToManyField, ForeignKey
def load_json(path):
"""
Loads the json file in the given path.
Precondition:
path: is a string of a valid json path.
"""
with open(path) as file:
return json.load(file)
def load_text_contents(path) -> str:
data = {}
with open(path) as f:
data = f.read()
return data
def get_all_models(models):
"""
This function takes a Django models.py class and extracts all the models
defined in there and returns them as a list.
"""
return [m[1] for m in inspect.getmembers(models, inspect.isclass)
if (isinstance(m[1], Base.ModelBase))]
def get_models_and_field_types(models):
"""
This method take a models.py class and makes a dictionary of all the models
mapping them to their fields in groups.
eg. {
model1: {"m2m": {...}, "fk": {....}, "other":{....},
"required_fields":{.....}}
....
}
Hence for each model, we collect and group all the many to many fields
as well as foreignkeys as well as get which fields are required
"""
all_models = get_all_models(models)
result = {}
for m in all_models:
result[m]={
"m2m": set(),"fk":set(), "other": set(),
"required_fields": set(), "all_fields": set()
}
for f in m._meta.get_fields():
result[m]["all_fields"].add(f.name)
if isinstance(f, ManyToManyField):
result[m]["m2m"].add(f.name)
elif isinstance(f, ForeignKey):
result[m]["fk"].add(f.name)
else:
result[m]["other"].add(f.name)
if hasattr(f, 'blank') and f.blank == False:
result[m]["required_fields"].add(f.name)
return result
```
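A short sketch of how the helper above might be used, assuming `database.models` is the project's Django models module (any models module works; the fields printed depend on the models it defines).
```python
# Minimal sketch, assuming database.models is the Django models module to inspect.
from database import models as db_models
from _main_.utils.utils import get_models_and_field_types

field_map = get_models_and_field_types(db_models)
for model, groups in field_map.items():
    # groups has the keys "m2m", "fk", "other", "required_fields", "all_fields"
    print(model.__name__, sorted(groups["required_fields"]))
```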
{
"source": "jpekala/RadioReferenceParser",
"score": 3
}
#### File: jpekala/RadioReferenceParser/dsdplus_parser.py
```python
import tkinter as tk
from tkinter import filedialog
from tkinter.ttk import Combobox
import csv
class dsdfreqparser(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.rrsystemNamelbl = tk.Label(self, text="System Name")
self.rrsystemNamelbl.grid(sticky=tk.W, column=0, row=0)
self.rrProtocollbl = tk.Label(self, text="Trunking Protocol")
self.rrProtocollbl.grid(sticky=tk.W, column=0, row=2)
self.rrNetIDlbl = tk.Label(self, text="Network ID")
self.rrNetIDlbl.grid(sticky=tk.W, column=0, row=3)
self.rrsystemNameEnt = tk.Entry(self)
self.rrsystemNameEnt.grid(sticky=tk.W, column=1, row=0)
self.rrProtocolCombo = Combobox(self)
self.rrProtocolCombo['values']= ('D-Star', 'IDAS', 'NEXEDGE48', 'NEXEDGE96', 'dPMR', 'DMR', 'Cap+', 'Con+', 'TIII', 'P25', 'ProVoice')
self.rrProtocolCombo.current(7)
self.rrProtocolCombo.grid(sticky=tk.W, column=1, row=2)
self.rrNetIDEnt = tk.Entry(self)
self.rrNetIDEnt.grid(sticky=tk.W, column=1, row=3)
self.convertBTN = tk.Button(self, text="Convert", command=self.parse_file)
self.convertBTN.grid(sticky=tk.W, padx=5, pady=5, column=0, row=5)
self.rrParsedOutput = tk.Text(self, selectborderwidth=2)
self.rrParsedOutput.grid(sticky=tk.W, padx=5, pady=5, columnspan=2, row=7)
def RadioRefCSV(self):
fileLocation = filedialog.askopenfilename(title="Select your Radio Reference CSV", filetypes=(("CSV files","*.csv"),("All Files","*.*")))
return fileLocation
def parse_file(self):
rrsysname = self.rrsystemNameEnt.get()
rrfilename = self.RadioRefCSV()
rrprotocol = self.rrProtocolCombo.get()
rrnetid = self.rrNetIDEnt.get()
        with open(rrfilename) as csvfile:
            readCSV = csv.DictReader(csvfile, delimiter=',')
            self.rrParsedOutput.insert(tk.INSERT, ";" + rrsysname + "\n")
            for row in readCSV:
                # First frequency for the site comes from the named 'Frequencies' column
                self.rrParsedOutput.insert(tk.INSERT, rrprotocol + ", " + rrnetid + ", " + row['Site Dec'] + ', 1, ' + row['Frequencies'] + ", 0.0, 0\n")
                # DictReader collects any extra, unnamed columns under the None key;
                # use .get() so rows without overflow frequencies do not raise KeyError
                for freqs in row.get(None, []):
                    self.rrParsedOutput.insert(tk.INSERT, rrprotocol + ", " + rrnetid + ", " + row['Site Dec'] + ", ***OTANUMBER***, " + str(freqs) + ", 0.0, 0\n")
app = dsdfreqparser()
app.mainloop()
``` |
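For reference, a minimal non-GUI sketch of the same transformation; the column names and the sample frequencies are assumptions based on how `parse_file` reads the Radio Reference export:

```python
import csv, io

# Sample CSV text standing in for a Radio Reference site export; the extra,
# unnamed columns after 'Frequencies' hold the remaining site frequencies.
sample = "Site Dec,Frequencies\n101,853.9875,854.2625,855.4875\n"

protocol, net_id = "P25", "351"   # illustrative values only
for row in csv.DictReader(io.StringIO(sample)):
    # First (control-channel style) frequency comes from the named column
    print(f"{protocol}, {net_id}, {row['Site Dec']}, 1, {row['Frequencies']}, 0.0, 0")
    # Overflow columns are collected by DictReader under the None key
    for freq in row.get(None, []):
        print(f"{protocol}, {net_id}, {row['Site Dec']}, ***OTANUMBER***, {freq}, 0.0, 0")
```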
{
"source": "jpelaez82/misiontic2022",
"score": 4
} |
#### File: Fundamentos de Programacion/Ciclo 1 - Python/ejemplo_parqueadero_buses.py
```python
def parqueadero_buses(cantidad_buses, numero_bus):
    # The total number of buses must be a multiple of 3
    p = cantidad_buses % 3 == 0
    q = numero_bus <= cantidad_buses and numero_bus > 0
    # Validate the input data types
    r = isinstance(numero_bus, int)
    s = isinstance(cantidad_buses, int)
    # Validation
    if p and q and r and s:
        # First lot
        if numero_bus <= cantidad_buses / 3:
            lote = 1
        else:
            # Second lot
            if numero_bus <= cantidad_buses * 2/3:
                lote = 2
            else:
                # Third lot
                lote = 3
    else:
        lote = "No se puede parquear aqui"
    return lote
# Checks
print(parqueadero_buses(100, 1))
print(parqueadero_buses(99, -3))
print(parqueadero_buses(102, 40))
print(parqueadero_buses(30, 40))
print(parqueadero_buses(100, 33))
```
#### File: Fundamentos de Programacion/Ciclo 1 - Python/reto_semana3.py
```python
distancias = {('H', 'H'): 0, ('H', 'A'): 21, ('H', 'B'): 0, ('H', 'C'): 58, ('H', 'D'): 195, ('H', 'E'): 245, ('H', 'F'): 241,
('A', 'H'): 127, ('A', 'A'): 0, ('A', 'B'): 231, ('A', 'C'): 113, ('A', 'D'): 254, ('A', 'E'): 179, ('A', 'F'): 41,
('B', 'H'): 153, ('B', 'A'): 252, ('B', 'B'): 0, ('B', 'C'): 56, ('B', 'D'): 126, ('B', 'E'): 160, ('B', 'F'): 269,
('C', 'H'): 196, ('C', 'A'): 128, ('C', 'B'): 80, ('C', 'C'): 0, ('C', 'D'): 136, ('C', 'E'): 37, ('C', 'F'): 180,
('D', 'H'): 30, ('D', 'A'): 40, ('D', 'B'): 256, ('D', 'C'): 121, ('D', 'D'): 0, ('D', 'E'): 194, ('D', 'F'): 109,
('E', 'H'): 33, ('E', 'A'): 144, ('E', 'B'): 179, ('E', 'C'): 114, ('E', 'D'): 237, ('E', 'E'): 0, ('E', 'F'): 119,
('F', 'H'): 267, ('F', 'A'): 61, ('F', 'B'): 79, ('F', 'C'): 39, ('F', 'D'): 135, ('F', 'E'): 55, ('F', 'F'): 0}
ruta_inicial = ['H', 'A', 'B', 'C', 'D', 'E', 'F', 'H']
def diccionario_valido(distancias):
    flag = False
    for key, value in distancias.items():
        if value >= 0:
            if key[0] == key[1]:
                tupla_pos1 = key[0]
                tupla_pos2 = key[1]
                tupla_iguales = (tupla_pos1, tupla_pos2)
                # The distance from a stop to itself must be zero
                if distancias[tupla_iguales] == 0:
                    flag = True
                else:
                    return False
        else:
            # Negative distances are not allowed
            return False
    return flag
def calculo_distancia(ruta_inicial, distancias):
    # Initialise counters
    i = 0
    suma = 0
    while i <= len(ruta_inicial) - 1:
        # Build the (origin, destination) tuple for this leg
        pos1 = ruta_inicial[i]
        pos2 = ruta_inicial[i+1]
        tupla = (pos1, pos2)
        suma = suma + distancias[tupla]
        i += 1
        # Stop before the last index so we never step past the end of the route
        if (i == len(ruta_inicial) - 1):
            i = len(ruta_inicial) + 1
    return suma
def intercambio(tupla, ruta):
    nueva_ruta = ruta.copy()
    # Find the index of each element of the pair within the route
    index_pos1 = nueva_ruta.index(tupla[0])
    index_pos2 = nueva_ruta.index(tupla[1])
    nueva_ruta[index_pos1] = tupla[1]
    nueva_ruta[index_pos2] = tupla[0]
    return nueva_ruta
def ruteo(distancias: dict, ruta_inicial: list) -> dict:
    # Validate the distance dictionary before doing any work
    if diccionario_valido(distancias):
        # Copy the initial route
        ruta_inicial_copia = ruta_inicial.copy()
        # Distance of the initial route; start with it as the best known solution
        distancia_ruta_inicial = calculo_distancia(ruta_inicial, distancias)
        ruta_optima = ruta_inicial_copia
        distancia_optima = distancia_ruta_inicial
        bandera = False
        i = 1
        while i <= len(ruta_inicial) - 2:
            pos1 = ruta_inicial[i]
            j = i + 1
            while j <= len(ruta_inicial) - 2:
                pos2 = ruta_inicial[j]
                combinacion = (pos1, pos2)
                # Build a new route by swapping this pair of intermediate stops
                ruta_nueva = intercambio(combinacion, ruta_inicial_copia)
                distancia_nueva_ruta = calculo_distancia(ruta_nueva, distancias)
                if distancia_nueva_ruta < distancia_ruta_inicial:
                    ruta_optima = ruta_nueva
                    distancia_optima = distancia_nueva_ruta
                    # Update the best known distance
                    distancia_ruta_inicial = distancia_optima
                    bandera = True
                j += 1
            i += 1
            if bandera and i == len(ruta_inicial) - 2:
                # An improvement was found: restart the sweep from the improved route
                i = 1
                ruta_inicial_copia = ruta_optima.copy()
                bandera = False
            if not bandera and i == len(ruta_inicial) - 2:
                # No further improvement: return the best route found so far
                ruta_esperada = '-'.join(ruta_inicial_copia)
                return {'ruta': ruta_esperada, 'distancia': distancia_optima}
    else:
        # Ask the caller to review the dictionary input
        return "Por favor revisar los datos de entrada."
print(ruteo(distancias, ruta_inicial))
``` |
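As a small sanity check, `calculo_distancia` simply sums the distance of each consecutive pair of stops. A hand-sized sketch with a made-up three-stop distance dictionary (it assumes the functions defined above are in scope):

```python
# Illustrative only: a tiny distance dictionary, not the challenge data above.
mini_distancias = {('H', 'A'): 10, ('A', 'B'): 5, ('B', 'H'): 7,
                   ('H', 'H'): 0, ('A', 'A'): 0, ('B', 'B'): 0}
mini_ruta = ['H', 'A', 'B', 'H']

# calculo_distancia walks the pairs (H,A), (A,B), (B,H): 10 + 5 + 7 = 22
print(calculo_distancia(mini_ruta, mini_distancias))  # 22
```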
{
"source": "jpelaezClub/pyowm",
"score": 3
} |
#### File: pyowm/commons/weather_client.py
```python
try:
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
except ImportError:
from urllib2 import HTTPError, URLError
from urllib import urlencode
import socket
from pyowm.exceptions import api_call_error, unauthorized_error, not_found_error
from pyowm.webapi25.configuration25 import ROOT_API_URL
class WeatherHttpClient(object):
    """
    An HTTP client class for the OWM web API. The class can leverage a
    caching mechanism

    :param API_key: a Unicode object representing the OWM web API key
    :type API_key: Unicode
    :param cache: an *OWMCache* concrete instance that will be used to
        cache OWM web API responses.
    :type cache: an *OWMCache* concrete instance
    :param subscription_type: the type of OWM web API subscription to be wrapped.
        The value is used to pick the proper API subdomain for HTTP calls.
        Defaults to: 'free'
    :type subscription_type: str
    """
    API_SUBSCRIPTION_SUBDOMAINS = {
        'free': 'api',
        'pro': 'pro'
    }
def __init__(self, API_key, cache, subscription_type='free'):
self._API_key = API_key
self._cache = cache
self._subscription_type = subscription_type
def _lookup_cache_or_invoke_API(self, cache, API_full_url, timeout):
cached = cache.get(API_full_url)
if cached:
return cached
else:
try:
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
response = urlopen(API_full_url, None, timeout)
except HTTPError as e:
if '401' in str(e):
raise unauthorized_error.UnauthorizedError('Invalid API key')
if '404' in str(e):
raise not_found_error.NotFoundError('The resource was not found')
if '502' in str(e):
raise api_call_error.BadGatewayError(str(e), e)
raise api_call_error.APICallError(str(e), e)
except URLError as e:
raise api_call_error.APICallError(str(e), e)
else:
data = response.read().decode('utf-8')
cache.set(API_full_url, data)
return data
def call_API(self, API_endpoint_URL, params_dict,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""
Invokes a specific OWM web API endpoint URL, returning raw JSON data.
:param API_endpoint_URL: the API endpoint to be invoked
:type API_endpoint_URL: str
:param params_dict: a dictionary containing the query parameters to be
used in the HTTP request (given as key-value couples in the dict)
:type params_dict: dict
:param timeout: how many seconds to wait for connection establishment
(defaults to ``socket._GLOBAL_DEFAULT_TIMEOUT``)
:type timeout: int
:returns: a string containing raw JSON data
:raises: *APICallError*
"""
        try:
            escaped = API_endpoint_URL % (self.API_SUBSCRIPTION_SUBDOMAINS[self._subscription_type],)
        except (KeyError, TypeError, ValueError):
            # URL has no subdomain placeholder or the subscription type is unknown
            escaped = API_endpoint_URL
url = self._build_full_URL(escaped, params_dict)
return self._lookup_cache_or_invoke_API(self._cache, url, timeout)
def _build_full_URL(self, API_endpoint_URL, params_dict):
"""
Adds the API key and the query parameters dictionary to the specified
API endpoint URL, returning a complete HTTP request URL.
:param API_endpoint_URL: the API endpoint base URL
:type API_endpoint_URL: str
:param params_dict: a dictionary containing the query parameters to be
used in the HTTP request (given as key-value couples in the dict)
:type params_dict: dict
:param API_key: the OWM web API key
:type API_key: str
:returns: a full string HTTP request URL
"""
params = params_dict.copy()
if self._API_key is not None:
params['APPID'] = self._API_key
return self._build_query_parameters(API_endpoint_URL, params)
def _build_query_parameters(self, base_URL, params_dict):
"""
Turns dictionary items into query parameters and adds them to the base
URL
:param base_URL: the base URL whom the query parameters must be added
to
:type base_URL: str
:param params_dict: a dictionary containing the query parameters to be
used in the HTTP request (given as key-value couples in the dict)
:type params_dict: dict
:returns: a full string HTTP request URL
"""
return base_URL + '?' + urlencode(params_dict)
def __repr__(self):
return "<%s.%s - cache=%s>" % \
(__name__, self.__class__.__name__, repr(self._cache))
```
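A minimal usage sketch of `WeatherHttpClient`, assuming a valid OWM API key and network access; the tiny dictionary cache below only implements the `get`/`set` interface the client relies on (it is not a pyowm class), and the endpoint URL with the `%s` subdomain placeholder is illustrative:

```python
from pyowm.commons.weather_client import WeatherHttpClient

# Hypothetical cache stub: just enough of the OWMCache interface (get/set)
# for WeatherHttpClient to use.
class DictCache(object):
    def __init__(self):
        self._store = {}
    def get(self, url):
        return self._store.get(url)
    def set(self, url, data):
        self._store[url] = data

# Requires a real OWM API key and network connectivity.
client = WeatherHttpClient('your-OWM-API-key', DictCache())
raw_json = client.call_API(
    'http://%s.openweathermap.org/data/2.5/weather',  # %s picks the subscription subdomain
    {'q': 'London,uk'})
print(raw_json[:120])
```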
#### File: pyowm/stationsapi30/stations_manager.py
```python
from pyowm.commons.http_client import HttpClient
from pyowm.stationsapi30.station_parser import StationParser
from pyowm.stationsapi30.aggregated_measurement_parser import AggregatedMeasurementParser
from pyowm.constants import STATIONS_API_VERSION
class StationsManager(object):
"""
    A manager object that provides a full interface to the OWM Stations API.
    Mainly it implements CRUD methods on Station entities and the corresponding
measured datapoints.
:param API_key: the OWM web API key (defaults to ``None``)
:type API_key: str
:returns: a *StationsManager* instance
:raises: *AssertionError* when no API Key is provided
"""
def __init__(self, API_key):
assert API_key is not None, 'You must provide a valid API Key'
self.API_key = API_key
self.stations_parser = StationParser()
self.aggregated_measurements_parser = AggregatedMeasurementParser()
self.http_client = HttpClient()
def stations_api_version(self):
return STATIONS_API_VERSION
# STATIONS Methods
def get_stations(self):
"""
Retrieves all of the user's stations registered on the Stations API.
:returns: list of *pyowm.stationsapi30.station.Station* objects
"""
status, data = self.http_client.get_json(
'http://api.openweathermap.org/data/3.0/stations',
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return [self.stations_parser.parse_dict(item) for item in data]
def get_station(self, id):
"""
Retrieves a named station registered on the Stations API.
:param id: the ID of the station
:type id: str
:returns: a *pyowm.stationsapi30.station.Station* object
"""
status, data = self.http_client.get_json(
'http://api.openweathermap.org/data/3.0/stations/%s' % str(id),
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
return self.stations_parser.parse_dict(data)
def create_station(self, external_id, name, lat, lon, alt=None):
"""
Create a new station on the Station API with the given parameters
:param external_id: the user-given ID of the station
:type external_id: str
:param name: the name of the station
:type name: str
:param lat: latitude of the station
:type lat: float
:param lon: longitude of the station
:type lon: float
:param alt: altitude of the station
:type alt: float
:returns: the new *pyowm.stationsapi30.station.Station* object
"""
assert external_id is not None
assert name is not None
assert lon is not None
assert lat is not None
if lon < -180.0 or lon > 180.0:
raise ValueError("'lon' value must be between -180 and 180")
if lat < -90.0 or lat > 90.0:
raise ValueError("'lat' value must be between -90 and 90")
if alt is not None:
if alt < 0.0:
raise ValueError("'alt' value must not be negative")
status, payload = self.http_client.post(
'http://api.openweathermap.org/data/3.0/stations',
params={'appid': self.API_key},
data=dict(external_id=external_id, name=name, lat=lat,
lon=lon, alt=alt),
headers={'Content-Type': 'application/json'})
return self.stations_parser.parse_dict(payload)
def update_station(self, station):
"""
Updates the Station API record identified by the ID of the provided
*pyowm.stationsapi30.station.Station* object with all of its fields
:param station: the *pyowm.stationsapi30.station.Station* object to be updated
:type station: *pyowm.stationsapi30.station.Station*
:returns: `None` if update is successful, an exception otherwise
"""
assert station.id is not None
status, _ = self.http_client.put(
'http://api.openweathermap.org/data/3.0/stations/%s' % str(station.id),
params={'appid': self.API_key},
data=dict(external_id=station.external_id, name=station.name,
lat=station.lat, lon=station.lon, alt=station.alt),
headers={'Content-Type': 'application/json'})
def delete_station(self, station):
"""
Deletes the Station API record identified by the ID of the provided
*pyowm.stationsapi30.station.Station*, along with all its related
measurements
:param station: the *pyowm.stationsapi30.station.Station* object to be deleted
:type station: *pyowm.stationsapi30.station.Station*
:returns: `None` if deletion is successful, an exception otherwise
"""
assert station.id is not None
status, _ = self.http_client.delete(
'http://api.openweathermap.org/data/3.0/stations/%s' % str(station.id),
params={'appid': self.API_key},
headers={'Content-Type': 'application/json'})
# Measurements-related methods
def send_measurement(self, measurement):
"""
Posts the provided Measurement object's data to the Station API.
:param measurement: the *pyowm.stationsapi30.measurement.Measurement*
object to be posted
:type measurement: *pyowm.stationsapi30.measurement.Measurement* instance
:returns: `None` if creation is successful, an exception otherwise
"""
assert measurement is not None
assert measurement.station_id is not None
status, _ = self.http_client.post(
'http://api.openweathermap.org/data/3.0/measurements',
params={'appid': self.API_key},
data=[measurement.to_dict()],
headers={'Content-Type': 'application/json'})
def send_measurements(self, list_of_measurements):
"""
Posts data about the provided list of Measurement objects to the
Station API. The objects may be related to different station IDs.
:param list_of_measurements: list of *pyowm.stationsapi30.measurement.Measurement*
objects to be posted
:type list_of_measurements: list of *pyowm.stationsapi30.measurement.Measurement*
instances
:returns: `None` if creation is successful, an exception otherwise
"""
assert list_of_measurements is not None
assert all([m.station_id is not None for m in list_of_measurements])
msmts = [m.to_dict() for m in list_of_measurements]
status, _ = self.http_client.post(
'http://api.openweathermap.org/data/3.0/measurements',
params={'appid': self.API_key},
data=msmts,
headers={'Content-Type': 'application/json'})
def get_measurements(self, station_id, aggregated_on, from_timestamp,
to_timestamp, limit=100):
"""
Reads measurements of a specified station recorded in the specified time
window and aggregated on minute, hour or day. Optionally, the number of
resulting measurements can be limited.
:param station_id: unique station identifier
:type station_id: str
:param aggregated_on: aggregation time-frame for this measurement
        :type aggregated_on: string, one of 'm', 'h' or 'd'
:param from_timestamp: Unix timestamp corresponding to the beginning of
the time window
:type from_timestamp: int
:param to_timestamp: Unix timestamp corresponding to the end of the
time window
:type to_timestamp: int
:param limit: max number of items to be returned. Defaults to 100
:type limit: int
:returns: list of *pyowm.stationsapi30.measurement.AggregatedMeasurement*
objects
"""
assert station_id is not None
assert aggregated_on is not None
assert from_timestamp is not None
assert from_timestamp > 0
assert to_timestamp is not None
assert to_timestamp > 0
if to_timestamp < from_timestamp:
raise ValueError("End timestamp can't be earlier than begin timestamp")
assert isinstance(limit, int)
assert limit >= 0
query = {'appid': self.API_key,
'station_id': station_id,
'type': aggregated_on,
'from': from_timestamp,
'to': to_timestamp,
'limit': limit}
status, data = self.http_client.get_json(
'http://api.openweathermap.org/data/3.0/measurements',
params=query,
headers={'Content-Type': 'application/json'})
return [self.aggregated_measurements_parser.parse_dict(item) for item in data]
def send_buffer(self, buffer):
"""
Posts to the Stations API data about the Measurement objects contained
        in the provided Buffer instance.
:param buffer: the *pyowm.stationsapi30.buffer.Buffer* instance whose
measurements are to be posted
:type buffer: *pyowm.stationsapi30.buffer.Buffer* instance
:returns: `None` if creation is successful, an exception otherwise
"""
assert buffer is not None
msmts = []
for x in buffer.measurements:
m = x.to_dict()
item = dict()
item['station_id'] = m['station_id']
item['dt'] = m['timestamp']
item['temperature'] = m['temperature']
item['wind_speed'] = m['wind_speed']
item['wind_gust'] = m['wind_gust']
item['wind_deg'] = m['wind_deg']
item['pressure'] = m['pressure']
item['humidity'] = m['humidity']
item['rain_1h'] = m['rain_1h']
item['rain_6h'] = m['rain_6h']
item['rain_24h'] = m['rain_24h']
item['snow_1h'] = m['snow_1h']
item['snow_6h'] = m['snow_6h']
item['snow_24h'] = m['snow_24h']
item['dew_point'] = m['dew_point']
item['humidex'] = m['humidex']
item['heat_index'] = m['heat_index']
item['visibility_distance'] = m['visibility_distance']
item['visibility_prefix'] = m['visibility_prefix']
item['clouds'] = [dict(distance=m['clouds_distance']),
dict(condition=m['clouds_condition']),
dict(cumulus=m['clouds_cumulus'])]
item['weather'] = [
dict(precipitation=m['weather_precipitation']),
dict(descriptor=m['weather_descriptor']),
dict(intensity=m['weather_intensity']),
dict(proximity=m['weather_proximity']),
dict(obscuration=m['weather_obscuration']),
dict(other=m['weather_other'])]
msmts.append(item)
status, _ = self.http_client.post(
'http://api.openweathermap.org/data/3.0/measurements',
params={'appid': self.API_key},
data=msmts,
headers={'Content-Type': 'application/json'})
```
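A brief usage sketch of `StationsManager`; the API key, station name and coordinates are placeholders, and the calls require a live OWM Stations API account:

```python
from pyowm.stationsapi30.stations_manager import StationsManager

# Placeholder API key and station details - illustrative only.
mgr = StationsManager('your-OWM-API-key')

# Register a new station (external_id, name, lat, lon, optional altitude)
station = mgr.create_station('SF_TEST001', 'San Francisco Test Station',
                             37.76, -122.43, alt=150)
print(station.id)

# List everything registered under this account
for s in mgr.get_stations():
    print(s.id, s.name)

# Read day-aggregated measurements for the new station over a Unix-time window
records = mgr.get_measurements(station.id, 'd', 1505775600, 1505862000, limit=10)
```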
#### File: integration/webapi25/test_cityidregistry_reads_fs.py
```python
import unittest
from os import sep
from pyowm.webapi25.cityidregistry import CityIDRegistry
from pyowm.webapi25.location import Location
class TestCityIDRegistryReadsFS(unittest.TestCase):
_prefix = 'cityids'+sep
_instance = CityIDRegistry(_prefix+'%03d-%03d.txt.gz')
def test_assess_subfile_from(self):
self.assertEqual(self._instance._assess_subfile_from('b-city'),
self._prefix+'097-102.txt.gz')
self.assertEqual(self._instance._assess_subfile_from('h-city'),
self._prefix+'103-108.txt.gz')
self.assertEqual(self._instance._assess_subfile_from('n-city'),
self._prefix+'109-114.txt.gz')
self.assertEqual(self._instance._assess_subfile_from('t-city'),
self._prefix+'115-122.txt.gz')
self.assertRaises(ValueError, CityIDRegistry._assess_subfile_from,
self._instance, '123abc')
self.assertRaises(ValueError, CityIDRegistry._assess_subfile_from,
self._instance, '{abc')
def test_lookup_line_by_city_name(self):
expected = u'Dongen,2756723,51.626671,4.93889,NL'
self.assertEquals(expected,
self._instance._lookup_line_by_city_name('dongen'))
self.assertTrue(self._instance. \
_lookup_line_by_city_name('aaaaaaaa') is None)
def test_id_for(self):
self.assertEqual(self._instance.id_for('dongen'), 2756723)
self.assertTrue(self._instance.id_for('aaaaaaaaaa') is None)
def test_id_for_fails_with_malformed_inputs(self):
self.assertRaises(ValueError, CityIDRegistry.id_for, self._instance,
'123abc')
def test_location_for(self):
expected = Location('Dongen', 4.938890, 51.626671, 2756723, 'NL')
result = self._instance.location_for('dongen')
self.assertEqual(result.get_name(), expected.get_name())
self.assertEqual(result.get_country(), expected.get_country())
self.assertEqual(result.get_ID(), expected.get_ID())
self.assertEqual(result.get_lat(), expected.get_lat())
self.assertEqual(result.get_lon(), expected.get_lon())
self.assertTrue(self._instance.location_for('aaaaaaaaaa') is None)
def test_location_for_fails_with_malformed_inputs(self):
self.assertRaises(ValueError, CityIDRegistry.location_for,
self._instance, '123abc')
def test_ids_for(self):
result = self._instance.ids_for("bologna", matching='exact')
self.assertEquals(0, len(result))
result = self._instance.ids_for("Abbans-Dessus")
self.assertEquals(2, len(result))
self.assertTrue((3038800, 'Abbans-Dessus', 'FR') in result)
self.assertTrue((6452202, 'Abbans-Dessus', 'FR') in result)
result = self._instance.ids_for("Dessus", matching='like')
self.assertEquals(6, len(result))
def test_locations_for(self):
expected1 = Location('Abbans-Dessus', 5.88188, 47.120548, 3038800, 'FR')
expected2 = Location('Abbans-Dessus', 5.88333, 47.116669, 6452202, 'FR')
result = self._instance.locations_for("Abbans-Dessus")
self.assertEquals(2, len(result))
for l in result:
self.assertTrue(isinstance(l, Location))
self.assertTrue(l.get_ID() in [expected1.get_ID(), expected2.get_ID()])
if __name__ == "__main__":
unittest.main()
```
#### File: integration/webapi25/test_configuration_injection_webapi25.py
```python
import unittest
import os
import pyowm
from pyowm.constants import DEFAULT_API_KEY
class ConfigurationInjectionTestsWebAPI25(unittest.TestCase):
_config_module_name = 'tests.integration.webapi25.external_configuration'
_non_existent_config_module_name = 'this_will_never_be_a_config_module'
API_KEY = os.getenv('OWM_API_KEY', DEFAULT_API_KEY)
def test(self):
pyowm.OWM(self.API_KEY, '2.5', self._config_module_name)
def test_library_is_instantiated_with_wrong_API_version(self):
self.assertRaises(ValueError, pyowm.OWM, 'abcd', '0.0')
def test_library_is_instantiated_with_external_config(self):
"""
        Test that the library is instantiated smoothly even when injecting
        external configuration
"""
try:
pyowm.OWM(self.API_KEY, '2.5', self._config_module_name)
except Exception:
self.fail("Error raised during library instantiation")
def test_error_raised_when_providing_non_existent_external_config(self):
"""
Test that library instantiation raises an error when trying to inject
a non-existent external configuration module
"""
self.assertRaises(Exception, pyowm.OWM, self.API_KEY, '2.5',
self._non_existent_config_module_name)
def test_library_performs_API_calls_with_external_config(self):
"""
        Test that the API works correctly with external config values. For testing
        purposes, we do that by specifying None values for the JSON parsers, which
        leads to errors being raised
"""
try:
instance = \
pyowm.OWM(self.API_KEY, '2.5',
self._config_module_name)
except:
self.fail("Error raised during library instantiation")
self.assertRaises(Exception, instance.weather_at_place, 'London,uk')
if __name__ == "__main__":
unittest.main()
```
#### File: unit/webapi25/test_ozone_parser.py
```python
import unittest
from pyowm.webapi25.ozone_parser import OzoneParser
from pyowm.exceptions.parse_response_error import ParseResponseError
from tests.unit.webapi25.json_test_responses import (
OZONE_JSON, OZONE_MALFORMED_JSON)
class TestObservationParser(unittest.TestCase):
__instance = OzoneParser()
def test_parse_JSON(self):
result = self.__instance.parse_JSON(OZONE_JSON)
self.assertIsNotNone(result)
self.assertIsNotNone(result.get_reference_time())
self.assertIsNotNone(result.get_reception_time())
loc = result.get_location()
self.assertIsNotNone(loc)
self.assertIsNone(loc.get_name())
self.assertIsNone(loc.get_ID())
self.assertIsNotNone(loc.get_lon())
self.assertIsNotNone(loc.get_lat())
self.assertIsNone(result.get_interval())
self.assertIsNotNone(result.get_du_value())
def test_parse_JSON_fails_when_JSON_data_is_None(self):
self.assertRaises(ParseResponseError, OzoneParser.parse_JSON,
self.__instance, None)
def test_parse_JSON_fails_with_malformed_JSON_data(self):
self.assertRaises(ParseResponseError, OzoneParser.parse_JSON,
self.__instance, OZONE_MALFORMED_JSON)
```
#### File: unit/webapi25/test_so2indexparser.py
```python
import unittest
from pyowm.webapi25.so2indexparser import SO2IndexParser
from pyowm.exceptions.parse_response_error import ParseResponseError
from tests.unit.webapi25.json_test_responses import (
SO2INDEX_JSON, SO2INDEX_MALFORMED_JSON)
class TestSO2IndexParser(unittest.TestCase):
__instance = SO2IndexParser()
def test_parse_JSON(self):
result = self.__instance.parse_JSON(SO2INDEX_JSON)
self.assertIsNotNone(result)
self.assertIsNotNone(result.get_reference_time())
self.assertIsNotNone(result.get_reference_time())
loc = result.get_location()
self.assertIsNotNone(loc)
self.assertIsNone(loc.get_name())
self.assertIsNone(loc.get_ID())
self.assertIsNotNone(loc.get_lon())
self.assertIsNotNone(loc.get_lat())
self.assertIsNone(result.get_interval())
self.assertNotEquals(0, len(result.get_so2_samples()))
def test_parse_JSON_fails_when_JSON_data_is_None(self):
self.assertRaises(ParseResponseError, SO2IndexParser.parse_JSON,
self.__instance, None)
def test_parse_JSON_fails_with_malformed_JSON_data(self):
self.assertRaises(ParseResponseError, SO2IndexParser.parse_JSON,
self.__instance, SO2INDEX_MALFORMED_JSON)
``` |
{
"source": "jpelbertrios/gaffer-tools",
"score": 2
} |
#### File: src/gafferpy/gaffer_operations.py
```python
import gafferpy.gaffer_binaryoperators as gaffer_binaryoperators
import gafferpy.gaffer_functions as gaffer_functions
import gafferpy.gaffer_predicates as gaffer_predicates
from gafferpy.gaffer_core import *
class NamedOperationParameter(ToJson, ToCodeString):
CLASS = 'gaffer.NamedOperationParameter'
def __init__(self,
name,
value_class,
description=None,
default_value=None,
required=False):
self.name = name
self.value_class = value_class
self.description = description
self.default_value = default_value
self.required = required
def get_detail(self):
detail = {
"valueClass": self.value_class,
"required": self.required
}
if self.description is not None:
detail['description'] = self.description
if self.default_value is not None:
detail['defaultValue'] = self.default_value
return detail
def to_json(self):
return {
"description": self.description,
"defaultValue": self.default_value,
"valueClass": self.value_class,
"required": self.required
}
class NamedViewParameter(ToJson, ToCodeString):
CLASS = 'gaffer.NamedViewParameter'
def __init__(self,
name,
value_class,
description=None,
default_value=None,
required=False):
self.name = name
self.value_class = value_class
self.description = description
self.default_value = default_value
self.required = required
def get_detail(self):
detail = {
"valueClass": self.value_class,
"required": self.required
}
if self.description is not None:
detail['description'] = self.description
if self.default_value is not None:
detail['defaultValue'] = self.default_value
return detail
def to_json(self):
return {
"description": self.description,
"defaultValue": self.default_value,
"valueClass": self.value_class,
"required": self.required
}
class View(ToJson, ToCodeString):
CLASS = 'uk.gov.gchq.gaffer.data.elementdefinition.view.View'
def __init__(self, entities=None, edges=None, global_elements=None,
global_entities=None, global_edges=None, all_edges=False,
all_entities=False):
super().__init__()
self.entities = None
self.edges = None
self.global_elements = None
self.global_entities = None
self.global_edges = None
self.all_edges = all_edges
self.all_entities = all_entities
if entities is not None:
self.entities = []
if isinstance(entities, list):
for el_def in entities:
if not isinstance(el_def, ElementDefinition):
el_def = JsonConverter.from_json(
el_def, ElementDefinition)
self.entities.append(el_def)
else:
for group, el_def in entities.items():
if not isinstance(el_def, ElementDefinition):
el_def = JsonConverter.from_json(
el_def, ElementDefinition)
el_def.group = group
self.entities.append(el_def)
if edges is not None:
self.edges = []
if isinstance(edges, list):
for el_def in edges:
if not isinstance(el_def, ElementDefinition):
el_def = JsonConverter.from_json(
el_def, ElementDefinition)
self.edges.append(el_def)
else:
for group, el_def in edges.items():
if not isinstance(el_def, ElementDefinition):
el_def = JsonConverter.from_json(
el_def, ElementDefinition)
el_def.group = group
self.edges.append(el_def)
if global_elements is not None:
self.global_elements = []
if isinstance(global_elements, list):
for el_def in global_elements:
if not isinstance(el_def, GlobalElementDefinition):
el_def = JsonConverter.from_json(
el_def, GlobalElementDefinition)
self.global_elements.append(el_def)
elif isinstance(global_elements, GlobalElementDefinition):
self.global_elements.append(global_elements)
else:
for group, el_def in global_elements.items():
if not isinstance(el_def, GlobalElementDefinition):
el_def = JsonConverter.from_json(
el_def, GlobalElementDefinition)
self.global_elements.append(el_def)
if global_entities is not None:
self.global_entities = []
if isinstance(global_entities, list):
for el_def in global_entities:
if not isinstance(el_def, GlobalElementDefinition):
el_def = JsonConverter.from_json(
el_def, GlobalElementDefinition)
self.global_entities.append(el_def)
elif isinstance(global_entities, GlobalElementDefinition):
self.global_entities.append(global_entities)
else:
for group, el_def in global_entities.items():
if not isinstance(el_def, GlobalElementDefinition):
el_def = JsonConverter.from_json(
el_def, GlobalElementDefinition)
self.global_entities.append(el_def)
if global_edges is not None:
self.global_edges = []
if isinstance(global_edges, list):
for el_def in global_edges:
if not isinstance(el_def, GlobalElementDefinition):
el_def = JsonConverter.from_json(
el_def, GlobalElementDefinition)
self.global_edges.append(el_def)
elif isinstance(global_edges, GlobalElementDefinition):
self.global_edges.append(global_edges)
else:
for group, el_def in global_edges.items():
if not isinstance(el_def, GlobalElementDefinition):
el_def = JsonConverter.from_json(
el_def, GlobalElementDefinition)
self.global_edges.append(el_def)
def to_json(self):
view = {}
if self.entities is not None:
el_defs = {}
for el_def in self.entities:
el_defs[el_def.group] = el_def.to_json()
view['entities'] = el_defs
if self.edges is not None:
el_defs = {}
for el_def in self.edges:
el_defs[el_def.group] = el_def.to_json()
view['edges'] = el_defs
if self.global_elements is not None:
el_defs = []
for el_def in self.global_elements:
el_defs.append(el_def.to_json())
view['globalElements'] = el_defs
if self.global_entities is not None:
el_defs = []
for el_def in self.global_entities:
el_defs.append(el_def.to_json())
view['globalEntities'] = el_defs
if self.global_edges is not None:
el_defs = []
for el_def in self.global_edges:
el_defs.append(el_def.to_json())
view['globalEdges'] = el_defs
if self.all_edges is True:
view['allEdges'] = True
if self.all_entities is True:
view['allEntities'] = True
return view
class NamedView(View):
CLASS = 'uk.gov.gchq.gaffer.data.elementdefinition.view.NamedView'
def __init__(self, name, parameters=None, entities=None, edges=None,
global_elements=None,
global_entities=None, global_edges=None):
super().__init__(entities, edges, global_elements, global_entities,
global_edges)
self.name = name
self.parameters = parameters
def to_json(self):
view = super().to_json()
view['class'] = self.CLASS
view['name'] = self.name
if self.parameters is not None:
view['parameters'] = self.parameters
return view
class ElementDefinition(ToJson, ToCodeString):
CLASS = 'uk.gov.gchq.gaffer.data.elementdefinition.view.ViewElementDefinition'
def __init__(self, group='',
transient_properties=None,
group_by=None,
pre_aggregation_filter_functions=None,
aggregate_functions=None,
post_aggregation_filter_functions=None,
transform_functions=None,
post_transform_filter_functions=None,
properties=None,
exclude_properties=None):
super().__init__()
self.group = group
if transient_properties is None:
self.transient_properties = None
else:
self.transient_properties = {}
if isinstance(transient_properties, list):
for prop in transient_properties:
if not isinstance(prop, Property):
prop = JsonConverter.from_json(prop, Property)
self.transient_properties[prop.name] = prop.class_name
else:
for propName, propClass in transient_properties.items():
self.transient_properties[propName] = propClass
self.pre_aggregation_filter_functions = JsonConverter.from_json(
pre_aggregation_filter_functions,
gaffer_predicates.PredicateContext)
self.aggregate_functions = JsonConverter.from_json(
aggregate_functions, gaffer_predicates.PredicateContext)
self.post_aggregation_filter_functions = JsonConverter.from_json(
post_aggregation_filter_functions,
gaffer_predicates.PredicateContext)
self.transform_functions = JsonConverter.from_json(
transform_functions, gaffer_functions.FunctionContext)
self.post_transform_filter_functions = JsonConverter.from_json(
post_transform_filter_functions, gaffer_predicates.PredicateContext)
self.group_by = group_by
self.properties = properties
self.exclude_properties = exclude_properties
def to_json(self):
element_def = {}
if self.transient_properties is not None:
element_def['transientProperties'] = self.transient_properties
if self.pre_aggregation_filter_functions is not None:
funcs = []
for func in self.pre_aggregation_filter_functions:
funcs.append(func.to_json())
element_def['preAggregationFilterFunctions'] = funcs
if self.post_aggregation_filter_functions is not None:
funcs = []
for func in self.post_aggregation_filter_functions:
funcs.append(func.to_json())
element_def['postAggregationFilterFunctions'] = funcs
if self.transform_functions is not None:
funcs = []
for func in self.transform_functions:
funcs.append(func.to_json())
element_def['transformFunctions'] = funcs
if self.post_transform_filter_functions is not None:
funcs = []
for func in self.post_transform_filter_functions:
funcs.append(func.to_json())
element_def['postTransformFilterFunctions'] = funcs
if self.group_by is not None:
element_def['groupBy'] = self.group_by
if self.properties is not None:
element_def['properties'] = self.properties
if self.exclude_properties is not None:
element_def['excludeProperties'] = self.exclude_properties
return element_def
class ElementTransformDefinition(ToJson, ToCodeString):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.function.ElementTransformDefinition'
def __init__(self, group='',
functions=None):
super().__init__()
self.group = group
self.functions = JsonConverter.from_json(
functions, gaffer_functions.FunctionContext)
def to_json(self):
element_def = {}
if self.functions is not None:
funcs = []
for func in self.functions:
funcs.append(func.to_json())
element_def['functions'] = funcs
return element_def
class AggregatePair(ToJson, ToCodeString):
CLASS = 'uk.gov.gchq.gaffer.operation.util.AggregatePair'
def __init__(self,
group=None,
group_by=None,
element_aggregator=None):
super().__init__()
self.group = group
if group_by is not None and not isinstance(group_by, list):
group_by = [group_by]
self.group_by = group_by
if element_aggregator is not None and not isinstance(element_aggregator,
ElementAggregateDefinition):
element_aggregator = ElementAggregateDefinition(
operators=element_aggregator['operators'])
self.element_aggregator = element_aggregator
def to_json(self):
element_def = {}
if self.group_by is not None:
element_def['groupBy'] = self.group_by
if self.element_aggregator is not None:
element_def['elementAggregator'] = self.element_aggregator.to_json()
return element_def
class ElementAggregateDefinition(ToJson, ToCodeString):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.function.ElementAggregateDefinition'
def __init__(self, operators=None):
super().__init__()
self.operators = JsonConverter.from_json(
operators, gaffer_binaryoperators.BinaryOperatorContext)
def to_json(self):
element_def = {}
if self.operators is not None:
funcs = []
for function in self.operators:
funcs.append(function.to_json())
element_def['operators'] = funcs
return element_def
class ElementFilterDefinition(ToJson, ToCodeString):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.function.ElementFilterDefinition'
def __init__(self, group='',
predicates=None):
super().__init__()
self.group = group
self.predicates = JsonConverter.from_json(
predicates, gaffer_predicates.PredicateContext)
def to_json(self):
element_def = {}
if self.predicates is not None:
funcs = []
for function in self.predicates:
funcs.append(function.to_json())
element_def['predicates'] = funcs
return element_def
class GlobalElementFilterDefinition(ToJson, ToCodeString):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.function.GlobalElementFilterDefinition'
def __init__(self, predicates=None):
super().__init__()
self.predicates = JsonConverter.from_json(
predicates, gaffer_predicates.PredicateContext)
def to_json(self):
element_def = {}
if self.predicates is not None:
funcs = []
for func in self.predicates:
funcs.append(func.to_json())
element_def['predicates'] = funcs
return element_def
class GlobalElementDefinition(ToJson, ToCodeString):
CLASS = 'uk.gov.gchq.gaffer.data.elementdefinition.view.GlobalViewElementDefinition'
def __init__(self,
transient_properties=None,
group_by=None,
pre_aggregation_filter_functions=None,
post_aggregation_filter_functions=None,
transform_functions=None,
post_transform_filter_functions=None,
properties=None,
exclude_properties=None):
super().__init__()
if transient_properties is None:
self.transient_properties = None
else:
self.transient_properties = {}
if isinstance(transient_properties, list):
for prop in transient_properties:
if not isinstance(prop, Property):
prop = JsonConverter.from_json(prop, Property)
self.transient_properties[prop.name] = prop.class_name
else:
for propName, propClass in transient_properties.items():
self.transient_properties[propName] = propClass
self.pre_aggregation_filter_functions = JsonConverter.from_json(
pre_aggregation_filter_functions,
gaffer_predicates.PredicateContext)
self.post_aggregation_filter_functions = JsonConverter.from_json(
post_aggregation_filter_functions,
gaffer_predicates.PredicateContext)
self.transform_functions = JsonConverter.from_json(
transform_functions, gaffer_functions.FunctionContext)
self.post_transform_filter_functions = JsonConverter.from_json(
post_transform_filter_functions, gaffer_predicates.PredicateContext)
self.group_by = group_by
self.properties = properties
self.exclude_properties = exclude_properties
def to_json(self):
element_def = {}
if self.transient_properties is not None:
element_def['transientProperties'] = self.transient_properties
if self.pre_aggregation_filter_functions is not None:
funcs = []
for func in self.pre_aggregation_filter_functions:
funcs.append(func.to_json())
element_def['preAggregationFilterFunctions'] = funcs
if self.post_aggregation_filter_functions is not None:
funcs = []
for func in self.post_aggregation_filter_functions:
funcs.append(func.to_json())
element_def['postAggregationFilterFunctions'] = funcs
if self.transform_functions is not None:
funcs = []
for func in self.transform_functions:
funcs.append(func.to_json())
element_def['transformFunctions'] = funcs
if self.post_transform_filter_functions is not None:
funcs = []
for func in self.post_transform_filter_functions:
funcs.append(func.to_json())
element_def['postTransformFilterFunctions'] = funcs
if self.group_by is not None:
element_def['groupBy'] = self.group_by
if self.properties is not None:
element_def['properties'] = self.properties
if self.exclude_properties is not None:
element_def['excludeProperties'] = self.exclude_properties
return element_def
class Property(ToJson, ToCodeString):
CLASS = "uk.gov.gchq.gaffer.data.element.Property"
def __init__(self, name, class_name):
super().__init__()
if not isinstance(name, str):
raise TypeError('Name must be a string')
if not isinstance(class_name, str):
raise TypeError('ClassName must be a class name string')
self.name = name
self.class_name = class_name
def to_json(self):
return {self.name: self.class_name}
class Operation(ToJson, ToCodeString):
def __init__(self,
_class_name,
view=None,
options=None,
views=None):
self._class_name = _class_name
if view is not None and isinstance(view, list):
views = view
view = None
if view is not None and isinstance(view, dict):
view = JsonConverter.from_json(view, View)
self.view = view
self.views = None
if views is not None and isinstance(views, list):
self.views = []
for view in views:
if not isinstance(view, View):
view = JsonConverter.from_json(view, View)
self.views.append(view)
self.options = options
def to_json(self):
operation = {'class': self._class_name}
if self.options is not None:
operation['options'] = self.options
if self.view is not None:
operation['view'] = self.view.to_json()
if self.views is not None:
operation['views'] = []
for view in self.views:
operation['views'].append(view.to_json())
return operation
class Match(ToJson, ToCodeString):
def __init__(self, _class_name):
self._class_name = _class_name
def to_json(self):
return {
'class': self._class_name
}
class ElementMatch(Match):
CLASS = "uk.gov.gchq.gaffer.store.operation.handler.join.match.ElementMatch"
def __init__(self, group_by_properties=None):
super().__init__(_class_name=self.CLASS)
self.group_by_properties = group_by_properties
def to_json(self):
match_json = super().to_json()
if (self.group_by_properties is not None):
match_json['groupByProperties'] = self.group_by_properties
return match_json
class KeyFunctionMatch(Match):
CLASS = "uk.gov.gchq.gaffer.store.operation.handler.join.match.KeyFunctionMatch"
def __init__(self, first_key_function=None, second_key_function=None):
super().__init__(_class_name=self.CLASS)
if not isinstance(first_key_function, gaffer_functions.Function):
self.first_key_function = JsonConverter.from_json(first_key_function, class_obj=gaffer_functions.Function)
else:
self.first_key_function = first_key_function
if not isinstance(second_key_function, gaffer_functions.Function):
self.second_key_function = JsonConverter.from_json(second_key_function, class_obj=gaffer_functions.Function)
else:
self.second_key_function = second_key_function
def to_json(self):
match_json = super().to_json()
if self.first_key_function is not None:
match_json['firstKeyFunction'] = self.first_key_function.to_json()
if self.second_key_function is not None:
match_json['secondKeyFunction'] = self.second_key_function.to_json()
return match_json
class OperationChain(Operation):
CLASS = "uk.gov.gchq.gaffer.operation.OperationChain"
def __init__(self, operations, options=None):
super().__init__(
_class_name=self.CLASS,
options=options)
self._class_name = self.CLASS
self.operations = operations
def to_json(self):
operation_chain_json = super().to_json()
operations_json = []
for operation in self.operations:
if isinstance(operation, ToJson):
operations_json.append(operation.to_json())
else:
operations_json.append(operation)
operation_chain_json['operations'] = operations_json
return operation_chain_json
class OperationChainDAO(OperationChain):
CLASS = "uk.gov.gchq.gaffer.operation.OperationChainDAO"
def __init__(self, operations,
options=None):
super().__init__(operations=operations, options=options)
def to_json(self):
operation_chain_json = super().to_json()
operation_chain_json.pop('class', None)
return operation_chain_json
class GetTraits(Operation):
CLASS = 'uk.gov.gchq.gaffer.store.operation.GetTraits'
def __init__(self,
current_traits,
options=None):
super().__init__(
_class_name=self.CLASS, options=options)
self.current_traits = current_traits
def to_json(self):
operation = super().to_json()
operation['currentTraits'] = self.current_traits
return operation
class AddElements(Operation):
"""
This class defines a Gaffer Add Operation.
"""
CLASS = 'uk.gov.gchq.gaffer.operation.impl.add.AddElements'
def __init__(self,
input=None,
skip_invalid_elements=None,
validate=None,
options=None):
super().__init__(
_class_name=self.CLASS,
options=options)
self.input = input
self.skip_invalid_elements = skip_invalid_elements
self.validate = validate
def to_json(self):
operation = super().to_json()
if self.skip_invalid_elements is not None:
operation['skipInvalidElements'] = self.skip_invalid_elements
if self.validate is not None:
operation['validate'] = self.validate
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
return operation
class GenerateElements(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.generate.GenerateElements'
def __init__(self,
element_generator,
input=None,
options=None):
super().__init__(
_class_name=self.CLASS,
options=options)
if not isinstance(element_generator, gaffer_functions.ElementGenerator):
element_generator = gaffer_functions.ElementGenerator(
element_generator['class'],
element_generator)
self.element_generator = element_generator
self.input = input
def to_json(self):
operation = super().to_json()
if self.input is not None:
input_json = []
for item in self.input:
if isinstance(item, ToJson):
input_json.append(item.to_json())
else:
input_json.append(item)
operation['input'] = input_json
operation['elementGenerator'] = self.element_generator.to_json()
return operation
class GenerateObjects(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.generate.GenerateObjects'
def __init__(self,
element_generator,
input=None,
options=None):
super().__init__(
_class_name=self.CLASS,
options=options)
if not isinstance(element_generator, gaffer_functions.ElementGenerator):
element_generator = gaffer_functions.ElementGenerator(
element_generator['class'],
element_generator)
self.element_generator = element_generator
self.input = input
def to_json(self):
operation = super().to_json()
if self.input is not None:
elements_json = []
for element in self.input:
if isinstance(element, ToJson):
elements_json.append(element.to_json())
else:
elements_json.append(element)
operation['input'] = elements_json
operation['elementGenerator'] = self.element_generator.to_json()
return operation
class Validate(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.Validate'
def __init__(self,
validate,
skip_invalid_elements=True,
options=None):
super().__init__(
_class_name=self.CLASS, options=options)
self.validate = validate
self.skip_invalid_elements = skip_invalid_elements
def to_json(self):
operation = super().to_json()
operation['validate'] = self.validate
operation['skipInvalidElements'] = self.skip_invalid_elements
return operation
class ExportToGafferResultCache(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.resultcache.ExportToGafferResultCache'
def __init__(self,
key=None,
op_auths=None,
options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
if not isinstance(key, str) and key is not None:
raise TypeError('key must be a string')
self.key = key
self.op_auths = op_auths
def to_json(self):
operation = super().to_json()
if self.key is not None:
operation['key'] = self.key
if self.op_auths is not None:
operation['opAuths'] = self.op_auths
return operation
class GetGafferResultCacheExport(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.resultcache.GetGafferResultCacheExport'
def __init__(self,
job_id=None,
key=None,
options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
self.job_id = job_id
self.key = key
def to_json(self):
operation = super().to_json()
if self.job_id is not None:
operation['jobId'] = self.job_id
if self.key is not None:
operation['key'] = self.key
return operation
class ExportToSet(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.set.ExportToSet'
def __init__(self, key=None, options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
if not isinstance(key, str) and key is not None:
raise TypeError('key must be a string')
self.key = key
def to_json(self):
operation = super().to_json()
if self.key is not None:
operation['key'] = self.key
return operation
class GetSetExport(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.set.GetSetExport'
def __init__(self,
job_id=None,
key=None,
start=None,
end=None,
options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
self.job_id = job_id
self.key = key
self.start = start
self.end = end
def to_json(self):
operation = super().to_json()
if self.job_id is not None:
operation['jobId'] = self.job_id
if self.key is not None:
operation['key'] = self.key
if self.start is not None:
operation['start'] = self.start
if self.end is not None:
operation['end'] = self.end
return operation
class GetExports(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.export.GetExports'
def __init__(self,
get_exports=None,
options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
self.get_exports = []
for export in get_exports:
if not isinstance(export, Operation):
export = JsonConverter.from_json(export)
self.get_exports.append(export)
def to_json(self):
operation = super().to_json()
if self.get_exports is not None:
exports = []
for export in self.get_exports:
exports.append(export.to_json())
operation['getExports'] = exports
return operation
class GetJobDetails(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.job.GetJobDetails'
def __init__(self,
job_id=None,
options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
self.job_id = job_id
def to_json(self):
operation = super().to_json()
if self.job_id is not None:
operation['jobId'] = self.job_id
return operation
class GetAllJobDetails(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.job.GetAllJobDetails'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
def to_json(self):
operation = super().to_json()
return operation
class GetJobResults(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.job.GetJobResults'
def __init__(self, job_id, options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
self.job_id = job_id
def to_json(self):
operation = super().to_json()
operation['jobId'] = self.job_id
return operation
class CancelScheduledJob(Operation):
CLASS = "uk.gov.gchq.gaffer.operation.impl.job.CancelScheduledJob"
def __init__(self, job_id):
super().__init__(_class_name=self.CLASS)
self.job_id = job_id
def to_json(self):
operation_json = super().to_json()
if self.job_id is not None:
operation_json['jobId'] = self.job_id
return operation_json
class SplitStoreFromFile(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.SplitStoreFromFile'
def __init__(self, input_path, options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
self.input_path = input_path
def to_json(self):
operation = super().to_json()
operation['inputPath'] = self.input_path
return operation
class SplitStoreFromIterable(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.SplitStoreFromIterable'
def __init__(self, input=None, options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
self.input = input
def to_json(self):
operation = super().to_json()
if self.input is not None:
operation['input'] = self.input
return operation
class SampleElementsForSplitPoints(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.SampleElementsForSplitPoints'
def __init__(self, input=None, num_splits=None, proportion_to_sample=None,
options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
self.input = input
self.num_splits = num_splits
self.proportion_to_sample = proportion_to_sample
def to_json(self):
operation = super().to_json()
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
if self.num_splits is not None:
operation['numSplits'] = self.num_splits
if self.proportion_to_sample is not None:
operation['proportionToSample'] = self.proportion_to_sample
return operation
class GetOperation(Operation):
def __init__(self,
_class_name,
input=None,
view=None,
directed_type=None,
include_incoming_out_going=None,
# deprecated, use seed_matching instead
seed_matching_type=None,
seed_matching=None,
options=None):
super().__init__(
_class_name=_class_name,
view=view,
options=options)
if not isinstance(_class_name, str):
raise TypeError(
'ClassName must be the operation class name as a string')
self.input = input
self.directed_type = directed_type
self.include_incoming_out_going = include_incoming_out_going
self.seed_matching = seed_matching_type
if seed_matching is not None:
self.seed_matching = seed_matching
def to_json(self):
operation = super().to_json()
if self.input is not None:
json_seeds = []
if isinstance(self.input, list):
for seed in self.input:
if isinstance(seed, ElementSeed):
json_seeds.append(seed.to_json())
else:
json_seeds.append(EntitySeed(seed).to_json())
else:
if isinstance(self.input, ElementSeed):
json_seeds.append(self.input.to_json())
else:
json_seeds.append(EntitySeed(self.input).to_json())
operation['input'] = json_seeds
if self.seed_matching is not None:
operation['seedMatching'] = self.seed_matching
        if self.directed_type is not None:
            operation['directedType'] = self.directed_type
        if self.include_incoming_out_going is not None:
            operation[
                'includeIncomingOutGoing'] = self.include_incoming_out_going
return operation
class GetElements(GetOperation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.get.GetElements'
def __init__(self,
input=None,
view=None,
directed_type=None,
include_incoming_out_going=None,
seed_matching_type=None,
# deprecated, use seed_matching instead
seed_matching=None,
options=None):
super().__init__(
_class_name=self.CLASS,
input=input,
view=view,
directed_type=directed_type,
include_incoming_out_going=include_incoming_out_going,
seed_matching_type=seed_matching_type,
seed_matching=seed_matching,
options=options)
class GetFromEndpoint(Operation):
CLASS = "uk.gov.gchq.gaffer.operation.impl.get.GetFromEndpoint"
def __init__(self, endpoint, options=None):
super().__init__(_class_name=self.CLASS, options=options)
self.endpoint = endpoint
def to_json(self):
operation_json = super().to_json()
if self.endpoint is not None:
operation_json['endpoint'] = self.endpoint
return operation_json
class GetAdjacentIds(GetOperation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds'
def __init__(self,
input=None,
view=None,
include_incoming_out_going=None,
options=None):
super().__init__(
_class_name=self.CLASS,
input=input,
view=view,
directed_type=None,
include_incoming_out_going=include_incoming_out_going,
seed_matching_type=None,
seed_matching=None,
options=options)
class GetAllElements(GetOperation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.get.GetAllElements'
def __init__(self,
view=None,
directed_type=None,
options=None):
super().__init__(
_class_name=self.CLASS,
input=None,
view=view,
directed_type=directed_type,
include_incoming_out_going=None,
options=options)
class NamedOperation(GetOperation):
CLASS = 'uk.gov.gchq.gaffer.named.operation.NamedOperation'
def __init__(self,
operation_name,
input=None,
view=None,
parameters=None,
options=None):
super().__init__(
_class_name=self.CLASS,
input=input,
view=view,
directed_type=None,
include_incoming_out_going=None,
seed_matching_type=None,
seed_matching=None,
options=options)
self.operation_name = operation_name
self.parameters = parameters
def to_json(self):
operation = super().to_json()
operation['operationName'] = self.operation_name
if self.parameters is not None:
operation['parameters'] = self.parameters
return operation
class AddNamedOperation(Operation):
CLASS = 'uk.gov.gchq.gaffer.named.operation.AddNamedOperation'
def __init__(self,
operation_chain,
operation_name,
description=None,
read_access_roles=None,
write_access_roles=None,
overwrite_flag=None,
parameters=None,
options=None,
score=None):
super().__init__(
_class_name=self.CLASS,
options=options)
if isinstance(operation_chain, OperationChain):
if not isinstance(operation_chain, OperationChainDAO):
operation_chain = OperationChainDAO(
operations=operation_chain.operations)
self.operation_chain = operation_chain
else:
operations = []
ops = operation_chain
if isinstance(ops, dict):
ops = ops['operations']
if not isinstance(ops, list):
raise TypeError('Operation chain type was not recognised')
for op in ops:
if not isinstance(op, Operation):
op = JsonConverter.from_json(op)
operations.append(op)
self.operation_chain = OperationChainDAO(operations=operations)
self.operation_name = operation_name
self.description = description
self.read_access_roles = read_access_roles
self.write_access_roles = write_access_roles
self.overwrite_flag = overwrite_flag
self.score = score
self.parameters = None
if parameters is not None:
self.parameters = []
if isinstance(parameters, list):
for param in parameters:
if not isinstance(param, NamedOperationParameter):
param = JsonConverter.from_json(param,
NamedOperationParameter)
self.parameters.append(param)
else:
for name, param in parameters.items():
param = dict(param)
param['name'] = name
param = JsonConverter.from_json(param,
NamedOperationParameter)
self.parameters.append(param)
def to_json(self):
operation = super().to_json()
if isinstance(self.operation_chain, OperationChain):
operation['operationChain'] = self.operation_chain.to_json()
else:
operation['operationChain'] = self.operation_chain
operation['operationName'] = self.operation_name
if self.overwrite_flag is not None:
operation['overwriteFlag'] = self.overwrite_flag
if self.description is not None:
operation['description'] = self.description
if self.read_access_roles is not None:
operation['readAccessRoles'] = self.read_access_roles
if self.write_access_roles is not None:
operation['writeAccessRoles'] = self.write_access_roles
if self.score is not None:
operation['score'] = self.score
if self.parameters is not None:
operation['parameters'] = {}
for param in self.parameters:
operation['parameters'][param.name] = param.get_detail()
return operation
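# Illustrative usage (names are examples only): store a reusable one-hop query
# under a name so it can be invoked later via NamedOperation.
#   AddNamedOperation(
#       operation_chain=OperationChain(operations=[GetElements()]),
#       operation_name="one-hop",
#       description="Gets elements adjacent to the supplied seeds",
#       overwrite_flag=True)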
class DeleteNamedOperation(Operation):
CLASS = 'uk.gov.gchq.gaffer.named.operation.DeleteNamedOperation'
def __init__(self, operation_name, options=None):
super().__init__(
_class_name=self.CLASS,
options=options)
self.operation_name = operation_name
def to_json(self):
operation = super().to_json()
operation['operationName'] = self.operation_name
return operation
class GetAllNamedOperations(Operation):
CLASS = 'uk.gov.gchq.gaffer.named.operation.GetAllNamedOperations'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS,
options=options)
class AddNamedView(Operation):
CLASS = 'uk.gov.gchq.gaffer.named.view.AddNamedView'
def __init__(self,
view,
name,
description=None,
overwrite_flag=None,
parameters=None,
write_access_roles=None,
options=None):
super().__init__(
_class_name=self.CLASS,
options=options)
if not isinstance(view, View):
view = JsonConverter.from_json(view, View)
self.view = view
self.name = name
self.description = description
self.overwrite_flag = overwrite_flag
self.write_access_roles = write_access_roles
self.parameters = None
if parameters is not None:
self.parameters = []
if isinstance(parameters, list):
for param in parameters:
if not isinstance(param, NamedViewParameter):
param = JsonConverter.from_json(param,
NamedViewParameter)
self.parameters.append(param)
else:
for name, param in parameters.items():
param = dict(param)
param['name'] = name
param = JsonConverter.from_json(param,
NamedViewParameter)
self.parameters.append(param)
def to_json(self):
operation = super().to_json()
if isinstance(self.view, View):
operation['view'] = self.view.to_json()
else:
operation['view'] = self.view
operation['name'] = self.name
if self.overwrite_flag is not None:
operation['overwriteFlag'] = self.overwrite_flag
if self.description is not None:
operation['description'] = self.description
if self.parameters is not None:
operation['parameters'] = {}
for param in self.parameters:
operation['parameters'][param.name] = param.get_detail()
if self.write_access_roles is not None:
operation['writeAccessRoles'] = self.write_access_roles
return operation
class DeleteNamedView(Operation):
CLASS = 'uk.gov.gchq.gaffer.named.view.DeleteNamedView'
def __init__(self, name, options=None):
super().__init__(
_class_name=self.CLASS,
options=options)
self.name = name
def to_json(self):
operation = super().to_json()
operation['name'] = self.name
return operation
class GetAllNamedViews(Operation):
CLASS = 'uk.gov.gchq.gaffer.named.view.GetAllNamedViews'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS,
options=options)
class DiscardOutput(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.DiscardOutput'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS, options=options)
class Count(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.Count'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS, options=options
)
class CountGroups(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.CountGroups'
def __init__(self, limit=None, options=None):
super().__init__(
_class_name=self.CLASS,
view=None,
options=options)
self.limit = limit
def to_json(self):
operation = super().to_json()
if self.limit is not None:
operation['limit'] = self.limit
return operation
class Limit(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.Limit'
def __init__(self, result_limit, truncate=None, options=None):
super().__init__(_class_name=self.CLASS, options=options)
self.result_limit = result_limit
self.truncate = truncate
def to_json(self):
operation = super().to_json()
operation['resultLimit'] = self.result_limit
if self.truncate is not None:
operation['truncate'] = self.truncate
return operation
class ToSet(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.output.ToSet'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS, options=options)
class ToArray(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.output.ToArray'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS, options=options)
class ToEntitySeeds(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.output.ToEntitySeeds'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS, options=options)
class ToList(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.output.ToList'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS, options=options)
class ToStream(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.output.ToStream'
def __init__(self, options=None):
super().__init__(
_class_name=self.CLASS, options=options)
class ToVertices(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.output.ToVertices'
def __init__(self, edge_vertices=None, use_matched_vertex=None,
options=None):
super().__init__(
_class_name=self.CLASS, options=options)
self.edge_vertices = edge_vertices
self.use_matched_vertex = use_matched_vertex
def to_json(self):
operation = super().to_json()
if self.edge_vertices is not None:
operation['edgeVertices'] = self.edge_vertices
if self.use_matched_vertex is not None:
operation['useMatchedVertex'] = self.use_matched_vertex
return operation
class ToCsv(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.output.ToCsv'
def __init__(self,
element_generator,
include_header=True, options=None):
super().__init__(
_class_name=self.CLASS, options=options
)
if not isinstance(element_generator, gaffer_functions.CsvGenerator):
element_generator = JsonConverter.from_json(
element_generator, gaffer_functions.CsvGenerator)
self.element_generator = element_generator
self.include_header = include_header
def to_json(self):
operation = super().to_json()
operation['elementGenerator'] = self.element_generator.to_json()
operation['includeHeader'] = self.include_header
return operation
class ToMap(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.output.ToMap'
def __init__(self,
element_generator, options=None):
super().__init__(
_class_name=self.CLASS, options=options
)
if not isinstance(element_generator, gaffer_functions.MapGenerator):
element_generator = JsonConverter.from_json(
element_generator, gaffer_functions.MapGenerator)
self.element_generator = element_generator
def to_json(self):
operation = super().to_json()
operation['elementGenerator'] = self.element_generator.to_json()
return operation
class Sort(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.compare.Sort'
def __init__(self, comparators,
input=None,
result_limit=None,
deduplicate=None, options=None):
super().__init__(
_class_name=self.CLASS, options=options
)
self.comparators = comparators
self.input = input
self.result_limit = result_limit
self.deduplicate = deduplicate
def to_json(self):
operation = super().to_json()
comparators_json = []
for comparator in self.comparators:
if not isinstance(comparator, Comparator):
raise TypeError(
'All comparators must be a Gaffer Comparator object')
comparators_json.append(comparator.to_json())
operation['comparators'] = comparators_json
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
if self.result_limit is not None:
operation['resultLimit'] = self.result_limit
if self.deduplicate is not None:
operation['deduplicate'] = self.deduplicate
return operation
class Max(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.compare.Max'
def __init__(self, comparators, input=None, options=None):
super().__init__(
_class_name=self.CLASS, options=options
)
self.comparators = comparators
self.input = input
def to_json(self):
operation = super().to_json()
comparators_json = []
for comparator in self.comparators:
if not isinstance(comparator, Comparator):
raise TypeError(
'All comparators must be a Gaffer Comparator object')
comparators_json.append(comparator.to_json())
operation['comparators'] = comparators_json
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
return operation
class Min(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.compare.Min'
def __init__(self, comparators, input=None, options=None):
super().__init__(
_class_name=self.CLASS, options=options
)
self.comparators = comparators
self.input = input
def to_json(self):
operation = super().to_json()
comparators_json = []
for comparator in self.comparators:
if not isinstance(comparator, Comparator):
raise TypeError(
'All comparators must be a Gaffer Comparator object')
comparators_json.append(comparator.to_json())
operation['comparators'] = comparators_json
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
return operation
class ExportToOtherGraph(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherGraph'
def __init__(self, graph_id=None, input=None, parent_schema_ids=None,
schema=None, parent_store_properties_id=None,
store_properties=None, options=None):
super().__init__(
self.CLASS, options=options
)
self.graph_id = graph_id
self.input = input
self.parent_schema_ids = parent_schema_ids
self.schema = schema
self.parent_store_properties_id = parent_store_properties_id
self.store_properties = store_properties
def to_json(self):
operation = super().to_json()
if self.graph_id is not None:
operation['graphId'] = self.graph_id
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
if self.parent_schema_ids is not None:
operation['parentSchemaIds'] = self.parent_schema_ids
if self.schema is not None:
operation['schema'] = self.schema
if self.parent_store_properties_id is not None:
operation[
'parentStorePropertiesId'] = self.parent_store_properties_id
if self.store_properties is not None:
operation['storeProperties'] = self.store_properties
return operation
class ExportToOtherAuthorisedGraph(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.export.graph.ExportToOtherAuthorisedGraph'
def __init__(self, graph_id=None, input=None, parent_schema_ids=None,
parent_store_properties_id=None, options=None):
super().__init__(
self.CLASS, options=options
)
self.graph_id = graph_id
self.input = input
self.parent_schema_ids = parent_schema_ids
self.parent_store_properties_id = parent_store_properties_id
def to_json(self):
operation = super().to_json()
if self.graph_id is not None:
operation['graphId'] = self.graph_id
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
if self.parent_schema_ids is not None:
operation['parentSchemaIds'] = self.parent_schema_ids
if self.parent_store_properties_id is not None:
operation[
'parentStorePropertiesId'] = self.parent_store_properties_id
return operation
class AddElementsFromSocket(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.add.AddElementsFromSocket'
def __init__(self, hostname=None, port=None, element_generator=None,
parallelism=None, validate=None, skip_invalid_elements=None,
delimiter=None, options=None):
super().__init__(
self.CLASS,
options=options
)
self.hostname = hostname
self.port = port
if not isinstance(element_generator, str):
raise TypeError('element_generator must be a java class name (str)')
self.element_generator = element_generator
self.parallelism = parallelism
self.validate = validate
self.skip_invalid_elements = skip_invalid_elements
self.delimiter = delimiter
def to_json(self):
operation = super().to_json()
if self.hostname is not None:
operation['hostname'] = self.hostname
if self.port is not None:
operation['port'] = self.port
if self.element_generator is not None:
operation['elementGenerator'] = self.element_generator
if self.parallelism is not None:
operation['parallelism'] = self.parallelism
if self.validate is not None:
operation['validate'] = self.validate
if self.skip_invalid_elements is not None:
operation['skipInvalidElements'] = self.skip_invalid_elements
if self.delimiter is not None:
operation['delimiter'] = self.delimiter
return operation
class AddElementsFromKafka(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.add.AddElementsFromKafka'
def __init__(self, topic, group_id, bootstrap_servers, element_generator,
parallelism=None, validate=None, skip_invalid_elements=None,
options=None):
super().__init__(
self.CLASS,
options=options
)
self.topic = topic
self.group_id = group_id
self.bootstrap_servers = bootstrap_servers
if not isinstance(element_generator, str):
raise TypeError('element_generator must be a java class name (str)')
self.element_generator = element_generator
self.parallelism = parallelism
self.validate = validate
self.skip_invalid_elements = skip_invalid_elements
def to_json(self):
operation = super().to_json()
if self.topic is not None:
operation['topic'] = self.topic
if self.group_id is not None:
operation['groupId'] = self.group_id
if self.bootstrap_servers is not None:
operation['bootstrapServers'] = self.bootstrap_servers
if self.element_generator is not None:
operation['elementGenerator'] = self.element_generator
if self.parallelism is not None:
operation['parallelism'] = self.parallelism
if self.validate is not None:
operation['validate'] = self.validate
if self.skip_invalid_elements is not None:
operation['skipInvalidElements'] = self.skip_invalid_elements
return operation
class AddElementsFromFile(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.add.AddElementsFromFile'
def __init__(self, filename=None, element_generator=None,
parallelism=None, validate=None, skip_invalid_elements=None,
options=None):
super().__init__(
self.CLASS,
options=options
)
self.filename = filename
if not isinstance(element_generator, str):
raise TypeError('element_generator must be a java class name (str)')
self.element_generator = element_generator
self.parallelism = parallelism
self.validate = validate
self.skip_invalid_elements = skip_invalid_elements
def to_json(self):
operation = super().to_json()
if self.filename is not None:
operation['filename'] = self.filename
if self.element_generator is not None:
operation['elementGenerator'] = self.element_generator
if self.parallelism is not None:
operation['parallelism'] = self.parallelism
if self.validate is not None:
operation['validate'] = self.validate
if self.skip_invalid_elements is not None:
operation['skipInvalidElements'] = self.skip_invalid_elements
return operation
class GetElementsBetweenSets(GetOperation):
CLASS = 'uk.gov.gchq.gaffer.accumulostore.operation.impl.GetElementsBetweenSets'
def __init__(self,
input=None,
input_b=None,
view=None,
directed_type=None,
include_incoming_out_going=None,
# deprecated, use seed_matching instead
seed_matching_type=None,
seed_matching=None,
options=None):
super().__init__(
_class_name=self.CLASS,
input=input,
view=view,
directed_type=directed_type,
include_incoming_out_going=include_incoming_out_going,
seed_matching_type=seed_matching_type,
seed_matching=seed_matching,
options=options)
self.input_b = input_b
def to_json(self):
operation = super().to_json()
if self.input_b is not None:
json_seeds_b = []
for seed_b in self.input_b:
if isinstance(seed_b, ElementSeed):
json_seeds_b.append(seed_b.to_json())
elif isinstance(seed_b, str):
json_seeds_b.append(EntitySeed(seed_b).to_json())
else:
raise TypeError(
'SeedsB argument must contain ElementSeed objects')
operation['inputB'] = json_seeds_b
return operation
class GetElementsWithinSet(GetOperation):
CLASS = 'uk.gov.gchq.gaffer.accumulostore.operation.impl.GetElementsWithinSet'
def __init__(self,
input=None,
view=None,
directed_type=None,
options=None):
super().__init__(
_class_name=self.CLASS,
input=input,
view=view,
directed_type=directed_type,
include_incoming_out_going=None,
options=options)
class GetElementsInRanges(GetOperation):
CLASS = 'uk.gov.gchq.gaffer.accumulostore.operation.impl.GetElementsInRanges'
def __init__(self,
input=None,
view=None,
directed_type=None,
include_incoming_out_going=None,
# deprecated, use seed_matching instead
seed_matching_type=None,
seed_matching=None,
options=None):
super().__init__(
_class_name=self.CLASS,
input=None,
view=view,
directed_type=directed_type,
include_incoming_out_going=include_incoming_out_going,
seed_matching_type=seed_matching_type,
seed_matching=seed_matching,
options=options)
self.input = input
def to_json(self):
operation = super().to_json()
if self.input is not None:
json_seed_pairs = []
for seed_pair in self.input:
if isinstance(seed_pair, SeedPair):
json_seed_pairs.append(seed_pair.to_json())
else:
raise TypeError(
'input argument must contain SeedPair objects')
operation['input'] = json_seed_pairs
return operation
class SummariseGroupOverRanges(Operation):
CLASS = 'uk.gov.gchq.gaffer.accumulostore.operation.impl.SummariseGroupOverRanges'
def __init__(self, input,
view=None,
include_incoming_out_going=None,
directed_type=None,
options=None):
super().__init__(
_class_name=self.CLASS,
options=options,
view=view
)
self.input = input
self.include_incoming_out_going = include_incoming_out_going
self.directed_type = directed_type
def to_json(self):
operation = super().to_json()
if self.input is not None:
json_seed_pairs = []
for seed_pair in self.input:
if isinstance(seed_pair, SeedPair):
json_seed_pairs.append(seed_pair.to_json())
else:
raise TypeError(
'input argument must contain SeedPair objects')
operation['input'] = json_seed_pairs
if self.include_incoming_out_going is not None:
operation[
'includeIncomingOutGoing'] = self.include_incoming_out_going
if self.directed_type is not None:
operation['directedType'] = self.directed_type
return operation
class Filter(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.function.Filter'
def __init__(self,
input=None,
entities=None,
edges=None,
global_elements=None,
global_entities=None,
global_edges=None,
options=None):
super().__init__(
_class_name=self.CLASS,
options=options
)
self.input = input
self.entities = None
self.edges = None
self.global_elements = None
self.global_entities = None
self.global_edges = None
if entities is not None:
self.entities = []
if isinstance(entities, list):
for el_def in entities:
if not isinstance(el_def, ElementFilterDefinition):
el_def = JsonConverter.from_json(
el_def, ElementFilterDefinition)
self.entities.append(el_def)
else:
for group, el_def in entities.items():
if not isinstance(el_def, ElementFilterDefinition):
el_def = JsonConverter.from_json(
el_def, ElementFilterDefinition)
el_def.group = group
self.entities.append(el_def)
if edges is not None:
self.edges = []
if isinstance(edges, list):
for el_def in edges:
if not isinstance(el_def, ElementFilterDefinition):
el_def = JsonConverter.from_json(
el_def, ElementFilterDefinition)
self.edges.append(el_def)
else:
for group, el_def in edges.items():
if not isinstance(el_def, ElementFilterDefinition):
el_def = JsonConverter.from_json(
el_def, ElementFilterDefinition)
el_def.group = group
self.edges.append(el_def)
if global_elements is not None:
if not isinstance(global_elements, GlobalElementFilterDefinition):
global_elements = JsonConverter.from_json(
global_elements, GlobalElementFilterDefinition)
self.global_elements = global_elements
if global_entities is not None:
if not isinstance(global_entities, GlobalElementFilterDefinition):
global_entities = JsonConverter.from_json(
global_entities, GlobalElementFilterDefinition)
self.global_entities = global_entities
if global_edges is not None:
if not isinstance(global_edges, GlobalElementFilterDefinition):
global_edges = JsonConverter.from_json(
global_edges, GlobalElementFilterDefinition)
self.global_edges = global_edges
def to_json(self):
operation = super().to_json()
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
if self.entities is not None:
el_defs = {}
for el_def in self.entities:
el_defs[el_def.group] = el_def.to_json()
operation['entities'] = el_defs
if self.edges is not None:
el_defs = {}
for el_def in self.edges:
el_defs[el_def.group] = el_def.to_json()
operation['edges'] = el_defs
if self.global_elements is not None:
operation['globalElements'] = self.global_elements.to_json()
if self.global_entities is not None:
operation['globalEntities'] = self.global_entities.to_json()
if self.global_edges is not None:
operation['globalEdges'] = self.global_edges.to_json()
return operation
class Aggregate(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.function.Aggregate'
def __init__(self,
input=None,
entities=None,
edges=None,
options=None):
super().__init__(
_class_name=self.CLASS,
options=options
)
self.input = input
self.entities = None
self.edges = None
if entities is not None:
self.entities = []
if isinstance(entities, list):
for el_def in entities:
if not isinstance(el_def, AggregatePair):
el_def = JsonConverter.from_json(
el_def, AggregatePair)
self.entities.append(el_def)
else:
for group, el_def in entities.items():
if not isinstance(el_def, AggregatePair):
el_def = JsonConverter.from_json(
el_def, AggregatePair)
el_def.group = group
self.entities.append(el_def)
if edges is not None:
self.edges = []
if isinstance(edges, list):
for el_def in edges:
if not isinstance(el_def, AggregatePair):
el_def = JsonConverter.from_json(
el_def, AggregatePair)
self.edges.append(el_def)
else:
for group, el_def in edges.items():
if not isinstance(el_def, AggregatePair):
el_def = JsonConverter.from_json(
el_def, AggregatePair)
el_def.group = group
self.edges.append(el_def)
def to_json(self):
operation = super().to_json()
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
if self.entities is not None:
el_defs = {}
for el_def in self.entities:
el_defs[el_def.group] = el_def.to_json()
operation['entities'] = el_defs
if self.edges is not None:
el_defs = {}
for el_def in self.edges:
el_defs[el_def.group] = el_def.to_json()
operation['edges'] = el_defs
return operation
class Transform(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.function.Transform'
def __init__(self,
input=None,
entities=None,
edges=None,
options=None):
super().__init__(
_class_name=self.CLASS,
options=options
)
self.input = input
self.entities = None
self.edges = None
if entities is not None:
self.entities = []
if isinstance(entities, list):
for el_def in entities:
if not isinstance(el_def, ElementTransformDefinition):
el_def = JsonConverter.from_json(
el_def, ElementTransformDefinition)
self.entities.append(el_def)
else:
for group, el_def in entities.items():
if not isinstance(el_def, ElementTransformDefinition):
el_def = JsonConverter.from_json(
el_def, ElementTransformDefinition)
el_def.group = group
self.entities.append(el_def)
if edges is not None:
self.edges = []
if isinstance(edges, list):
for el_def in edges:
if not isinstance(el_def, ElementTransformDefinition):
el_def = JsonConverter.from_json(
el_def, ElementTransformDefinition)
self.edges.append(el_def)
else:
for group, el_def in edges.items():
if not isinstance(el_def, ElementTransformDefinition):
el_def = JsonConverter.from_json(
el_def, ElementTransformDefinition)
el_def.group = group
self.edges.append(el_def)
def to_json(self):
operation = super().to_json()
if self.input is not None:
elements_json = []
for element in self.input:
elements_json.append(element.to_json())
operation['input'] = elements_json
if self.entities is not None:
el_defs = {}
for el_def in self.entities:
el_defs[el_def.group] = el_def.to_json()
operation['entities'] = el_defs
if self.edges is not None:
el_defs = {}
for el_def in self.edges:
el_defs[el_def.group] = el_def.to_json()
operation['edges'] = el_defs
return operation
class ScoreOperationChain(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.ScoreOperationChain'
def __init__(self, operation_chain, options=None):
super().__init__(_class_name=self.CLASS,
options=options)
if operation_chain is None:
raise TypeError('Operation Chain is required')
if not isinstance(operation_chain, OperationChain):
operation_chain = JsonConverter.from_json(operation_chain,
OperationChain)
self.operation_chain = operation_chain
def to_json(self):
operation = super().to_json()
operation['operationChain'] = self.operation_chain.to_json()
return operation
class GetWalks(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.GetWalks'
def __init__(self,
input=None,
operations=None,
results_limit=None,
options=None):
super().__init__(_class_name=self.CLASS,
options=options)
self.input = input
self.operations = None
self.results_limit = results_limit
if operations is not None:
self.operations = []
for op in operations:
if not isinstance(op, GetElements) and not isinstance(op,
OperationChain):
op = JsonConverter.from_json(op)
self.operations.append(op)
def to_json(self):
operation = super().to_json()
if self.results_limit is not None:
operation['resultsLimit'] = self.results_limit
if self.input is not None:
entity_seed_json = []
for entity_seed in self.input:
entity_seed_json.append(entity_seed.to_json())
operation['input'] = entity_seed_json
if self.operations is not None:
operations_json = []
for op in self.operations:
operations_json.append(op.to_json())
operation['operations'] = operations_json
return operation
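# Illustrative usage (seed and limit are examples only): walk two hops out from
# a seed vertex by chaining two GetElements operations.
#   GetWalks(input=[EntitySeed("A")],
#            operations=[GetElements(), GetElements()],
#            results_limit=100)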
class Map(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.Map'
def __init__(self,
functions,
input=None,
options=None):
super().__init__(_class_name=self.CLASS,
options=options)
self.input = input
if functions is not None:
self.functions = []
for func in functions:
if not isinstance(func, gaffer_functions.Function):
func = JsonConverter.from_json(
func, gaffer_functions.Function)
self.functions.append(func)
def to_json(self):
operation = super().to_json()
if self.input is not None:
operation['input'] = self.input
if self.functions is not None:
functions_json = []
for function in self.functions:
functions_json.append(function.to_json())
operation['functions'] = functions_json
return operation
class If(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.If'
def __init__(self, input=None, condition=None, conditional=None,
then=None, otherwise=None, options=None):
super().__init__(_class_name=self.CLASS,
options=options)
self.input = input
self.condition = condition
if conditional is not None:
if not isinstance(conditional, Conditional):
self.conditional = JsonConverter.from_json(conditional,
Conditional)
else:
self.conditional = conditional
else:
self.conditional = None
if then is not None:
if not isinstance(then, Operation):
self.then = JsonConverter.from_json(then, Operation)
else:
self.then = then
else:
self.then = None
if otherwise is not None:
if not isinstance(otherwise, Operation):
self.otherwise = JsonConverter.from_json(otherwise, Operation)
else:
self.otherwise = otherwise
else:
self.otherwise = None
def to_json(self):
operation = super().to_json()
if self.input is not None:
json_seeds = []
if isinstance(self.input, list):
for seed in self.input:
if isinstance(seed, ToJson):
json_seeds.append(seed.to_json())
else:
json_seeds.append(seed)
else:
if isinstance(self.input, ToJson):
json_seeds.append(self.input.to_json())
                else:
                    json_seeds.append(self.input)
operation['input'] = json_seeds
if self.condition is not None:
operation['condition'] = self.condition
if self.conditional is not None:
operation['conditional'] = self.conditional.to_json()
if self.then is not None:
operation['then'] = self.then.to_json()
if self.otherwise is not None:
operation['otherwise'] = self.otherwise.to_json()
return operation
class While(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.While'
def __init__(self, max_repeats=1000, input=None, operation=None,
condition=None, conditional=None, options=None):
super().__init__(_class_name=self.CLASS,
options=options)
self.max_repeats = max_repeats
self.input = input
self.condition = condition
        if operation is not None:
            if not isinstance(operation, Operation):
                self.operation = JsonConverter.from_json(operation, Operation)
            else:
                self.operation = operation
        else:
            self.operation = None
if conditional is not None:
if not isinstance(conditional, Conditional):
self.conditional = JsonConverter.from_json(conditional,
Conditional)
else:
self.conditional = conditional
else:
self.conditional = conditional
def to_json(self):
operation = super().to_json()
operation['maxRepeats'] = self.max_repeats
if self.input is not None:
json_seeds = []
if isinstance(self.input, list):
for seed in self.input:
if isinstance(seed, ToJson):
json_seeds.append(seed.to_json())
else:
json_seeds.append(seed)
else:
if isinstance(self.input, ToJson):
json_seeds.append(self.input.to_json())
                else:
                    json_seeds.append(self.input)
operation['input'] = json_seeds
if self.operation is not None:
operation['operation'] = self.operation.to_json()
if self.condition is not None:
operation['condition'] = self.condition
if self.conditional is not None:
operation['conditional'] = self.conditional.to_json()
return operation
class Reduce(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.Reduce'
def __init__(self, input=None, identity=None,
aggregate_function=None, options=None):
super().__init__(_class_name=self.CLASS,
options=options)
self.input = input
self.identity = identity
if aggregate_function is None:
raise ValueError('aggregate_function is required')
if isinstance(aggregate_function, dict):
aggregate_function = JsonConverter.from_json(
aggregate_function, gaffer_binaryoperators.BinaryOperator)
self.aggregate_function = aggregate_function
def to_json(self):
operation = super().to_json()
if self.input is not None:
json_seeds = []
if isinstance(self.input, list):
for seed in self.input:
if isinstance(seed, ToJson):
json_seeds.append(seed.to_json())
else:
json_seeds.append(seed)
else:
if isinstance(self.input, ToJson):
json_seeds.append(self.input.to_json())
                else:
                    json_seeds.append(self.input)
operation['input'] = json_seeds
operation['aggregateFunction'] = self.aggregate_function.to_json()
if self.identity is not None:
if isinstance(self.identity, ToJson):
operation['identity'] = self.identity.to_json()
else:
operation['identity'] = self.identity
return operation
class ForEach(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.ForEach'
def __init__(self, input=None, operation=None, options=None):
super().__init__(_class_name=self.CLASS,
options=options)
self.input = input
        if operation is not None:
            if not isinstance(operation, Operation):
                self.operation = JsonConverter.from_json(operation, Operation)
            else:
                self.operation = operation
        else:
            self.operation = None
def to_json(self):
operation = super().to_json()
if self.input is not None:
json_seeds = []
if isinstance(self.input, list):
for seed in self.input:
if isinstance(seed, ToJson):
json_seeds.append(seed.to_json())
else:
json_seeds.append(seed)
else:
if isinstance(self.input, ToJson):
json_seeds.append(self.input.to_json())
                else:
                    json_seeds.append(self.input)
operation['input'] = json_seeds
if self.operation is not None:
operation['operation'] = self.operation.to_json()
return operation
class ToSingletonList(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.output.ToSingletonList'
def __init__(self, input=None, options=None):
super().__init__(_class_name=self.CLASS, options=options)
self.input = input
def to_json(self):
operation = super().to_json()
if self.input is not None:
json_seeds = []
if isinstance(self.input, list):
for seed in self.input:
if isinstance(seed, ToJson):
json_seeds.append(seed.to_json())
else:
json_seeds.append(seed)
else:
if isinstance(self.input, ToJson):
json_seeds.append(self.input.to_json())
                else:
                    json_seeds.append(self.input)
operation['input'] = json_seeds
return operation
class ValidateOperationChain(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.ValidateOperationChain'
def __init__(self, operation_chain=None, options=None):
super().__init__(_class_name=self.CLASS, options=options)
if operation_chain is None:
raise ValueError('operation_chain is required')
if not isinstance(operation_chain, OperationChain):
self.operation_chain = JsonConverter.from_json(
operation_chain, OperationChain)
else:
self.operation_chain = operation_chain
def to_json(self):
operation_json = super().to_json()
operation_json['operationChain'] = self.operation_chain.to_json()
return operation_json
class Conditional(ToJson, ToCodeString):
CLASS = 'uk.gov.gchq.gaffer.operation.util.Conditional'
def __init__(self, predicate=None, transform=None):
        if predicate is not None:
            if not isinstance(predicate, gaffer_predicates.Predicate):
                self.predicate = JsonConverter.from_json(
                    predicate, gaffer_predicates.Predicate)
            else:
                self.predicate = predicate
        else:
            self.predicate = None
        if transform is not None:
            if not isinstance(transform, Operation):
                self.transform = JsonConverter.from_json(transform, Operation)
            else:
                self.transform = transform
        else:
            self.transform = None
def to_json(self):
conditional_json = {}
if self.predicate is not None:
conditional_json["predicate"] = self.predicate.to_json()
if self.transform is not None:
conditional_json["transform"] = self.transform.to_json()
return conditional_json
class Join(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.join.Join'
def __init__(self, input=None, operation=None, match_method=None, match_key=None, flatten=None, join_type=None, collection_limit=None, options=None):
super().__init__(_class_name=self.CLASS, options=options)
        if operation is not None:
            if not isinstance(operation, Operation):
                self.operation = JsonConverter.from_json(operation)
            else:
                self.operation = operation
        else:
            self.operation = None
        if match_method is not None:
            if not isinstance(match_method, Match):
                self.match_method = JsonConverter.from_json(match_method)
            else:
                self.match_method = match_method
        else:
            self.match_method = None
self.input = input
self.flatten = flatten
self.match_key = match_key
self.collection_limit = collection_limit
self.join_type = join_type
def to_json(self):
operation_json = super().to_json()
if self.input is not None:
json_input = []
for input in self.input:
if isinstance(input, ToJson):
json_input.append(input.to_json())
else:
json_input.append(input)
operation_json['input'] = json_input
if self.operation is not None:
operation_json['operation'] = self.operation.to_json()
if self.match_method is not None:
operation_json['matchMethod'] = self.match_method.to_json()
if self.match_key is not None:
operation_json['matchKey'] = self.match_key
if self.flatten is not None:
operation_json['flatten'] = self.flatten
if self.join_type is not None:
operation_json['joinType'] = self.join_type
if self.collection_limit is not None:
operation_json['collectionLimit'] = self.collection_limit
return operation_json
class GetAllGraphIds(Operation):
CLASS = 'uk.gov.gchq.gaffer.federatedstore.operation.GetAllGraphIds'
def __init__(self, options=None):
super().__init__(_class_name=self.CLASS, options=options)
class FederatedOperationChain(Operation):
CLASS = 'uk.gov.gchq.gaffer.federatedstore.operation.FederatedOperationChain'
def __init__(self,operation_chain=None,options=None):
super().__init__(_class_name=self.CLASS, options=options)
if operation_chain is None:
raise ValueError('operation_chain is required')
if operation_chain is not None:
if isinstance(operation_chain,OperationChain):
self.operation_chain = operation_chain
elif isinstance(operation_chain,list):
allOperations = True
                if len(operation_chain) == 0:
                    allOperations = False
for op in operation_chain:
if not isinstance(op,Operation):
allOperations = False
                if allOperations:
                    self.operation_chain = OperationChain(operation_chain)
                else:
                    # Convert any raw (dict) operations before wrapping them in a chain.
                    operations = [op if isinstance(op, Operation)
                                  else JsonConverter.from_json(op)
                                  for op in operation_chain]
                    self.operation_chain = OperationChain(operations)
elif isinstance(operation_chain,Operation):
self.operation_chain = OperationChain(operation_chain)
else:
self.operation_chain = JsonConverter.from_json(operation_chain, OperationChain)
def to_json(self):
operation = super().to_json()
operation['operationChain'] = self.operation_chain.to_json()
return operation
class RemoveGraph(Operation):
CLASS = 'uk.gov.gchq.gaffer.federatedstore.operation.RemoveGraph'
def __init__(self, graph_id, options=None):
super().__init__(_class_name=self.CLASS, options=options)
self.graph_id = graph_id
def to_json(self):
operation = super().to_json()
operation['graphId'] = self.graph_id
return operation
class AddGraph(Operation):
CLASS = 'uk.gov.gchq.gaffer.federatedstore.operation.AddGraph'
def __init__(self, graph_id,
store_properties=None,
parent_properties_id=None,
schema=None,
parent_schema_ids=None,
graph_auths=None,
is_public=None,
disabled_by_default=None,
options=None):
super().__init__(_class_name=self.CLASS, options=options)
self.graph_id = graph_id
self.store_properties = store_properties
self.parent_properties_id = parent_properties_id
self.schema = schema
self.parent_schema_ids = parent_schema_ids
self.graph_auths = graph_auths
self.is_public = is_public
self.disabled_by_default = disabled_by_default
def to_json(self):
operation = super().to_json()
operation['graphId'] = self.graph_id
if self.store_properties is not None:
operation['storeProperties'] = self.store_properties
if self.parent_properties_id is not None:
operation['parentPropertiesId'] = self.parent_properties_id
if self.schema is not None:
operation['schema'] = self.schema
if self.parent_schema_ids is not None:
operation['parentSchemaIds'] = self.parent_schema_ids
if self.graph_auths is not None:
operation['graphAuths'] = self.graph_auths
if self.is_public is not None:
operation['isPublic'] = self.is_public
if self.disabled_by_default is not None:
operation['disabledByDefault'] = self.disabled_by_default
return operation
class AddGraphWithHooks(Operation):
CLASS = 'uk.gov.gchq.gaffer.federatedstore.operation.AddGraphWithHooks'
def __init__(self, graph_id,
store_properties=None,
parent_properties_id=None,
schema=None,
parent_schema_ids=None,
graph_auths=None,
is_public=None,
disabled_by_default=None,
hooks=None,
options=None):
super().__init__(_class_name=self.CLASS, options=options)
self.graph_id = graph_id
self.store_properties = store_properties
self.parent_properties_id = parent_properties_id
self.schema = schema
self.parent_schema_ids = parent_schema_ids
self.graph_auths = graph_auths
self.is_public = is_public
self.disabled_by_default = disabled_by_default
self.hooks = hooks
def to_json(self):
operation = super().to_json()
operation['graphId'] = self.graph_id
if self.store_properties is not None:
operation['storeProperties'] = self.store_properties
if self.parent_properties_id is not None:
operation['parentPropertiesId'] = self.parent_properties_id
if self.schema is not None:
operation['schema'] = self.schema
if self.parent_schema_ids is not None:
operation['parentSchemaIds'] = self.parent_schema_ids
if self.graph_auths is not None:
operation['graphAuths'] = self.graph_auths
if self.is_public is not None:
operation['isPublic'] = self.is_public
if self.disabled_by_default is not None:
operation['disabledByDefault'] = self.disabled_by_default
if self.hooks is not None:
operation['hooks'] = self.hooks
return operation
class GetVariable(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.GetVariable'
def __init__(self, variable_name=None, options=None):
super().__init__(_class_name=self.CLASS, options=options)
self.variable_name = variable_name
def to_json(self):
operation = super().to_json()
if self.variable_name is not None:
operation['variableName'] = self.variable_name
return operation
class GetVariables(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.GetVariables'
def __init__(self, variable_names=None, options=None):
super().__init__(_class_name=self.CLASS, options=options)
self.variable_names = variable_names
def to_json(self):
operation = super().to_json()
if self.variable_names is not None:
operation['variableNames'] = self.variable_names
return operation
class SetVariable(Operation):
CLASS = 'uk.gov.gchq.gaffer.operation.impl.SetVariable'
def __init__(self, input=None, variable_name=None, options=None):
super().__init__(_class_name=self.CLASS, options=options)
self.input = input
self.variable_name = variable_name
def to_json(self):
operation = super().to_json()
if self.variable_name is not None:
operation['variableName'] = self.variable_name
if self.input is not None:
operation['input'] = self.input
return operation
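# Register a JSON -> Operation converter for every class above that declares a
# CLASS attribute. The class_obj=class_obj default argument freezes the class
# captured by each lambda; without it, every converter would resolve to the last
# class visited by the loop.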
def load_operation_json_map():
for name, class_obj in inspect.getmembers(
sys.modules[__name__], inspect.isclass):
if hasattr(class_obj, 'CLASS'):
JsonConverter.GENERIC_JSON_CONVERTERS[class_obj.CLASS] = \
lambda obj, class_obj=class_obj: class_obj(**obj)
load_operation_json_map()
``` |
{
"source": "Jpelczynski1/Jpelczynski1.py.app",
"score": 4
} |
#### File: Jpelczynski1/Jpelczynski1.py.app/Tic_Tac_Toe.py
```python
board = ["-","-","-",
"-","-","-",
"-","-","-",]
# If game is still going
game_still_going = True
# who won? Or tie?
winner = None
current_player = "X"
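# Board positions map to list indices 0-8, left to right, top to bottom:
#   0 | 1 | 2
#   3 | 4 | 5
#   6 | 7 | 8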
def display_board():
print(board[0] + " | " + board[1] + " | " + board[2])
print(board[3] + " | " + board[4] + " | " + board[5])
print(board[6] + " | " + board[7] + " | " + board[8])
def play_game():
# Display initial board
display_board()
while game_still_going:
# handle turn of a player
handle_turn(current_player)
        # check if the game has ended
check_if_game_over()
# flip to the other player
flip_player()
# the game has ended
if winner == "X" or winner == "O":
        print(winner + " won.")
    elif winner is None:
print("Tie.")
def handle_turn(player):
print(player + "'s turn.")
position = input("Choose a position from 1-9: ")
valid = False
while not valid:
while position not in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
position = input("Invalid input. Choose a position from 1-9: ")
position = int(position) - 1
if board[position] == "-":
valid = True
else:
print("You can't go there. Go again.")
board[position] = player
display_board()
def check_if_game_over():
check_for_winner()
check_if_tie()
def check_for_winner():
#set up global variables
global winner
# check rows
row_winner = check_rows()
# check columns
column_winner = check_columns()
# check diagonals
diagonal_winner = check_diagonals()
if row_winner:
winner = row_winner
elif column_winner:
winner = column_winner
elif diagonal_winner:
winner = diagonal_winner
else:
winner = None
return
def check_rows():
    # set up global variables
global game_still_going
row_1 = board[0] == board[1] == board[2] != "-"
row_2 = board[3] == board[4] == board[5] != "-"
row_3 = board[6] == board[7] == board[8] != "-"
if row_1 or row_2 or row_3:
game_still_going = False
if row_1:
return board[0]
elif row_2:
return board[3]
elif row_3:
return board[6]
return
def check_columns():
    # set up global variables
global game_still_going
column_1 = board[0] == board[3] == board[6] != "-"
column_2 = board[1] == board[4] == board[7] != "-"
column_3 = board[2] == board[5] == board[8] != "-"
if column_1 or column_2 or column_3:
game_still_going = False
if column_1:
return board[0]
elif column_2:
return board[1]
elif column_3:
        return board[2]
return
def check_diagonals():
    # set up global variables
global game_still_going
diagonals_1 = board[0] == board[4] == board[8] != "-"
diagonals_2 = board[6] == board[4] == board[2] != "-"
if diagonals_1 or diagonals_2:
game_still_going = False
if diagonals_1:
return board[0]
elif diagonals_2:
        return board[6]
return
def check_if_tie():
global game_still_going
if "-" not in board:
game_still_going = False
return
def flip_player():
global current_player
if current_player == "X":
current_player = "O"
elif current_player == "O":
current_player = "X"
return
play_game()
# board
# display board
# play game
# handle turn
# check win
# check row
# check column
# check diagonal
# check tie
# flip player
``` |
{
"source": "jpelgrims/matching_engine",
"score": 3
} |
#### File: matching_engine/python_prototype/engine.py
```python
from collections import deque
from sortedcontainers import SortedList
import threading
class Order:
def __init__(self, id, order_type, side, price, quantity):
self.id = id
self.type = order_type
self.side = side.lower()
self.price = price
self.quantity = quantity
def __str__(self):
return "[" + str(self.price) + " for " + str(self.quantity) + " shares]"
class Trade:
def __init__(self, buyer, seller, price, quantity):
self.buy_order_id = buyer
self.sell_order_id = seller
self.price = price
self.quantity = quantity
def show(self):
print("[", self.price, self.quantity, "]")
class OrderBook:
def __init__(self, bids=[], asks=[]):
self.bids = SortedList(bids, key = lambda order: -order.price)
self.asks = SortedList(asks, key = lambda order: order.price)
def __len__(self):
return len(self.bids) + len(self.asks)
def best_bid(self):
if len(self.bids) > 0:
return self.bids[0].price
else:
return 0
def best_ask(self):
if len(self.asks) > 0:
return self.asks[0].price
else:
return 0
    def add(self, order):
        # SortedList keeps itself ordered via the key functions above; use add()
        # because positional insert() is not supported by sortedcontainers.
        if order.side == 'buy':
            self.bids.add(order)
        elif order.side == 'sell':
            self.asks.add(order)
def remove(self, order):
if order.side == 'buy':
self.bids.remove(order)
elif order.side == 'sell':
self.asks.remove(order)
class MatchingEngine:
def __init__(self):
self.queue = deque()
self.orderbook = OrderBook()
self.trades = deque()
def process(self, order):
if order.type == "limit":
self.match_limit_order(order)
def get_trades(self):
trades = list(self.trades)
return trades
def match_limit_order(self, order):
if order.side == 'buy' and order.price >= self.orderbook.best_ask():
# Buy order crossed the spread
filled = 0
consumed_asks = []
for i in range(len(self.orderbook.asks)):
ask = self.orderbook.asks[i]
if ask.price > order.price:
break # Price of ask is too high, stop filling order
elif filled == order.quantity:
break # Order was filled
if filled + ask.quantity <= order.quantity: # order not yet filled, ask will be consumed whole
filled += ask.quantity
trade = Trade(order.id, ask.id, ask.price, ask.quantity)
self.trades.append(trade)
consumed_asks.append(ask)
elif filled + ask.quantity > order.quantity: # order is filled, ask will be consumed partially
volume = order.quantity-filled
filled += volume
trade = Trade(order.id, ask.id, ask.price, volume)
self.trades.append(trade)
ask.quantity -= volume
for ask in consumed_asks:
self.orderbook.remove(ask)
if filled < order.quantity:
self.orderbook.add(Order(order.id, "limit", order.side, order.price, order.quantity-filled))
elif order.side == 'sell' and order.price <= self.orderbook.best_bid():
# Sell order crossed the spread
filled = 0
consumed_bids = []
for i in range(len(self.orderbook.bids)):
bid = self.orderbook.bids[i]
if bid.price < order.price:
break # Price of bid is too low, stop filling order
if filled == order.quantity:
break # Order was filled
if filled + bid.quantity <= order.quantity: # order not yet filled, bid will be consumed whole
filled += bid.quantity
trade = Trade(order.id, bid.id, bid.price, bid.quantity)
self.trades.append(trade)
consumed_bids.append(bid)
elif filled + bid.quantity > order.quantity: # order is filled, bid will be consumed partially
volume = order.quantity-filled
filled += volume
trade = Trade(order.id, bid.id, bid.price, volume)
self.trades.append(trade)
bid.quantity -= volume
for bid in consumed_bids:
self.orderbook.remove(bid)
if filled < order.quantity:
self.orderbook.add(Order(order.id, "limit", order.side, order.price, order.quantity-filled))
else:
# Order did not cross the spread, place in order book
self.orderbook.add(order)
def cancel_order(self, cancel):
pass
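# Illustrative usage (order values are examples, not part of the original file):
#   engine = MatchingEngine()
#   engine.process(Order(1, "limit", "sell", 10.0, 5))
#   engine.process(Order(2, "limit", "buy", 10.5, 5))  # crosses the spread
#   engine.get_trades()  # -> one Trade at price 10.0 for 5 shares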
``` |
{
"source": "jpelgrims/statwebgen",
"score": 3
} |
#### File: statwebgen/generator/config.py
```python
import os
import json
import os.path
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
configuration = {}
def load_json_file(path):
if os.path.isfile(path):
with open(path, 'r') as config_file:
return json.load(config_file)
else:
return {}
def merge_configurations(base_config: dict, specified_config: dict) -> dict:
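    # Recursively overlay specified_config on top of base_config: nested dicts
    # are merged key by key and scalar values from specified_config win; keys
    # that appear only in specified_config are ignored.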
merged_config = {}
for key, value in base_config.items():
if isinstance(value, dict):
merged_config[key] = merge_configurations(value, specified_config.get(key, {}))
else:
setting = value
if key in specified_config:
setting = specified_config[key]
merged_config[key] = setting
return merged_config
def get_config(directory):
global configuration
if not configuration:
base_configuration = load_json_file(os.path.join(SCRIPT_DIR, "config.json"))
config_file_path = os.path.join(directory, "statwebgen.json")
site_configuration = load_json_file(config_file_path)
configuration = merge_configurations(base_configuration, site_configuration)
return configuration
else:
return configuration
``` |
{
"source": "jpelikan71/pdf_approximation",
"score": 3
} |
#### File: pdf_approximation/pdf_appx/lp_appx.py
```python
import numpy as np
from numpy.polynomial import Polynomial, Legendre, polynomial
def approx_legendre_poly(Moments):
n_moments = Moments.shape[0]-1
exp_coef = (np.zeros((1)))
# For method description see, for instance:
# Chapter 3 of "The Problem of Moments", <NAME>, <NAME>
for i in range(n_moments+1):
p = Legendre.basis(i).convert(window = [0.0,1.0], kind=Polynomial)
q = (2*i+1)*np.sum(Moments[0:(i+1)]*p.coef)
pq = (p.coef*q)
exp_coef = polynomial.polyadd(exp_coef, pq)
expansion = Polynomial(exp_coef)
return expansion
```
#### File: pdf_approximation/pdf_appx/pda_utils.py
```python
import numpy as np
from scipy import integrate
from scipy import interpolate
import random
# random transform for pdf generation/sampling
def casual_increasing_function():
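    # Build a random, monotonically increasing map of [0, 1] onto [0, 1] by
    # PCHIP-interpolating sorted random nodes; the symmetric branch mirrors its
    # nodes about (0.5, 0.5).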
symmetry = random.randint(0,1)
if (symmetry==0):
nnodes = random.randint(5,12)
x = np.linspace(0,1,(nnodes+2))
y = np.sort((np.random.rand(nnodes)))
y = np.append( np.array([0]) ,y)
y = np.append(y, np.array([1]) )
if (symmetry==1):
snodes = random.randint(2,4)
nnodes = snodes*2+3
x = np.linspace(0,1,nnodes)
y = np.zeros((nnodes))
y1 = np.sort((np.random.rand(snodes)))*0.5
y1 = np.append( np.array([0]) ,y1)
y2 = 1. - (np.flipud( y1 ) )
y[0:snodes+1] = y1
y[(snodes+2):nnodes]=y2
y[snodes+1] = 0.5
f = interpolate.PchipInterpolator(x,y)
return f
def moments_from_pdf(pdf,num_moments):
moments = np.ones((num_moments+1))
for k in range (num_moments+1):
v = lambda x:(pdf(x))*(x**k)
J, err = integrate.quad(v,0,1)
moments[k] = J
return moments
def moments_from_cdf(cdf,num_moments):
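    # Integration by parts on [0, 1]: E[X^k] = 1 - k * integral of x^(k-1)*F(x) dx,
    # so only the CDF F is needed (the zeroth moment stays at 1).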
moments = np.ones((num_moments+1))
for k in range (1,num_moments+1):
v = lambda x:(cdf(x))*(x**(k-1))
J, err = integrate.quad(v,0,1)
moments[k] = 1-J*k
return moments
def moments_from_samples(X, num_moments):
Xp = np.expand_dims(X, axis = 0)
powers = np.expand_dims((np.arange(num_moments+1)), axis = 1)
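    # Broadcasting the (1, N) samples against the (k+1, 1) exponents gives a
    # matrix whose row k holds X**k; the mean over axis 1 is the k-th raw moment.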
moments = np.mean( np.power(Xp, powers) , axis=1)
return moments
``` |
{
"source": "jpenalbae/greatfet",
"score": 3
} |
#### File: host/greatfet/board.py
```python
import usb
import time
from .protocol import vendor_requests
from .errors import DeviceNotFoundError
from .peripherals.led import LED
from .peripherals.gpio import GPIO
# Default device identifiers.
GREATFET_VENDOR_ID = 0x1d50
GREATFET_PRODUCT_ID = 0x60e6
# Quirk constant that helps us identify libusb's pipe errors, which bubble
# up as generic USBErrors with errno 32 on affected platforms.
LIBUSB_PIPE_ERROR = 32
# Total seconds we should wait after a reset before reconnecting.
RECONNECT_DELAY = 3
class GreatFETBoard(object):
"""
Class representing a USB-connected GreatFET device.
"""
"""
The GreatFET board IDs handled by this class. Used by the default
implementation of accepts_connected_device() to determine if a given subclass
handles the given board ID.
"""
HANDLED_BOARD_IDS = []
"""
The display name of the given GreatFET board. Subclasses should override
this with a more appropriate name.
"""
BOARD_NAME = "Unknown GreatFET"
"""
The mappings from GPIO names to port numbers. Paths in names can be delineated
with underscores to group gpios. For example, if Jumper 7, Pin 3 is Port 5, Pin 11,
you could add an entry that reads "J7_P3": (5, 11).
"""
GPIO_MAPPINGS = {}
@classmethod
def autodetect(cls, **device_identifiers):
"""
Attempts to create a new instance of the GreatFETBoard subclass
most applicable to the given device. For example, if the attached
board is a GreatFET One, this will automatically create a
GreatFET One object.
Accepts the same arguments as pyusb's usb.find() method, allowing narrowing
to a more specific GreatFET by e.g. serial number.
        Throws a DeviceNotFoundError if no device is available.
"""
# Iterate over each subclass of GreatFETBoard until we find a board
# that accepts the given board ID.
for subclass in cls.__subclasses__():
if subclass.accepts_connected_device(**device_identifiers):
return subclass(**device_identifiers)
# If we couldn't find a board, raise an error.
raise DeviceNotFoundError()
@classmethod
def autodetect_all(cls, **device_identifiers):
"""
Attempts to create a new instance of the GreatFETBoard subclass
most applicable for each board present on the system-- similar to the
behavior of autodetect.
Accepts the same arguments as pyusb's usb.find() method, allowing narrowing
to a more specific GreatFET by e.g. serial number.
Returns a list of GreatFET devices, which may be empty if none are found.
"""
devices = []
# Iterate over each subclass of GreatFETBoard until we find a board
# that accepts the given board ID.
for subclass in cls.__subclasses__():
# Get objects for all devices accepted by the given subclass.
subclass_devices = subclass.all_accepted_devices(**device_identifiers)
# FIXME: It's possible that two classes may choose to both advertise support
        # for the same device, in which case we'd wind up with duplicates here. We could
# try to filter out duplicates using e.g. USB bus/device, but that assumes
# things are USB connected.
devices.extend(subclass_devices)
# Return the list of all subclasses.
return devices
@classmethod
def all_accepted_devices(cls, **device_identifiers):
"""
Returns a list of all devices supported by the given class. This should be
overridden if the device connects via anything other that USB.
Accepts the same arguments as pyusb's usb.find() method, allowing narrowing
to a more specific GreatFET by e.g. serial number.
"""
devices = []
# Grab the list of all devices that we theoretically could use.
identifiers = cls.populate_default_identifiers(device_identifiers, find_all=True)
raw_devices = usb.core.find(**identifiers)
# Iterate over all of the connected devices, and filter out the devices
# that this class doesn't connect.
for raw_device in raw_devices:
# We need to be specific about which device in particular we're
# grabbing when we query things-- or we'll get the first acceptable
# device every time. The trick here is to populate enough information
# into the identifier to uniquely identify the device. The address
# should do, as pyusb is only touching enmerated devices.
identifiers['address'] = raw_device.address
identifiers['find_all'] = False
            # If we support the relevant device _instance_, add it to our list.
if cls.accepts_connected_device(**identifiers):
devices.append(cls(**identifiers))
return devices
@classmethod
def accepts_connected_device(cls, **device_identifiers):
"""
Returns true iff the provided class is appropriate for handling a connected
GreatFET.
Accepts the same arguments as pyusb's usb.find() method, allowing narrowing
to a more specific GreatFET by e.g. serial number.
"""
try:
potential_device = cls(**device_identifiers)
except DeviceNotFoundError:
return False
except usb.core.USBError as e:
# A pipe error here likely means the device didn't support a start-up
# command, and STALLED.
# We'll interpret that as a "we don't accept this device" by default.
if e.errno == LIBUSB_PIPE_ERROR:
return False
else:
raise e
try:
board_id = potential_device.board_id()
finally:
potential_device.close()
# Accept only GreatFET devices whose board IDs are handled by this
# class. This is mostly used by subclasses, which should override
# HANDLED_BOARD_IDS.
return board_id in cls.HANDLED_BOARD_IDS
@staticmethod
def populate_default_identifiers(device_identifiers, find_all=False):
"""
Populate a dictionary of default identifiers-- which can
be overridden or extended by arguments to the function.
device_identifiers -- any user-specified identifiers; will override
the default identifiers in the event of a conflict
"""
# By default, accept any device with the default vendor/product IDs.
identifiers = {
'idVendor': GREATFET_VENDOR_ID,
'idProduct': GREATFET_PRODUCT_ID,
'find_all': find_all,
}
identifiers.update(device_identifiers)
return identifiers
def __init__(self, **device_identifiers):
"""
Instantiates a new connection to a GreatFET device; by default connects
to the first available GreatFET.
Accepts the same arguments as pyusb's usb.find() method, allowing narrowing
to a more specific GreatFET by serial number.
"""
# By default, accept any device with the default vendor/product IDs.
self.identifiers = self.populate_default_identifiers(device_identifiers)
# For convenience, allow serial_number=None to be equivalent to not
# providing a serial number: a GreatFET with any serial number will be
# accepted.
if 'serial_number' in self.identifiers and self.identifiers['serial_number'] is None:
del self.identifiers['serial_number']
# Connect to the first available GreatFET device.
try:
self.device = usb.core.find(**self.identifiers)
except usb.core.USBError as e:
# On some platforms, providing identifiers that don't match with any
# real device produces a USBError/Pipe Error. We'll convert it into a
# DeviceNotFoundError.
if e.errno == LIBUSB_PIPE_ERROR:
raise DeviceNotFoundError()
else:
raise e
# If we couldn't find a GreatFET, bail out early.
if self.device is None:
raise DeviceNotFoundError()
# Ensure that we have an active USB connection to the device.
self._initialize_usb()
# Final sanity check: if we don't handle this board ID, bail out!
if self.HANDLED_BOARD_IDS and (self.board_id() not in self.HANDLED_BOARD_IDS):
raise DeviceNotFoundError()
def _initialize_usb(self):
"""Sets up our USB connection to the GreatFET device."""
# For now, the GreatFET is only providing a single configuration, so we
# can accept the first configuration provided.
self.device.set_configuration()
def board_id(self):
"""Reads the board ID number for the GreatFET device."""
# Query the board for its ID number.
response = self.vendor_request_in(vendor_requests.READ_BOARD_ID, length=1)
return response[0]
def board_name(self):
"""Returns the human-readable product-name for the GreatFET device."""
return self.BOARD_NAME
def firmware_version(self):
"""Reads the board's firmware version."""
# Query the board for its firmware version, and convert that to a string.
return self.vendor_request_in_string(vendor_requests.READ_VERSION_STRING, length=255)
def serial_number(self, as_hex_string=True):
"""Reads the board's unique serial number."""
result = self.vendor_request_in(vendor_requests.READ_PARTID_SERIALNO, length=24)
# The serial number starts eight bytes in.
result = result[8:]
# If we've been asked to convert this to a hex string, do so.
if as_hex_string:
result = _to_hex_string(result)
return result
def usb_serial_number(self):
""" Reports the device's USB serial number. """
return self.device.serial_number
def part_id(self, as_hex_string=True):
"""Reads the board's unique serial number."""
result = self.vendor_request_in(vendor_requests.READ_PARTID_SERIALNO, length=24)
# The part ID constitutes the first eight bytes of the response.
result = result[0:8]
if as_hex_string:
result = _to_hex_string(result)
return result
def reset(self, reconnect=True, switch_to_external_clock=False):
"""
Reset the GreatFET device.
Arguments:
reconnect -- If True, this method will wait for the device to
finish the reset and then attempt to reconnect.
switch_to_external_clock -- If true, the device will accept a 12MHz
clock signal on P4_7 (J2_P11 on the GreatFET one) after the reset.
"""
type = 1 if switch_to_external_clock else 0
try:
self.vendor_request_out(vendor_requests.RESET, value=type)
except usb.core.USBError as e:
pass
# If we're to attempt a reconnect, do so.
connected = False
if reconnect:
time.sleep(RECONNECT_DELAY)
self.__init__(**self.identifiers)
# FIXME: issue a reset to all device peripherals with state, here?
def switch_to_external_clock(self):
"""
Resets the GreatFET, and starts it up again using an external clock
source, rather than the onboard crystal oscillator.
"""
self.reset(switch_to_external_clock=True)
def close(self):
"""
Dispose pyUSB resources allocated by this connection. This connection
will no longer be usable.
"""
usb.util.dispose_resources(self.device)
def _vendor_request(self, direction, request, length_or_data=0, value=0, index=0, timeout=1000):
"""Performs a USB vendor-specific control request.
See also _vendor_request_in()/_vendor_request_out(), which provide a
simpler syntax for simple requests.
Args:
request -- The number of the vendor request to be performed. Usually
a constant from the protocol.vendor_requests module.
value -- The value to be passed to the vendor request.
For IN requests:
length_or_data -- The length of the data expected in response from the request.
For OUT requests:
length_or_data -- The data to be sent to the device.
"""
return self.device.ctrl_transfer(
direction | usb.TYPE_VENDOR | usb.RECIP_DEVICE,
request, value, index, length_or_data, timeout)
def vendor_request_in(self, request, length, value=0, index=0, timeout=1000):
"""Performs a USB control request that expects a respnose from the GreatFET.
Args:
request -- The number of the vendor request to be performed. Usually
a constant from the protocol.vendor_requests module.
length -- The length of the data expected in response from the request.
"""
return self._vendor_request(usb.ENDPOINT_IN, request, length,
value=value, index=index, timeout=timeout)
def vendor_request_in_string(self, request, length=255, value=0, index=0, timeout=1000):
"""Performs a USB control request that expects a respnose from the GreatFET.
Interprets the result as a UTF-8 encoded string.
Args:
request -- The number of the vendor request to be performed. Usually
a constant from the protocol.vendor_requests module.
length -- The length of the data expected in response from the request.
"""
raw = self._vendor_request(usb.ENDPOINT_IN, request, length_or_data=length,
value=value, index=index, timeout=timeout)
return raw.tostring().decode('utf-8')
def vendor_request_out(self, request, value=0, index=0, data=None, timeout=1000):
"""Performs a USB control request that provides data to the GreatFET.
Args:
request -- The number of the vendor request to be performed. Usually
a constant from the protocol.vendor_requests module.
value -- The value to be passed to the vendor request.
"""
return self._vendor_request(usb.ENDPOINT_OUT, request, value=value,
index=index, length_or_data=data, timeout=timeout)
def _populate_leds(self, led_count):
"""Adds the standard set of LEDs to the board object.
Args:
led_count -- The number of LEDS present on the board.
"""
self.leds = {}
for i in range(1, led_count + 1):
self.leds[i] = LED(self, i)
def _populate_gpio(self):
"""Adds GPIO pin definitions to the board's main GPIO object."""
self.gpio = GPIO(self)
# Handle each GPIO mapping.
for name, pin in self.GPIO_MAPPINGS.items():
self.gpio.register_gpio(name, pin)
def _to_hex_string(byte_array):
"""Convert a byte array to a hex string."""
hex_generator = ('{:02x}'.format(x) for x in byte_array)
return ''.join(hex_generator)
```
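For orientation, a minimal usage sketch of the board class above. The `greatfet.board` import path and an attached device are assumptions for the example, not something this file guarantees:

```python
# Hypothetical usage; the module path is assumed, not confirmed by this file.
from greatfet.board import GreatFETBoard

board = GreatFETBoard.autodetect()             # returns the matching subclass
print(board.board_name(), board.firmware_version())
print("serial number:", board.serial_number())
board.close()
```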
#### File: greatfet/peripherals/i2c_bus.py
```python
from ..protocol import vendor_requests
from ..peripheral import GreatFETPeripheral
class I2CBus(GreatFETPeripheral):
"""
Class representing a GreatFET I2C bus.
For now, supports only the primary I2C bus (I2C0), but will be
expanded when the vendor commands are.
"""
def __init__(self, board, name='i2c bus', buffer_size=255):
"""
Initialize a new I2C bus.
Args:
board -- The GreatFET board whose I2C bus we want to control.
name -- The display name for the given I2C bus.
buffer_size -- The size of the I2C receive buffer on the GreatFET.
"""
# Store a reference to the parent board.
self.board = board
# Store our limitations.
self.buffer_size = buffer_size
# Create a list that will store all connected devices.
self.devices = []
# Set up the I2C bus for communications.
board.vendor_request_out(vendor_requests.I2C_START)
def attach_device(self, device):
"""
Attaches a given I2C device to this bus. Typically called
by the I2C device as it is constructed.
Arguments:
device -- The device object to attach to the given bus.
"""
# TODO: Check for address conflicts!
self.devices.append(device)
def transmit(self, address, data, receive_length=0):
"""
Sends data over the I2C bus, and optionally receives
data in response.
Args:
address -- The I2C address for the target device.
Should not contain read/write bits. Can be used to address
special addresses, for now; but this behavior may change.
data -- The data to be sent to the given device.
receive_length -- If provided, the I2C controller will attempt
to read the provided amount of data, in bytes.
"""
if (not isinstance(receive_length, int)) or receive_length < 0:
raise ValueError("invalid receive length!")
if receive_length > self.buffer_size:
raise ValueError("Tried to receive more than the size of the receive buffer.");
if address > 127 or address < 0:
raise ValueError("Tried to transmit to an invalid I2C address!")
# Perform the core transfer...
self.board.vendor_request_out(vendor_requests.I2C_XFER, value=address,
index=receive_length, data=data)
# If receipt was requested, return the received data.
if receive_length:
data = self.board.vendor_request_in(vendor_requests.I2C_RESPONSE,
length=receive_length)
else:
data = []
return data
``` |
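A hedged sketch of driving `I2CBus.transmit` from a host script; the import paths, the 7-bit address 0x48, and register 0x00 are placeholders chosen for illustration:

```python
# Hedged sketch only: 0x48 and register 0x00 are illustrative placeholders.
from greatfet.board import GreatFETBoard
from greatfet.peripherals.i2c_bus import I2CBus

board = GreatFETBoard.autodetect()
bus = I2CBus(board)
data = bus.transmit(0x48, [0x00], receive_length=1)  # write one byte, read one back
print(data)
```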
{
"source": "jpenney78/usabmx_results",
"score": 3
} |
#### File: usabmx_results/app/routes.py
```python
from flask import render_template, flash, request, redirect
from app import app
from app.forms import RequestForm
from app.results_to_csv import get_results
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
@app.route('/request', methods=['GET', 'POST'])
def request_results():
if request.method == 'POST':
logging.info('posted')
req = request.form
try:
results = get_results(req.get('results_url'))
logging.info(f"results: {results}")
return render_template('results.html', title='Results', results=results)
except:
logging.exception('exception')
pass
else:
form = RequestForm()
if form.validate_on_submit():
flash('Url={}, States={}'.format(
form.results_url.data, form.state_list.data))
return redirect('/index')
return render_template('request.html', title='Get Results', form=form)
#def index():
# url = {'url': 'URL'}
# return render_template('index.html', title='Results Form', url=url)
``` |
{
"source": "jpenney/flashbake",
"score": 2
} |
#### File: flashbake/plugins/__init__.py
```python
from enum import Enum
import flashbake
import logging
PLUGIN_ERRORS = Enum('PLUGIN_ERRORS',
['invalid_plugin',
'invalid_type',
'unknown_plugin',
'missing_attribute',
'invalid_attribute',
'missing_property',
'ignorable_error'])
class PluginError(Exception):
def __init__(self, reason, plugin_spec, name=None):
self.plugin_spec = plugin_spec
self.reason = reason
self.name = name
def __str__(self):
if self.name == None:
return '%s: %s' % (self.reason, self.plugin_spec)
else:
return '%s, %s: %s' % (self.plugin_spec, self.reason, self.name)
def service_and_prefix(plugin_spec):
service_name = plugin_spec.split(':')[-1]
property_prefix = '_'.join(service_name.lower().strip().split(' '))
return service_name, property_prefix
class AbstractPlugin:
""" Common parent for all kinds of plugins, mostly to share option handling
code. """
def __init__(self, plugin_spec):
self.plugin_spec = plugin_spec
self.service_name, self.property_prefix = service_and_prefix(plugin_spec)
self.__property_defs = []
self.__shared_prop_defs = []
def define_property(self, name, type=None, required=False, default=None):
try:
self.__property_defs.append((name, type, required, default))
except AttributeError:
raise Exception('Call AbstractPlugin.__init__ in your plugin\'s __init__.')
def share_property(self, name, type=None, plugin_spec=None):
try:
if plugin_spec:
parsed = service_and_prefix(plugin_spec)
property_prefix = parsed[1]
self.__shared_prop_defs.append(('%s_%s' % (property_prefix, name), type))
else:
self.__shared_prop_defs.append((name, type))
except AttributeError:
raise Exception('Call AbstractPlugin.__init__ in your plugin\'s __init__.')
def share_properties(self, config):
for name, type in self.__shared_prop_defs:
config.share_property(name, type)
def capture_properties(self, config):
try:
for prop in self.__property_defs:
assert len(prop) == 4, "Property definition, %s, is invalid" % (prop,)
self.__capture_property(config, *prop)
except AttributeError:
raise Exception('Call AbstractPlugin.__init__ in your plugin\'s __init__.')
def init(self, config):
""" This method is optional. """
pass
def dependencies(self):
""" Optional method via which a plugin can express a dependency on another plugin. """
return list()
def __capture_property(self, config, name, type=None, required=False, default=None):
""" Move a property, if present, from the ControlConfig to the daughter
plugin. """
config_name = '%s_%s' % (self.property_prefix, name)
if required and not config_name in config.extra_props:
raise PluginError(PLUGIN_ERRORS.missing_property, self.plugin_spec, config_name)
value = default
if config_name in config.extra_props:
value = config.extra_props[config_name]
del config.extra_props[config_name]
if type != None and value != None:
try:
value = type(value)
except:
raise flashbake.ConfigError(
'The value, %s, for option, %s, could not be parsed as %s.'
% (value, name, type))
self.__dict__[name] = value
def abstract(self):
""" borrowed this from Norvig
http://norvig.com/python-iaq.html """
import inspect
caller = inspect.getouterframes(inspect.currentframe())[1][3]
raise NotImplementedError('%s must be implemented in subclass' % caller)
class AbstractMessagePlugin(AbstractPlugin):
""" Common parent class for all message plugins, will try to help enforce
the plugin protocol at runtime. """
def __init__(self, plugin_spec, connectable=False):
AbstractPlugin.__init__(self, plugin_spec)
self.connectable = connectable
def addcontext(self, message_file, config):
""" This method is required, it will asplode if not overridden by
daughter classes. """
self.abstract()
class AbstractFilePlugin(AbstractPlugin):
""" Common parent class for all file plugins, will try to help enforce
the plugin protocol at runtime. """
def pre_process(self, hot_files, config):
""" This method is required, it will asplode if not overridden by
daughter classes. """
self.abstract()
def post_process(self, to_commit, hot_files, config):
""" This method is optional, it will be run after status processing but before commit so the
plugin may shuffle files into the commit. """
pass
class AbstractNotifyPlugin(AbstractPlugin):
""" Common parent class for all notification plugins. """
def warn(self, hot_files, config):
''' Implementations will provide messages about the problem files in the
hot_files argument through different mechanisms.
N.B. This method is required, it will asplode if not overridden by
daughter classes. '''
self.abstract()
def notify_commit(self, to_commit, hot_files, config):
''' Optional method to notify when a commit is performed, probably most useful
for services like desktop notifiers. '''
pass
``` |
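To illustrate the plugin contract above, a minimal message-plugin sketch; the `HelloMessage` class, its `greeting` property, and the written line are invented for the example and are not part of flashbake itself:

```python
# Minimal sketch of a message plugin; the names below are illustrative only.
from flashbake.plugins import AbstractMessagePlugin

class HelloMessage(AbstractMessagePlugin):
    def __init__(self, plugin_spec):
        AbstractMessagePlugin.__init__(self, plugin_spec)
        self.define_property('greeting', type=str, default='hello')

    def addcontext(self, message_file, config):
        # Write one line of context into the commit message file.
        message_file.write('%s from %s\n' % (self.greeting, self.service_name))
```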
{
"source": "jpenney/Myaamori-Aegisub-Scripts",
"score": 2
} |
#### File: scripts/fontvalidator/fontvalidator.py
```python
import argparse
import collections
import io
import itertools
import logging
import os.path
import pathlib
import re
import sys
import zlib
import ass
import ebmlite
from fontTools.ttLib import ttFont
from fontTools.misc import encodingTools
logging.basicConfig(format="%(name)s: %(message)s")
TAG_PATTERN = re.compile(r"\\\s*([^(\\]+)(?<!\s)\s*(?:\(\s*([^)]+)(?<!\s)\s*)?")
INT_PATTERN = re.compile(r"^[+-]?\d+")
LINE_PATTERN = re.compile(r"(?:\{(?P<tags>[^}]*)\}?)?(?P<text>[^{]*)")
State = collections.namedtuple("State", ["font", "italic", "weight", "drawing"])
def parse_int(s):
if match := INT_PATTERN.match(s):
return int(match.group(0))
else:
return 0
def parse_tags(s, state, line_style, styles):
for match in TAG_PATTERN.finditer(s):
value, paren = match.groups()
def get_tag(name, *exclude):
if value.startswith(name) and not any(value.startswith(ex) for ex in exclude):
args = []
if paren is not None:
args.append(paren)
if len(stripped := value[len(name):].lstrip()) > 0:
args.append(stripped)
return args
else:
return None
if (args := get_tag("fn")) is not None:
if len(args) == 0:
font = line_style.font
elif args[0].startswith("@"):
font = args[0][1:]
else:
font = args[0]
state = state._replace(font=font)
elif (args := get_tag("b", "blur", "be", "bord")) is not None:
weight = None if len(args) == 0 else parse_int(args[0])
if weight is None:
transformed = None
elif weight == 0:
transformed = 400
elif weight in (1, -1):
transformed = 700
elif 100 <= weight <= 900:
transformed = weight
else:
transformed = None
state = state._replace(weight=transformed or line_style.weight)
elif (args := get_tag("i", "iclip")) is not None:
slant = None if len(args) == 0 else parse_int(args[0])
state = state._replace(italic=slant == 1 if slant in (0, 1) else line_style.italic)
elif (args := get_tag("p", "pos", "pbo")) is not None:
scale = 0 if len(args) == 0 else parse_int(args[0])
state = state._replace(drawing=scale != 0)
elif (args := get_tag("r")) is not None:
if len(args) == 0:
style = line_style
else:
if (style := styles.get(args[0])) is None:
print(rf"Warning: \r argument {args[0]} does not exist; defaulting to line style")
style = line_style
state = state._replace(font=style.font, italic=style.italic, weight=style.weight)
elif (args := get_tag("t")) is not None:
if len(args) > 0:
state = parse_tags(args[0], state, line_style, styles)
return state
def parse_line(line, line_style, styles):
state = line_style
for tags, text in LINE_PATTERN.findall(line):
if len(tags) > 0:
state = parse_tags(tags, state, line_style, styles)
if len(text) > 0:
yield state, text
class Font:
def __init__(self, fontfile, font_number=0):
self.fontfile = fontfile
self.font = ttFont.TTFont(fontfile, fontNumber=font_number)
self.num_fonts = getattr(self.font.reader, "numFonts", 1)
self.postscript = self.font.has_key("CFF ")
self.glyphs = self.font.getGlyphSet()
os2 = self.font["OS/2"]
self.weight = os2.usWeightClass
self.italic = os2.fsSelection & 0b1 > 0
self.slant = self.italic * 110
self.width = 100
self.names = [name for name in self.font["name"].names
if name.platformID == 3 and name.platEncID in (0, 1)]
self.family_names = [name.string.decode('utf_16_be')
for name in self.names if name.nameID == 1]
self.full_names = [name.string.decode('utf_16_be')
for name in self.names if name.nameID == 4]
for name in self.font["name"].names:
if name.nameID == 6 and (encoding := encodingTools.getEncoding(
name.platformID, name.platEncID, name.langID)) is not None:
self.postscript_name = name.string.decode(encoding).strip()
# these are the two recommended formats, prioritize them
if (name.platformID, name.platEncID, name.langID) in \
[(1, 0, 0), (3, 1, 0x409)]:
break
self.exact_names = [self.postscript_name] if self.postscript else self.full_names
mac_italic = self.font["head"].macStyle & 0b10 > 0
if mac_italic != self.italic:
print(f"warning: different italic values in macStyle and fsSelection for font {self.postscript_name}")
def missing_glyphs(self, text):
if (uniTable := self.font.getBestCmap()):
return [c for c in text
if ord(c) not in uniTable]
elif (symbolTable := self.font["cmap"].getcmap(3, 0)):
macTable = self.font["cmap"].getcmap(1, 0)
encoding = encodingTools.getEncoding(1, 0, macTable.language) if macTable else 'mac_roman'
missing = []
for c in text:
try:
if (c.encode(encoding)[0] + 0xf000) not in symbolTable.cmap:
missing.append(c)
except UnicodeEncodeError:
missing.append(c)
return missing
else:
print(f"warning: could not read glyphs for font {self}")
def __repr__(self):
return f"{self.postscript_name}(italic={self.italic}, weight={self.weight})"
class FontCollection:
def __init__(self, fontfiles):
self.fonts = []
for name, f in fontfiles:
try:
font = Font(f)
self.fonts.append(font)
if font.num_fonts > 1:
for i in range(1, font.num_fonts):
self.fonts.append(Font(f, font_number=i))
except Exception as e:
print(f"Error reading {name}: {e}")
self.cache = {}
self.by_full = {name.lower(): font
for font in self.fonts
for name in font.exact_names}
self.by_family = {name.lower(): [font for (_, font) in fonts]
for name, fonts in itertools.groupby(
sorted([(family, font)
for font in self.fonts
for family in font.family_names],
key=lambda x: x[0]),
key=lambda x: x[0])}
def similarity(self, state, font):
return abs(state.weight - font.weight) + abs(state.italic * 100 - font.slant)
def _match(self, state):
if (exact := self.by_full.get(state.font)):
return exact, True
elif (family := self.by_family.get(state.font)):
return min(family, key=lambda font: self.similarity(state, font)), False
else:
return None, False
def match(self, state):
s = state._replace(font=state.font.lower(), drawing=False)
try:
return self.cache[s]
except KeyError:
font = self._match(s)
self.cache[s] = font
return font
def validate_fonts(doc, fonts, ignore_drawings=False, warn_on_exact=False):
report = {
"missing_font": collections.defaultdict(set),
"missing_glyphs": collections.defaultdict(set),
"missing_glyphs_lines": collections.defaultdict(set),
"faux_bold": collections.defaultdict(set),
"faux_italic": collections.defaultdict(set),
"mismatch_bold": collections.defaultdict(set),
"mismatch_italic": collections.defaultdict(set)
}
styles = {style.name: State(style.fontname, style.italic, 700 if style.bold else 400, False)
for style in doc.styles}
for i, line in enumerate(doc.events):
if isinstance(line, ass.Comment):
continue
nline = i + 1
try:
style = styles[line.style]
except KeyError:
print(f"Warning: Unknown style {line.style} on line {nline}; assuming default style")
style = State("Arial", False, 400, False)
for state, text in parse_line(line.text, style, styles):
font, exact_match = fonts.match(state)
if ignore_drawings and state.drawing:
continue
if font is None:
report["missing_font"][state.font].add(nline)
continue
if state.weight >= font.weight + 150:
report["faux_bold"][state.font, state.weight, font.weight].add(nline)
if state.weight <= font.weight - 150 and (not exact_match or warn_on_exact):
report["mismatch_bold"][state.font, state.weight, font.weight].add(nline)
if state.italic and not font.italic:
report["faux_italic"][state.font].add(nline)
if not state.italic and font.italic and (not exact_match or warn_on_exact):
report["mismatch_italic"][state.font].add(nline)
if not state.drawing:
missing = font.missing_glyphs(text)
report["missing_glyphs"][state.font].update(missing)
if len(missing) > 0:
report["missing_glyphs_lines"][state.font].add(nline)
issues = 0
def format_lines(lines, limit=10):
sorted_lines = sorted(lines)
if len(sorted_lines) > limit:
sorted_lines = sorted_lines[:limit]
sorted_lines.append("[...]")
return ' '.join(map(str, sorted_lines))
for font, lines in sorted(report["missing_font"].items(), key=lambda x: x[0]):
issues += 1
print(f"- Could not find font {font} on line(s): {format_lines(lines)}")
for (font, reqweight, realweight), lines in sorted(report["faux_bold"].items(), key=lambda x: x[0]):
issues += 1
print(f"- Faux bold used for font {font} (requested weight {reqweight}, got {realweight}) " \
f"on line(s): {format_lines(lines)}")
for font, lines in sorted(report["faux_italic"].items(), key=lambda x: x[0]):
issues += 1
print(f"- Faux italic used for font {font} on line(s): {format_lines(lines)}")
for (font, reqweight, realweight), lines in sorted(report["mismatch_bold"].items(), key=lambda x: x[0]):
issues += 1
print(f"- Requested weight {reqweight} but got {realweight} for font {font} " \
f"on line(s): {format_lines(lines)}")
for font, lines in sorted(report["mismatch_italic"].items(), key=lambda x: x[0]):
issues += 1
print(f"- Requested non-italic but got italic for font {font} on line(s): " + \
format_lines(lines))
for font, lines in sorted(report["missing_glyphs_lines"].items(), key=lambda x: x[0]):
issues += 1
print(f"- Font {font} is missing glyphs {''.join(sorted(report['missing_glyphs'][font]))} " \
f"on line(s): {format_lines(lines)}")
print(f"{issues} issue(s) found")
return issues > 0
def get_element(parent, element, id=False):
return next(get_elements(parent, element, id=id))
def get_elements(parent, *element, id=False):
if id:
return filter(lambda x: x.id in element, parent)
else:
return filter(lambda x: x.name in element, parent)
def get_dicts(parent, element, id=False):
return ({x.name: x for x in elem} for elem in get_elements(parent, element, id=id))
def get_subtitles(mkv):
subtitles = []
for segment in get_elements(mkv, "Segment"):
tracks_to_read = {}
tracks = get_element(segment, "Tracks")
for track in get_dicts(tracks, "TrackEntry"):
if track["CodecID"].value != b'S_TEXT/ASS':
continue
compression = False
for encoding in get_elements(track.get("ContentEncodings", []), "ContentEncoding"):
for compression in get_elements(encoding, "ContentCompression"):
compression = True
try:
track_name = track["Name"].value
except KeyError:
track_name = "Unknown"
assdoc = ass.parse(io.TextIOWrapper(io.BytesIO(track["CodecPrivate"].value),
encoding='utf_8_sig'))
tracks_to_read[track["TrackNumber"].value] = track_name, assdoc, compression
track_lines = {k: {} for k in tracks_to_read}
for cluster in get_elements(segment, "Cluster"):
for elem in cluster:
if elem.name == "SimpleBlock":
block = elem.value
elif elem.name == "BlockGroup":
block = get_element(elem, 0xa1, id=True).value
else:
continue
stream = io.BytesIO(block)
track, _ = ebmlite.decoding.readElementSize(stream)
if track in tracks_to_read:
_, _, compression = tracks_to_read[track]
timestamp = ebmlite.decoding.readInt(stream, 2)
stream.read(1)
data = stream.read()
if compression:
data = zlib.decompress(data)
order, layer, line = data.split(b',', 2)
timestamp = b'0:00:00.00,0:00:00.00'
track_lines[track][int(order)] = b'Dialogue: ' + layer + b',' + timestamp + b',' + line
for track_id, l in track_lines.items():
name, assdoc, _ = tracks_to_read[track_id]
lines = b'[Events]\n' + b'\n'.join([l[k] for k in sorted(l)])
events = ass.parse(io.TextIOWrapper(io.BytesIO(lines), encoding='utf_8_sig'))
assdoc.events.extend(events.events)
subtitles.append((name, assdoc))
return subtitles
# from mpv
FONT_MIMETYPES = {
b"application/x-truetype-font",
b"application/vnd.ms-opentype",
b"application/x-font-ttf",
b"application/x-font",
b"application/font-sfnt",
b"font/collection",
b"font/otf",
b"font/sfnt",
b"font/ttf"
}
def get_fonts(mkv):
fonts = []
for segment in get_elements(mkv, "Segment"):
for attachments in get_elements(segment, "Attachments"):
for attachment in get_dicts(attachments, "AttachedFile"):
if attachment["FileMimeType"].value not in FONT_MIMETYPES:
print(f"Ignoring non-font attachment {attachment['FileName'].value}")
fonts.append((attachment["FileName"].value,
io.BytesIO(attachment["FileData"].value)))
return fonts
def is_mkv(filename):
with open(filename, 'rb') as f:
return f.read(4) == b'\x1a\x45\xdf\xa3'
def main():
parser = argparse.ArgumentParser(
description="Validate font usage in a muxed Matroska file or an ASS file.")
parser.add_argument('subtitles', help="""
File containing the subtitles to verify. May be a Matroska file or an ASS file.
If a Matroska file is provided, any attached fonts will be used.
""")
parser.add_argument('additional_fonts', nargs='*', help="""
List of additional fonts to use for verification.
May be a Matroska file with fonts attached, a directory containing font files, or a single font file.
""")
parser.add_argument('--ignore-drawings', action='store_true', default=False,
help="Don't warn about missing fonts only used for drawings.")
parser.add_argument('--warn-fullname-mismatch', action='store_true', default=False,
help="Warn about mismatched styles even when using the full font name.")
args = parser.parse_args()
schema = ebmlite.loadSchema("matroska.xml")
if is_mkv(args.subtitles):
mkv = schema.load(args.subtitles)
subtitles = get_subtitles(mkv)
fontlist = get_fonts(mkv)
else:
with open(args.subtitles, 'r', encoding='utf_8_sig') as f:
subtitles = [(os.path.basename(args.subtitles), ass.parse(f))]
fontlist = []
for additional_fonts in args.additional_fonts:
path = pathlib.Path(additional_fonts)
if path.is_dir():
fontlist.extend((p.name, str(p)) for p in path.iterdir() if p.is_file())
elif is_mkv(additional_fonts):
fontmkv = schema.load(additional_fonts)
fontlist.extend(get_fonts(fontmkv))
else:
fontlist.append((path.name, additional_fonts))
issues = False
fonts = FontCollection(fontlist)
for name, doc in subtitles:
print(f"Validating track {name}")
issues = issues or validate_fonts(doc, fonts, args.ignore_drawings, args.warn_fullname_mismatch)
return issues
if __name__ == "__main__":
sys.exit(main())
``` |
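As a small illustration of the override-tag parsing above, independent of any Matroska input; the `Default` style values and the dialogue line are fabricated:

```python
# Hedged example; assumes fontvalidator.py is importable from the working directory.
from fontvalidator import State, parse_line

styles = {"Default": State(font="Arial", italic=False, weight=400, drawing=False)}
dialogue = r"{\fnTimes New Roman\b1}Hello {\r}world"
for state, text in parse_line(dialogue, styles["Default"], styles):
    print(state.font, state.weight, state.italic, repr(text))
```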
{
"source": "jpenney/smartass",
"score": 2
} |
#### File: src/smartass/cli.py
```python
import logging
from functools import wraps
import click
import pkg_resources
from . import DumbProcessor, SmartProcessor
from .clickutils import ClickContextObj
from .fileutils import open_subfile, update_subfile
LOGGER = logging.getLogger(__name__)
def _common_cli(func):
default_log_level = logging.getLevelName(logging.INFO).lower()
@click.option('--log-level', type=click.Choice([
logging.getLevelName(lev).lower() for lev in [
logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
logging.CRITICAL]]),
default=default_log_level, show_default=True,
show_choices=True,
help='log level displayed')
@click.option(
"--no-backup/--backup", default=True, show_default=True,
help="enable/disable creation of backup files")
@click.option(
"--process-comments/--no-process-comments", default=False,
show_default=True,
help='enable/disable processing of comment events')
@click.option(
"--skip-name", multiple=True, default=[], metavar='ACTOR_NAME',
help=(
"lines by this actor (case insensitive) will be skipped. "
"May be passed multiple times."))
@click.version_option(
pkg_resources.get_distribution(__name__.split('.')[0]).version)
@click.argument(
'subfiles', nargs=-1, required=True, metavar='FILE',
type=click.Path(
exists=True, file_okay=True, dir_okay=False, writable=True))
@click.pass_context
@wraps(func)
def wrapper(ctx, log_level, *args, **kwargs):
obj = ctx.obj = ctx.obj or ClickContextObj()
level = getattr(logging, log_level.upper())
obj.log_level = level
return ctx.invoke(func, *args, **kwargs)
return wrapper
def _run_cli(processor_factory, backup, process_comments, skip_name, subfiles):
processor_args = dict(
process_comments=process_comments,
names_to_skip=skip_name)
processor = processor_factory(**processor_args)
for subfile in subfiles:
try:
(subdoc, encoding, newline) = open_subfile(subfile)
(total_events, events_processed,
events_updated) = processor.process_document(subdoc)
LOGGER.info('%s: events=%d, processed=%d, updated=%d',
subfile, total_events, events_processed,
events_updated)
if events_updated:
update_subfile(subfile, subdoc, encoding, newline, backup)
except RuntimeError as err:
LOGGER.error(
"%s: %s: %s",
subfile, type(err).__name__, str(err))
@click.command(no_args_is_help=True)
@_common_cli
def smartass(*args, **kwargs):
"""Smarten punctionation on ass subtitle files."""
_run_cli(SmartProcessor, *args, **kwargs)
@click.command(no_args_is_help=True)
@_common_cli
def dumbass(*args, **kwargs):
"""Unsmarten punctuation on ass subtitle files."""
_run_cli(DumbProcessor, *args, **kwargs)
```
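A hedged way to exercise these commands without a shell is Click's test runner; `episode01.ass` is a placeholder path that only demonstrates the call shape:

```python
# Sketch only: 'episode01.ass' is purely illustrative and need not exist.
from click.testing import CliRunner
from smartass.cli import smartass

runner = CliRunner()
result = runner.invoke(smartass, ["--no-backup", "--skip-name", "Sign", "episode01.ass"])
print(result.exit_code, result.output)
```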
#### File: src/smartass/processor.py
```python
from abc import ABCMeta, abstractmethod
from html import unescape
from ass import Comment, Dialogue, Document
from smartypants import smartypants
__all__ = ['SmartProcessor', 'DumbProcessor']
class Processor(metaclass=ABCMeta):
_DUMB_MAPPING = str.maketrans({
'\u201c': '"',
'\u201d': '"',
'\u2014': '--',
'\u2026': '...',
'\u2018': "'",
'\u2019': "'"})
def __init__(
self, make_backups=True, process_comments=False,
names_to_skip=None):
self._make_backups = make_backups
supported_events = set([Dialogue])
if process_comments:
supported_events.add(Comment)
self._supported_events = tuple(e for e in supported_events)
self._names_to_skip = set([])
if names_to_skip:
self._names_to_skip = set(n.lower() for n in names_to_skip)
@property
def make_backups(self):
return self._make_backups
def _smarten(self, line): # pylint: disable=R0201
return unescape(smartypants(line))
def _dumben(self, line):
return line.translate(self._DUMB_MAPPING)
@abstractmethod
def _process_text(self, line):
pass
def event_supported(self, event):
if not isinstance(event, self._supported_events):
return False
if (event.name and self._names_to_skip and
event.name.lower() in self._names_to_skip):
return False
return True
def _process_event(self, event):
if not self.event_supported(event):
return False
new_text = self._process_text(event.text)
if new_text == event.text:
return False
event.text = new_text
return True
def process_document(self, document):
if not isinstance(document, Document):
raise TypeError("'document' (%r) unsupported")
total_events = 0
events_processed = 0
events_updated = 0
for event in document.events:
total_events += 1
if self.event_supported(event):
events_processed += 1
if self._process_event(event):
events_updated += 1
return (total_events, events_processed, events_updated)
class SmartProcessor(Processor):
def _process_text(self, line):
return self._smarten(self._dumben(line))
class DumbProcessor(Processor):
def _process_text(self, line):
return self._dumben(line)
``` |
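A hedged usage sketch of the processors above, assuming a subtitle file parsed with the `ass` library; the path and actor name are placeholders:

```python
# Sketch: smarten punctuation in one document and report the counts.
import ass
from smartass.processor import SmartProcessor

with open("episode01.ass", encoding="utf_8_sig") as f:   # placeholder path
    doc = ass.parse(f)

processor = SmartProcessor(names_to_skip=["Sign"])       # placeholder actor name
total, processed, updated = processor.process_document(doc)
print(f"{updated} of {processed} processed events changed ({total} total)")
```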
{
"source": "jpenninkhof/cf-mendix-buildpack",
"score": 2
} |
#### File: cf-mendix-buildpack/lib/database_config.py
```python
import json
import logging
import os
import re
import buildpackutil
from abc import abstractmethod, ABC
from urllib.parse import parse_qs, urlencode, unquote
from m2ee import logger # noqa: E402
def get_database_config(development_mode=False):
if any(
[x.startswith("MXRUNTIME_Database") for x in list(os.environ.keys())]
):
return None
factory = DatabaseConfigurationFactory()
configuration = factory.get_instance()
return configuration.get_m2ee_configuration()
class DatabaseConfigurationFactory:
"""Returns a DatabaseConfiguration instance to return database configuration
for the Mendix runtime"""
def __init__(self):
self.vcap_services = buildpackutil.get_vcap_services_data()
def get_instance(self):
# explicit detect supported configurations
if self.present_in_vcap(
"hana", tags=["hana", "database", "relational"]
):
return SapHanaDatabaseConfiguration(
self.vcap_services["hana"][0]["credentials"]
)
# fallback to original configuration
url = self.get_database_uri_from_vcap(self.vcap_services)
if url is None:
url = os.environ["DATABASE_URL"]
if url is not None:
return UrlDatabaseConfiguration(url)
return None
def present_in_vcap(self, service_name, tags=[]):
"""Check if service is available in vcap and given tags match"""
if service_name is not None:
present = service_name in self.vcap_services
if not present:
return False
binding = self.vcap_services[service_name][0]
return set(binding["tags"]) & set(tags) == set(tags)
# Loop services when no service name given, check types
for binding in [
self.vcap_services[service][0] for service in self.vcap_services
]:
if set(binding["tags"]) & set(tags) == set(tags):
return True
return False
def get_database_uri_from_vcap(self, vcap_services):
for service_type_name in (
"p-mysql",
"p.mysql",
"elephantsql",
"cleardb",
"PostgreSQL",
"dashDB",
"mariadb",
"postgresql",
"rds",
"postgresql_shared",
):
if vcap_services and service_type_name in vcap_services:
return vcap_services[service_type_name][0]["credentials"][
"uri"
]
if "azure-sqldb" in vcap_services:
return vcap_services["azure-sqldb"][0]["credentials"]["jdbcUrl"]
for key in vcap_services:
try:
uri = vcap_services[key][0]["credentials"]["uri"]
if key.startswith("rds"):
return uri
if key.startswith("dashDB"):
return uri
if uri.startswith("postgres"):
return uri
if uri.startswith("mysql"):
return uri
except (TypeError, KeyError):
pass
return None
class DatabaseConfiguration(ABC):
"""Base clase for database configurations. Implements only the basics."""
def __init__(self):
self.development_mode = (
os.getenv("DEVELOPMENT_MODE", "").lower() == "true"
)
def get_m2ee_configuration(self):
"""Return the m2ee configuration for connection to the database"""
self.init()
m2ee_config = {
"DatabaseType": self.get_database_type(),
"DatabaseHost": self.get_database_host(),
"DatabaseUserName": self.get_database_username(),
"DatabasePassword": self.get_database_password(),
"DatabaseName": self.get_database_name(),
"DatabaseJdbcUrl": self.get_database_jdbc_url(),
}
m2ee_config.update(self.get_additional_m2ee_config())
if self.development_mode:
m2ee_config.update(
{
"ConnectionPoolingMaxIdle": 1,
"ConnectionPoolingMaxActive": 20,
"ConnectionPoolingNumTestsPerEvictionRun": 50,
"ConnectionPoolingSoftMinEvictableIdleTimeMillis": 1000,
"ConnectionPoolingTimeBetweenEvictionRunsMillis": 1000,
}
)
# Strip empty values
filter_m2ee_config = {k: v for k, v in m2ee_config.items() if v}
logging.debug(
"Returning database configuration: {}".format(
json.dumps(filter_m2ee_config)
)
)
return filter_m2ee_config
def get_override_connection_parameters(self):
params_str = os.getenv("DATABASE_CONNECTION_PARAMS", "{}")
try:
params = json.loads(params_str)
return params
except Exception:
logger.warning(
"Invalid JSON string for DATABASE_CONNECTION_PARAMS. Ignoring value."
)
return {}
@abstractmethod
def init(self):
"""Parse the configuration. This method should read the source (either
vcap or environment variables) to make it possible that methods are
get_dabatabase_hostname can work"""
@abstractmethod
def get_database_type(self):
"""Return the database type for the M2EE configuration"""
pass
@abstractmethod
def get_database_host(self):
"""Return the database host for the M2EE configuration"""
pass
@abstractmethod
def get_database_username(self):
"""Return the username for the M2EE configuration"""
pass
@abstractmethod
def get_database_password(self):
"""Return the password for the M2EE configuration"""
pass
@abstractmethod
def get_database_jdbc_url(self):
"""Return the database jdbc url for the M2EE configuration
Implementations should use get_override_connection_parameters allowing users
adjust or extend the parameters retrieved from the VCAP.
"""
pass
@abstractmethod
def get_database_name(self):
"""Return the database name for the M2EE configuration"""
pass
@abstractmethod
def get_additional_m2ee_config(self):
return {}
class UrlDatabaseConfiguration(DatabaseConfiguration):
"""Returns a database configuration based on the original code from buildpackutil."""
def __init__(self, url):
super().__init__()
logging.debug("Detected URL based database configuration.")
self.url = url
def init(self):
patterns = [
r"(?P<type>[a-zA-Z0-9]+)://(?P<user>[^:]+):(?P<password>[^@]+)@(?P<host>[^/]+)/(?P<dbname>[^?]*)(?P<extra>\?.*)?", # noqa: E501
r"jdbc:(?P<type>[a-zA-Z0-9]+)://(?P<host>[^;]+);database=(?P<dbname>[^;]*);user=(?P<user>[^;]+);password=(?P<password>.*)$", # noqa: E501
]
supported_databases = {
"postgres": "PostgreSQL",
"postgresql": "PostgreSQL",
"mysql": "MySQL",
"db2": "Db2",
"sqlserver": "SQLSERVER",
}
for pattern in patterns:
match = re.search(pattern, self.url)
if match is not None:
break
else:
raise Exception(
"Could not parse database credentials from database uri %s"
% self.url
)
database_type_input = match.group("type")
if database_type_input not in supported_databases:
raise Exception("Unknown database type: %s", database_type_input)
database_type = supported_databases[database_type_input]
config = {
"DatabaseType": database_type,
"DatabaseUserName": unquote(match.group("user")),
"DatabasePassword": <PASSWORD>("password"),
"DatabaseHost": match.group("host"),
"DatabaseName": match.group("dbname"),
}
# parsing additional parameters
# 1) check for sslmode in existing jdbc url for m2ee config
# 2) update jdbc url (from vcap) with input from DATABASE_CONNECTION_PARAMS
jdbc_params = {}
# getting values from url
has_extra = "extra" in match.groupdict() and match.group("extra")
if has_extra:
extra = match.group("extra").lstrip("?")
jdbc_params = parse_qs(extra)
# defaults
if database_type == "PostgreSQL":
jdbc_params.update({"tcpKeepAlive": "true"})
extra_url_params_str = os.getenv("DATABASE_CONNECTION_PARAMS", "{}")
if extra_url_params_str is not None:
try:
extra_url_params = json.loads(extra_url_params_str)
jdbc_params.update(extra_url_params)
except Exception:
logger.warning(
"Invalid JSON string for DATABASE_CONNECTION_PARAMS"
)
# generate jdbc_url, might be None
jdbc_url = self.get_jdbc_strings(self.url, match, config, jdbc_params)
if jdbc_url is not None:
logger.debug("Setting JDBC url: {}".format(jdbc_url))
config.update({"DatabaseJdbcUrl": jdbc_url})
if "sslmode" in jdbc_params:
sslmode = jdbc_params["sslmode"]
if sslmode and sslmode[0] == "require":
config.update({"DatabaseUseSsl": True})
self.m2ee_config = config
def get_jdbc_strings(self, url, match, config, jdbc_params):
# JDBC strings might be different from connection uri strings retrieved from the VCAP
# For supported/tested situations we'll create a JDBC string based on
# * url (from VCAP or DATABASE_URL)
# * config (extracted information from url)
# * jdbc_params (from DATABASE_URL or DATABASE_CONNECTION_PARAMS)
#
# if given url is a JDBC string this will be returned
#
# return unmodified jdbc string
if url.startswith("jdbc:"):
return url
if len(jdbc_params) > 0:
extra_jdbc_params = "?{}".format(urlencode(jdbc_params, True))
else:
extra_jdbc_params = ""
if config["DatabaseType"] == "PostgreSQL":
jdbc_url = "jdbc:postgresql://{}/{}{}".format(
config["DatabaseHost"],
config["DatabaseName"],
extra_jdbc_params,
)
return jdbc_url
def get_database_type(self):
return self.m2ee_config.get("DatabaseType")
def get_database_host(self):
return self.m2ee_config.get("DatabaseHost")
def get_database_username(self):
return self.m2ee_config.get("DatabaseUserName")
def get_database_password(self):
return self.m2ee_config.get("DatabasePassword")
def get_database_jdbc_url(self):
return self.m2ee_config.get("DatabaseJdbcUrl")
def get_database_name(self):
"""Return the database name for the M2EE configuration"""
return self.m2ee_config.get("DatabaseName")
def get_additional_m2ee_config(self):
if self.m2ee_config["DatabaseType"] == "MySQL":
return {
"ConnectionPoolingNumTestsPerEvictionRun": 50,
"ConnectionPoolingSoftMinEvictableIdleTimeMillis": 10000,
"ConnectionPoolingTimeBetweenEvictionRunsMillis": 10000,
}
return {}
class SapHanaDatabaseConfiguration(DatabaseConfiguration):
database_type = "SAPHANA"
def __init__(self, credentials):
super().__init__()
logging.debug("Detected SAP Hana configuration.")
self.credentials = credentials
def init(self):
pass
def get_database_type(self):
return self.database_type
def get_database_host(self):
return "{}:{}".format(
self.credentials.get("host"), self.credentials.get("port")
)
def get_database_username(self):
return self.credentials.get("user")
def get_database_password(self):
return self.credentials.get("password")
def get_database_jdbc_url(self):
"""Return the database jdbc url for the M2EE configuration"""
url = self.credentials.get("url", "")
pattern = r"jdbc:sap://(?P<host>[^:]+):(?P<port>[0-9]+)(?P<q>\?(?P<params>.*))?$"
match = re.search(pattern, url)
if match is None:
logger.error("Unable to parse Hana JDBC url string for parameters")
raise Exception(
"Unable to parse Hana JDBC url string for parameters"
)
q = None
parameters = {}
if match.group("q") is not None and match.group("params") is not None:
q = match.group("q")
params = match.group("params")
parameters.update(parse_qs(params))
# override parameters from DATABASE_CONNECTION_PARAMS
parameters.update(self.get_override_connection_parameters())
if q is not None and len(parameters) > 0:
parameterStr = "?{}".format(urlencode(parameters, True))
url = url.replace(q, parameterStr)
return url
def get_database_name(self):
return self.credentials.get("schema")
def get_additional_m2ee_config(self):
return {}
``` |
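To exercise the URL-based branch above in isolation, bypassing VCAP detection, a throwaway sketch; the connection string is invented and not a real credential:

```python
# Hedged sketch: construct the configuration directly from a fake URL.
from database_config import UrlDatabaseConfiguration  # assumes lib/ is on sys.path

config = UrlDatabaseConfiguration(
    "postgres://mendix:not-a-real-password@db.example.com:5432/appdb?sslmode=require"
)
print(config.get_m2ee_configuration())
```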
{
"source": "jpenrici/Clock_Tkinter",
"score": 3
} |
#### File: Clock_Tkinter/src/clock_tk.py
```python
from tkinter import *
from PIL import Image, ImageTk
from datetime import datetime
from math import cos, sin, pi
import time, _thread
import sys
# Waiting time in minutes
STANDARD = 15
class ClockTk(Frame):
def __init__(self, master=None, start="00:00:00", interval=STANDARD):
super().__init__(master)
self.master = master
self.master.title("TIMER - Press ESC to exit.")
self.master.bind("<Escape>", lambda x : quit())
self.path = "../images/clock.png"
self.pack()
hour, minute, second = start.split(':')
self.interval = interval
self.start = (int(hour) * 60) + int(minute)
self.stop = self.start + self.interval
self.alert = self.start + self.interval * 0.8
self.running = False
self.make()
self.render()
def make(self):
try:
img = Image.open(self.path)
img_width, img_height = img.size
if img_width != img_height:
self.close()
self.size = img_width
self.canvas = Canvas(self.master, width=self.size, height=self.size)
self.canvas.pack()
self.image = ImageTk.PhotoImage(img)
except:
self.close()
def pointer(self, angle, radius):
# Adjust angles
a = (angle - 90) * pi / 180.0
x = (self.size / 2) + cos(a) * radius
y = (self.size / 2) + sin(a) * radius
return (x, y)
def render(self):
# Update current time
now = datetime.now()
minute = now.minute
minutes = (now.hour * 60) + minute
hour = now.hour if now.hour <= 12 else now.hour - 12
# Background - alert
color = "#C4C4C4" if minutes < self.stop else "red"
self.canvas.create_rectangle(0,0, self.size, self.size, fill=color)
# Watch image
self.canvas.create_image(0, 0, image=self.image, anchor='nw')
# Pointers
x, y = self.pointer(minute * 6, self.size / 3)
self.canvas.create_line(self.size / 2, self.size / 2, x, y, width=5, fill='green')
x, y = self.pointer((hour * 30) + (minute * 0.4), self.size / 4)
self.canvas.create_line(self.size / 2, self.size / 2, x, y, width=5, fill='green')
# Minute marker
i = 0.0
while minutes + i < self.stop:
i += 0.1
a = (minute + i) * 6
x, y = self.pointer(a, self.size / 2 - 5)
x1, y1 = self.pointer(a, self.size / 2 - 20)
color = '#7AFF71'
if minutes + i > self.start:
color = "#0A6D04"
if minutes + i > self.alert:
color = "red"
self.canvas.create_line(x, y, x1, y1, width=5, fill=color)
def run(self):
self.running = True
_thread.start_new_thread(self.update, tuple([]))
print("Timer activated ...\n")
print("Use Esc to exit the application ...\n")
def close(self):
print("There is something wrong.")
exit()
def update(self):
while self.running:
time.sleep(10)
self.render()
def validate(args):
now = datetime.now()
start = "EMPTY"
interval = 0
print("Check ...", args)
for arg in args:
if arg[0:4] == "now=":
if start == "EMPTY":
try:
v = arg[4:].split(':')
if len(v) == 1:
v += ["00"]
h = "error" if int(v[0]) < 0 or int(v[0]) > 23 else v[0]
m = "error" if int(v[1]) < 0 or int(v[1]) > 59 else v[1]
print("Time status [ {0}:{1}:00 ]".format(h, m))
start = "{0}:{1}:00".format(int(h), int(m))
except:
print("Time error ...")
if arg[0:8] == "minutes=":
if interval == 0:
try:
v = arg[8:]
print("Interval status [ {0} ]".format(v))
v = "error" if int(v) <= 0 or int(v) > 60 else v
interval = int(v)
except:
print("Wrong interval value ...")
if start == "EMPTY":
start = "{0}:{1}:{2}".format(now.hour, now.minute, now.second)
print("Current time [ {0} ]".format(start))
if interval == 0:
interval = STANDARD
print("Interval default [ {0} ]".format(interval))
return [start, interval]
def main(args):
# Check
value_start, value_interval = validate(args)
# Run
root = Tk()
app = ClockTk(master=root, start=value_start, interval=value_interval)
app.run()
app.mainloop()
def test():
# Inputs
tests = [
[],
["test"],
["test", "minutes=-1", "test"],
["minutes=-1"],
["minutes=0"],
["minutes=61"],
["minutes=a5"],
["minutes= 5"],
["minutes=5"],
["now=hour:minute"],
["now=24:10", "minutes=5"],
["now=10:60", "minutes=5"],
["now=10:10", "minutes=5"],
["now=16", "minutes=5"],
["now=1", "minutes=5"]
]
for i in tests:
validate(i)
print("="*80)
if __name__ == '__main__':
# Test
# test()
# Run
main(sys.argv)
``` |
{
"source": "jpenrici/CodeWars_Trainings",
"score": 4
} |
#### File: CodeWars_Trainings/Python_Trainings/sumNumbers.py
```python
def sum_array(arr):
if arr == None:
return 0
if len(arr) <= 2:
return 0
arr.sort()
del arr[0]
del arr[-1]
return sum(arr)
def testEqual(test, str1, str2):
print("[{0:d}]: {1}".format(test, str1 == str2))
def main():
# "Basic tests"
# "None or Empty"
testEqual(1, sum_array(None), 0)
testEqual(2, sum_array([]), 0)
# "Only one Element"
testEqual(3, sum_array([3]), 0)
testEqual(4, sum_array([-3]), 0)
# "Only two Element"
testEqual(5, sum_array([ 3, 5]), 0)
testEqual(6, sum_array([-3, -5]), 0)
# "Real Tests"
testEqual(7, sum_array([6, 2, 1, 8, 10]), 16)
testEqual(8, sum_array([6, 0, 1, 10, 10]), 17)
testEqual(9, sum_array([-6, -20, -1, -10, -12]), -28)
testEqual(10, sum_array([-6, 20, -1, 10, -12]), 3)
if __name__ == '__main__':
main()
``` |
{
"source": "jpenrici/Computer_Graphics",
"score": 3
} |
#### File: Computer_Graphics/NumPy_Training/img_histogram_2D.py
```python
import os
import numpy as np
from matplotlib import pyplot as plt, colors as colors
PATH = "../Images/"
RED = 0
GREEN = 1
BLUE = 2
def view(data, X, Y, title="2D histogram"):
c = ["RED", "GREEN", "BLUE"]
dataX = data[:, :, X].flatten()
dataY = data[:, :, Y].flatten()
bins = np.arange(0, 256)
# plot
plt.hist2d(dataX, dataY, bins, norm=colors.LogNorm())
plt.title(title)
plt.xlabel(c[X])
plt.ylabel(c[Y])
plt.xlim([0, 255])
plt.ylim([0, 255])
plt.colorbar()
plt.show()
def test(filename):
img_np = PATH + filename + ".npy"
print("Data: ", img_np)
if not os.path.exists(img_np):
print("File not found!")
return
data = np.load(img_np)
h, w, c = data.shape
if c > 3:
data = data[:, :, :3]
view(data, RED, GREEN)
view(data, RED, BLUE)
view(data, GREEN, BLUE)
if __name__ == '__main__':
# ndArray (Imagem)
test("folha_croton")
```
#### File: Computer_Graphics/NumPy_Training/img_order_pixels.py
```python
import os
import numpy as np
PATH = "../Images/"
def organize(data):
# ndArray
print("Original:")
print(data[:2])
# Prepare and reshape the array
height, width, channels = data.shape
data = data.flatten() # flatten to a vector
temp = [i for i in data] # convert to a Python list
temp = [temp[i:i+channels] for i in range(0, height * width * channels,
channels)] # split back into pixels
# Ordenação crescente
for c in range(0, channels):
# Ordenar do último canal para o primeiro
i = channels - c - 1
temp.sort(key=lambda value: value[i])
npArray = np.array(temp, dtype=np.uint8)
npArray = npArray.flatten() # flatten
npArray = npArray.reshape(height, width, channels) # reshape
print("Result:")
print(npArray[:2])
def test(filename):
img_np = PATH + filename + ".npy"
print("Data: ", img_np)
if not os.path.exists(img_np):
print("File not found!")
return
data = np.load(img_np)
organize(data)
if __name__ == '__main__':
# ndArray
h, w, c = 5, 4, 3
numbers = [i for i in range(h*w*c, 0, -1)]
npArray = np.array(numbers).reshape(h, w, c)
organize(npArray)
# ndArray (Imagem)
test("folha_croton")
```
#### File: Computer_Graphics/NumPy_Training/RGB.py
```python
import numpy as np
from PIL import Image
from math import sqrt, sin, cos, radians
'''
A set of methods and classes for studying and manipulating images.
'''
class RGB:
__rgb = []
__RED, __GREEN, __BLUE = 0, 1, 2
def __init__(self, *rgb):
'''
in: int,int,int
[int, int, int]
#HexHexHex
Int
out: [int, int, int]
'''
if len(rgb) == 1 and isinstance(rgb[0], str):
self.fromHex(rgb[0])
return
if len(rgb) == 1 and isinstance(rgb[0], int):
self.fromInt(rgb[0])
return
if len(rgb) == 1 and isinstance(rgb[0], list) and len(rgb[0]) == 3:
red, green, blue = rgb[0]
elif len(rgb) == 3:
red, green, blue = rgb[0], rgb[1], rgb[2]
else:
red, green, blue = 0, 0, 0
self.__rgb = self.__config(red, green, blue)
def __str__(self):
text = "[{:3d}, {:3d}, {:3d}]".format(self.__rgb[0], self.__rgb[1],
self.__rgb[2])
return text
def __config(self, red, green, blue):
# Validate types
red = 0 if not isinstance(red, int) else red
green = 0 if not isinstance(green, int) else green
blue = 0 if not isinstance(blue, int) else blue
# Lower bound
red = 0 if red < 0 else red
green = 0 if green < 0 else green
blue = 0 if blue < 0 else blue
# Upper bound
red = 255 if red > 255 else red
green = 255 if green > 255 else green
blue = 255 if blue > 255 else blue
return [red, green, blue]
def setRGB(self, red, green, blue):
self.__rgb = self.__config(red, green, blue)
def setColor(self, rgb: 'list' = None):
if not isinstance(rgb, list):
return
if len(rgb) != 3:
return
self.__rgb = self.__config(rgb[self.__RED], rgb[self.__GREEN],
rgb[self.__BLUE])
def getRGB(self):
return self.__rgb
def toHex(self):
'''
in: [int] rgb = [255, 255, 255]
out: str rgb = #FFFFFF
'''
text = "#"
for i in self.__rgb:
q = i // 16
r = i % 16
if q <= 9:
text += str(q)
else:
text += chr(55 + q)
if r <= 9:
text += str(r)
else:
text += chr(55 + r)
return text
def toInt(self):
'''
in: [int] rgb = [255, 255, 255]
out: int rgb = 16777215
'''
num = self.__rgb[self.__RED] * 256 * 256
num += self.__rgb[self.__GREEN] * 256 + self.__rgb[self.__BLUE]
return num
def fromInt(self, num):
'''
in: int rgb = 16777215
out: [int] rgb = [255, 255, 255]
'''
red = num // (256 * 256)
num %= 256 * 256
green = num // 256
num %= 256
blue = num
self.setRGB(red, green, blue)
def __hex2int(self, text):
num = 0
text = text[::-1].upper()
for i in range(len(text)):
value = ord(text[i])
if ord('0') <= value <= ord('9'):
value = value - ord('0')
if ord('A') <= value <= ord('F'):
value = value - ord('A') + 10
num += value * 16 ** i
return num
def fromHex(self, text):
'''
in: str rgb = #FFFFFF
out: [int] rgb = [255, 255, 255]
'''
if text == "" or len(text) != 7:
print("failed to execute")
return
text = text.replace('#', "")
red = self.__hex2int(text[0:2])
green = self.__hex2int(text[2:4])
blue = self.__hex2int(text[4:6])
self.setRGB(red, green, blue)
class Color:
# Predefined colors
RED = [255, 0, 0]
GREEN = [0, 255, 0]
BLUE = [0, 0, 255]
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
class Img:
__channels = 3 # RGB
def __init__(self, height, width, background=Color.BLACK):
self.width = width
self.height = height
self.background = background
# Pixel storage as a flat vector
self.pixels = np.array(background * height * width, dtype='uint8')
def __isValid(self, x, y):
if x < 0 or x >= self.width:
return False
if y < 0 or y >= self.height:
return False
return True
def setPixel(self, x, y, color=Color.WHITE):
x = int(x)
y = int(y)
if not self.__isValid(x, y):
# Do not draw outside the image bounds
return
pos = self.__channels * (x + y * self.width)
self.pixels[pos:pos + self.__channels] = color
def getPixel(self, x, y):
if not self.__isValid(x, y):
return [None, None, None]
pos = self.__channels * (x + y * self.width)
pixel = self.pixels[pos:pos + self.__channels]
return pixel
def setBackground(self, color=Color.WHITE):
# Replace the background color
stop = self.height * self.width * self.__channels
for pos in range(0, stop, self.__channels):
value = self.pixels[pos:pos + self.__channels]
if (value == self.background).all():
self.pixels[pos:pos + self.__channels] = color
self.background = color
def line(self, x0, y0, x1, y1, color=Color.WHITE):
x = x1 - x0
y = y1 - y0
step_x = -1 * (x < 0) + (x > 0)
step_y = -1 * (y < 0) + (y > 0)
d = int(sqrt(x ** 2 + y ** 2))
for i in range(d):
self.setPixel(x0 + (i * step_x), y0 + (i * step_y), color)
def circle(self, x0, y0, radius=2, start=0, stop=360, color=Color.WHITE):
start = 0 if (start < 0 or start > 360) else start
stop = 360 if (stop < 0 or stop > 360) else stop
for angle in range(start, stop):
x = x0 + int(radius * sin(radians(angle)))
y = y0 + int(radius * cos(radians(angle)))
self.setPixel(x, y, color)
def rectangle(self, x0, y0, height=2, width=2, color=Color.WHITE,
fill=False):
if not fill:
self.line(x0, y0, x0 + width, y0, color)
self.line(x0, y0, x0, y0 + height, color)
self.line(x0 + width, y0, x0 + width, y0 + height, color)
self.line(x0, y0 + height, x0 + width, y0 + height, color)
if fill:
if not self.__isValid(x0, y0):
return
if not self.__isValid(x0 + width, y0 + height):
return
# Fill
for y in range(y0, y0 + height):
pos = self.__channels * (x0 + y * self.width)
self.pixels[pos:pos + self.__channels * width] = color * width
def view(self):
output = self.pixels.reshape(self.height, self.width, self.__channels)
# print(output)
display = Image.fromarray(output, 'RGB')
display.show()
def export(self, filename):
output = self.pixels.reshape(self.height, self.width, self.__channels)
img = Image.fromarray(output, 'RGB')
img.save(filename)
if __name__ == '__main__':
pass
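# Usage sketch (illustrative only; the filename below is arbitrary, not from the project):
# img = Img(100, 100, background=Color.WHITE)
# img.circle(50, 50, radius=20, color=Color.RED)
# img.rectangle(10, 10, height=30, width=30, color=Color.BLUE, fill=True)
# img.export("example.png")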
``` |
{
"source": "jpenrici/Expression_Calculator",
"score": 3
} |
#### File: src/calculator/test_expression_calculator.py
```python
from expression_calculator import ExpressionCalculator, Util
def test_ERROR():
# Error message
assert (ExpressionCalculator.ERROR == "ERROR")
print("test_ERROR ... ok")
def test_isNullOrEmpty():
# Null or empty
assert (Util.isNullOrEmpty("") is True)
assert (Util.isNullOrEmpty(" ") is True)
assert (Util.isNullOrEmpty(None) is True)
# Filled with a string
assert (Util.isNullOrEmpty("a") is False)
assert (Util.isNullOrEmpty("-") is False)
assert (Util.isNullOrEmpty("0") is False)
assert (Util.isNullOrEmpty('1') is False)
assert (Util.isNullOrEmpty("-1") is False)
assert (Util.isNullOrEmpty("1+1") is False)
assert (Util.isNullOrEmpty("(10+10.1)") is False)
# Filled via conversion
assert (Util.isNullOrEmpty(0) is False) # integer value
assert (Util.isNullOrEmpty(-1) is False) # negative value
assert (Util.isNullOrEmpty(10.1) is False) # fractional value
assert (Util.isNullOrEmpty(10+10.1) is False) # calculation
print("test_isNullOrEmpty ... ok")
def test_isNumber():
# Non-numeric
assert (Util.isNumber("") is False)
assert (Util.isNumber(" ") is False)
assert (Util.isNumber(None) is False)
assert (Util.isNumber("A") is False)
assert (Util.isNumber("-") is False)
assert (Util.isNumber("+") is False)
assert (Util.isNumber(",") is False)
assert (Util.isNumber("-,") is False)
assert (Util.isNumber("(10+10.1)") is False) # expressão
# Numéricos com erros
assert (Util.isNumber("0.1") is False) # separador decimal != ','
assert (Util.isNumber(".1") is False)
assert (Util.isNumber("1.") is False)
assert (Util.isNumber(",1") is False) # erro digitação
assert (Util.isNumber("1,") is False)
assert (Util.isNumber("-1,") is False)
assert (Util.isNumber("-,1") is False)
assert (Util.isNumber("0,,1") is False)
assert (Util.isNumber("0,0,1") is False)
assert (Util.isNumber(",,1") is False)
assert (Util.isNumber("1 ") is False)
assert (Util.isNumber(" 1") is False)
assert (Util.isNumber("1A") is False)
assert (Util.isNumber("0,A") is False)
# Numeric strings
assert (Util.isNumber("0") is True)
assert (Util.isNumber("1") is True)
assert (Util.isNumber("-1") is True)
assert (Util.isNumber("+1") is True)
assert (Util.isNumber("10") is True)
assert (Util.isNumber("0,1") is True)
# Numbers
assert (Util.isNumber(-10) is True)
assert (Util.isNumber(10.1) is True)
assert (Util.isNumber(10+10.1) is True)
print("test_isNumber ... ok")
def test_validate():
# Null or empty
assert (Util.validate("") is False)
assert (Util.validate(" ") is False)
assert (Util.validate(None) is False)
# Invalid characters
assert (Util.validate("a") is False)
assert (Util.validate("(0,1+-,2/5.)") is False) # separador decimal != ','
assert (Util.validate("(01,23+-,4*5,6/7-8+9,)+A") is False)
# Unmatched parentheses
assert (Util.validate("(((1+2))") is False) # '(' > ')'
assert (Util.validate("(((01,23+-,4*5,6/7-8+9,))))") is False)
# All characters valid
assert (Util.validate("0") is True)
assert (Util.validate("-") is True)
assert (Util.validate("-1") is True)
assert (Util.validate("0+1") is True)
assert (Util.validate("+(-1)") is True)
assert (Util.validate("+ ( - 1 )") is True)
print("test_validate ... ok")
def test_prepare():
t = ExpressionCalculator()
# Adding a negative number '+-'
assert (t.prepare("1+-1") == "1-1")
# '+' operator next to '-'
assert (t.prepare("+-1") == "0-1")
# '+' operator at the start
assert (t.prepare("+1+-2") == "0+1-2")
# '-' operator at the start
assert (t.prepare("-1-1") == "0-1-1")
# Multiplying by a negative number '*-'
assert (t.prepare("1*-1") == "1*(0-1)*1")
# Multiplying by a positive number '*+'
assert (t.prepare("1*+1") == "1*1")
# Dividing by a negative number '/-'
assert (t.prepare("1/-1") == "1*(0-1)/1")
# Dividing by a positive number '/+'
assert (t.prepare("1/+1") == "1/1")
# '-' operator next to parentheses
assert (t.prepare("-(-(-15*-10)))") == "0-(0-(0-15*(0-1)*10)))")
print("test_prepare ... ok")
def test_postFix():
t = ExpressionCalculator()
# Invalid
assert (t.postFix("") == "ERROR")
assert (t.postFix(" ") == "ERROR")
assert (t.postFix(None) == "ERROR")
assert (t.postFix("expression") == "ERROR")
# Number
assert (t.postFix(10) == "10") # integer value
assert (t.postFix("0") == "0")
assert (t.postFix(-1) == "0 1 -") # negative value
assert (t.postFix("-1") == "0 1 -")
assert (t.postFix("-0,1") == "0 0,1 -") # fractional value
# Expression
assert (t.postFix("- 1 + 1") == "0 1 - 1 +") # space
assert (t.postFix("-1+1") == "0 1 - 1 +")
assert (t.postFix("0+1-2") == "0 1 + 2 -")
assert (t.postFix("1+2+3") == "1 2 + 3 +")
assert (t.postFix("1,1+1,1") == "1,1 1,1 +")
assert (t.postFix("1+(1+1)") == "1 1 1 + +")
assert (t.postFix("(1+0,1)+2") == "1 0,1 + 2 +")
assert (t.postFix("(1*0,1)-15") == "1 0,1 * 15 -")
assert (t.postFix("(1/0,1)+-1") == "1 0,1 / 1 -")
assert (t.postFix("(1/0,1)*-1") == "1 0,1 / 0 1 - * 1 *")
assert (t.postFix("(1/0,1)*-1+15") == "1 0,1 / 0 1 - * 1 * 15 +")
assert (t.postFix("(1/0,1)/-5") == "1 0,1 / 0 1 - * 5 /")
assert (t.postFix("(1/0,1)/(-1+1)") == "1 0,1 / 0 1 - 1 + /")
assert (t.postFix("((1+-1,1))+-((-5+1))") == "1 1,1 - 0 5 - 1 + -")
assert (t.postFix("-(((-1)))+-(((-2+3)))") == "0 0 1 - - 0 2 - 3 + -")
assert (t.postFix("+(((-1)))+-(((-2+3)))") == "0 0 1 - + 0 2 - 3 + -")
assert (t.postFix("1,0+4,0") == "1,0 4,0 +")
assert (t.postFix("1,0+4,0+2,0+3") == "1,0 4,0 + 2,0 + 3 +")
assert (t.postFix("5,0-1,0") == "5,0 1,0 -")
assert (t.postFix("5,0-2,0-2") == "5,0 2,0 - 2 -")
assert (t.postFix("5,0*2,0") == "5,0 2,0 *")
assert (t.postFix("5,0*2,0*2") == "5,0 2,0 * 2 *")
assert (t.postFix("10,0/2,0") == "10,0 2,0 /")
assert (t.postFix("10,0/2,0/2/10") == "10,0 2,0 / 2 / 10 /")
assert (t.postFix("(10+10)-(5*2)") == "10 10 + 5 2 * -")
assert (t.postFix("(1,5+2,5+3)-(2,5+2,5)") == "1,5 2,5 + 3 + 2,5 2,5 + -")
assert (t.postFix("5*-10+20-5") == "5 0 1 - * 10 * 20 + 5 -")
assert (t.postFix("2000+1/2") == "2000 1 2 / +")
assert (t.postFix("2+2+4*5+1/1000") == "2 2 + 4 5 * + 1 1000 / +")
print("test_postFix ... ok")
def test_resolve():
t = ExpressionCalculator()
# Invalid
assert (t.resolve("") == "ERROR")
assert (t.resolve(" ") == "ERROR")
assert (t.resolve(None) == "ERROR")
assert (t.resolve("A") == "ERROR")
assert (t.resolve(",1") == "ERROR")
assert (t.resolve("(1 + 2)") == "ERROR") # input: infix, esperado: posfix
# Número
assert (t.resolve("0") == "0")
assert (t.resolve(" 0") == "0")
assert (t.resolve("1,1") == "1,1")
# Expression
assert (t.resolve("10,0 2,0 / 2 / 10 /") == "0.25")
assert (t.resolve("10 10 + 5 2 * -") == "10.0")
assert (t.resolve("1,5 2,5 + 3 + 2,5 2,5 + -") == "2.0")
assert (t.resolve("0 1 -") == "-1.0")
assert (t.resolve("0 1 - 1 +") == "0.0")
assert (t.resolve("0 1 + 2 -") == "-1.0")
assert (t.resolve("1 2 + 3 +") == "6.0")
assert (t.resolve("1,1 1,1 +") == "2.2")
assert (t.resolve("1 0,1 + 2 +") == "3.1")
assert (t.resolve("1 0,1 * 15 -") == "-14.9")
assert (t.resolve("1 0,1 / 1 -") == "9.0")
assert (t.resolve("1 0,1 / 0 1 - * 1 *") == "-10.0")
assert (t.resolve("1 0,1 / 0 1 - * 1 * 15 +") == "5.0")
assert (t.resolve("1 0,1 / 0 1 - * 5 /") == "-2.0")
assert (t.resolve("1 0,1 / 0 1 - 1 + /") == "ERROR") # Divisão por zero
assert (t.resolve("1 1,1 - 0 5 - 1 + -") == "3.9")
assert (t.resolve("0 0 1 - - 0 2 - 3 + -") == "0.0")
assert (t.resolve("0 0 1 - + 0 2 - 3 + -") == "-2.0")
assert (t.resolve("5 0 1 - * 10 * 20 + 5 -") == "-35.0")
assert (t.resolve("2000 1 2 / +") == "2000.5")
assert (t.resolve("2 2 + 4 5 * + 1 1000 / +") == "24.001")
assert (t.resolve("1,5 2,5 + 3 + 2,5 2,5 + -") == "2.0")
assert (t.resolve("100 200 + 2 / 5 * 7 +") == "757.0")
assert (t.resolve("2 3 1 * + 9 -") == "-4.0")
assert (t.resolve("10 2 8 * + 3 -") == "23.0")
print("test_resolve ... ok")
def test_calc():
t = ExpressionCalculator()
assert (t.calc("0") == 0)
assert (t.calc("1") == 1)
assert (t.calc("-1") == -1)
assert (t.calc("-1+1") == 0)
assert (t.calc("0+1-2") == -1)
assert (t.calc("1+2+3") == 6)
assert (t.calc("1,1+1,1") == 2.2)
assert (t.calc("(1+0,1)+2") == 3.1)
assert (t.calc("(1*0,1)-15") == -14.9)
assert (t.calc("(1/0,1)+-1") == 9.0)
assert (t.calc("(1/0,1)*-1") == -10.0)
assert (t.calc("(1/0,1)/-1") == -10.0)
assert (t.calc("(1/0,1)/(-1+1)") is None)
assert (t.calc("((1+-1,1))+-((-5+1))") == 3.9)
assert (t.calc("-(((-1)))+-(((-2+3)))") == 0)
assert (t.calc("5*-10+20-5") == -35)
assert (t.calc("2000+1/2") == 2000.5)
assert (t.calc("2+2+4*5+1/1000") == 24.001)
assert (t.calc("((10 + (2 * 8)) - 3)") == 23.0)
print("test_calc ... ok")
def test_extra():
# Different delimiter
t = ExpressionCalculator()
postfix = t.postFix(infix="1+2*5,0+-1", delim="|")
assert (float(t.resolve(postfix=postfix, delim="|")) == 10)
print("test_extra ... ok")
if __name__ == '__main__':
# Main
test_ERROR()
test_isNullOrEmpty()
test_isNumber()
test_validate()
test_prepare()
test_postFix()
test_resolve()
test_calc()
# Extra
test_extra()
``` |
{
"source": "jpenrici/Extensions_Inkscape",
"score": 3
} |
#### File: Extensions_Inkscape/Color-RGB-Random/color-rgb-random.py
```python
import random
import inkex
import simplestyle
class RandomRGB(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option("", "--randomizeRed",
action="store",
type="inkbool",
dest="randomizeRed",
default=True,
help="")
self.OptionParser.add_option("", "--randomizeGreen",
action="store",
type="inkbool",
dest="randomizeGreen",
default=True,
help="")
self.OptionParser.add_option("", "--randomizeBlue",
action="store",
type="inkbool",
dest="randomizeBlue",
default=True,
help="")
self.OptionParser.add_option("", "--randomizeFill",
action="store",
type="inkbool",
dest="randomizeFill",
default=True,
help="")
self.OptionParser.add_option("", "--randomizeStroke",
action="store",
type="inkbool",
dest="randomizeStroke",
default=False,
help="")
self.OptionParser.add_option("", "--keepColors",
action="store",
type="inkbool",
dest="keepColors",
default=False,
help="")
def effect(self):
for id, node in self.selected.iteritems():
try:
style = simplestyle.parseStyle(node.get('style'))
except:
inkex.errormsg(_("No style attribute found for id: %s") % id)
continue
if (self.options.randomizeFill == False and
self.options.randomizeStroke == False):
break
if (self.options.keepColors):
fill_red = style['fill'][1:3]
fill_green = style['fill'][3:5]
fill_blue = style['fill'][5:7]
stroke_red = style['stroke'][1:3]
stroke_green = style['stroke'][3:5]
stroke_blue = style['stroke'][5:7]
else:
fill_red = "00"
fill_green = "00"
fill_blue = "00"
stroke_red = "00"
stroke_green = "00"
stroke_blue = "00"
if (self.options.randomizeFill):
if (self.options.randomizeRed):
fill_red = "%02x" % random.randint(0, 0xFF)
if (self.options.randomizeGreen):
fill_green = "%02x" % random.randint(0, 0xFF)
if (self.options.randomizeBlue):
fill_blue = "%02x" % random.randint(0, 0xFF)
fill = "#%s%s%s" % (fill_red, fill_green, fill_blue)
style['fill'] = fill
node.set('style', simplestyle.formatStyle(style))
if (self.options.randomizeStroke):
if (self.options.randomizeRed):
stroke_red = "%02x" % random.randint(0, 0xFF)
if (self.options.randomizeGreen):
stroke_green = "%02x" % random.randint(0, 0xFF)
if (self.options.randomizeBlue):
stroke_blue = "%02x" % random.randint(0, 0xFF)
stroke = "#%s%s%s)" % (stroke_red, stroke_green, stroke_blue)
style['stroke'] = stroke
node.set('style', simplestyle.formatStyle(style))
if __name__ == '__main__':
e = RandomRGB()
e.affect()
``` |
{
"source": "jpenrici/Gimp_experiences",
"score": 3
} |
#### File: GIMP_Plugin_Py/GIMP_Colors_Experience/gimp_plugin_colorsExperience.py
```python
import os
import sys
import datetime
from array import array
# GIMP constants and symbols: gimp, pdb, register and the main function
from gimpfu import *
# Descrição
LABEL = "Colors experience"
INFO = "Example of visualization of grouped colors."
HELP = globals()["__doc__"]
# Paths
HOME = os.environ['HOME']
FULL_PATH = os.path.realpath(__file__)
PATH, FILENAME = os.path.split(FULL_PATH)
ENV = PATH + "/pyenv/lib/python2.7"
# Log
now = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
log = "\nGIMP: " + now
log += "\nPlug-in: " + LABEL + '\n'
logError = "Unexpected error: "
# Virtualenv
if not os.path.isdir(ENV):
logError += "pyenv/lib/python2.7 ... not found\n"
else:
sys.path.extend([ENV, ENV + "/site-packages",
ENV + "/site-packages/setuptools"])
# Dependencies
dependencies = True
try:
import numpy as np
log += "numpy " + np.__version__ + " ... ok\n"
except ImportError as err:
logError += str(err) + " not found\n"
dependencies = False
if not dependencies:
log += logError
# print(log)
def message(msg):
pdb.gimp_message(msg)
def pxRgnToArray(layer):
rgn = layer.get_pixel_rgn(0, 0, layer.width, layer.height, False, False)
# print(rgn) # <gimp.PixelRgn for drawable 'layer'>
values = array("B", rgn[0:layer.width, 0:layer.height]) # uchar -> int
return values # vector
def createNewLayer(img, name, npArray):
newLayer = gimp.Layer(img, name, img.width, img.height,
img.active_layer.type, 100, NORMAL_MODE)
rgn = newLayer.get_pixel_rgn(0, 0, newLayer.width, newLayer.height, True)
rgn[:, :] = np.uint8(npArray).tobytes() # gimp.PixelRgn
img.add_layer(newLayer, lastLayer(img))
gimp.displays_flush()
def lastLayer(img):
pos = 0
for i in range(len(img.layers)):
if(img.layers[i] == img):
pos = i
return pos
def exportTxt(filename, text):
filename = filename.replace("-", "")
filename = filename.replace(":", "")
filename = filename.replace(" ", "_")
try:
filename = open(filename, "w")
filename.write(text)
filename.close()
except Exception:
pass
def saveLog(text):
filename = "LogGimpPlugin_" + now + ".txt"
exportTxt(filename, text)
def imageType(channels):
if channels == 1:
return "Level (Gray)"
if channels == 2:
return "LA (Gray, Alpha)"
if channels == 3:
return "RGB (Red, Green, Blue)"
if channels == 4:
return "RGBA (Red, Green, Blue, Alpha)"
return None
def colorsExperience(img, layer):
global log
# Check dependencies
if not dependencies:
message(LABEL + ", error: missing dependencies ...")
saveLog(log)
return
inform = "Processing " + img.name + " ..."
gimp.progress_init(inform)
log += inform + '\n'
try:
# Gather information
height = layer.height
width = layer.width
channels = layer.bpp
img_type = imageType(channels)
if img_type is None:
message("Plugin not prepared for this analysis!")
return
log += layer.name + ": " + img_type + '\n'
# Time the run for large images
start = datetime.datetime.now()
# Convert the image to an array
img_copy = pxRgnToArray(layer)
log += layer.name + " to Array ...\n"
pdb.gimp_progress_pulse()
# Prepare and reshape the array
img_temp = [i for i in img_copy] # copy as integers
img_temp = [img_temp[i:i+channels] for i in range(0, height * width
* channels, channels)] # split back into pixels
log += layer.name + " initial preparation ...\n"
pdb.gimp_progress_pulse()
# Ascending sort
for c in range(0, channels):
# Sort from the last channel to the first
i = channels - c - 1
img_temp.sort(key=lambda value: value[i])
log += layer.name + " sorting by channel " + str(i) + " ...\n"
pdb.gimp_progress_pulse() # show progress; slow process
# Convert to a NumPy array
npArray = np.array(img_temp, dtype=np.uint8)
log += layer.name + " to Numpy Array ...\n"
pdb.gimp_progress_pulse()
npArray = npArray.flatten() # flatten
npArray = npArray.reshape(height, width, channels) # reshape
log += layer.name + " reshape ...\n"
pdb.gimp_progress_pulse()
end = datetime.datetime.now()
log += "time: " + str((end - start).seconds) + " seconds ...\n"
# Create a layer with the result
name = layer.name + " ordered"
createNewLayer(img, name, npArray)
log += img.name + " create layer " + name + " ...\n"
pdb.gimp_selection_none(img)
except Exception as err:
log += "[Error - Gimp Plugin: " + FILENAME + "]: " + str(err) + '\n'
gimp.message(LABEL + " failed.")
saveLog(log)
pdb.gimp_progress_end()
print(log) # Log to the Linux console
gettext.install("gimp20-python", gimp.locale_directory, unicode=True)
# Plug-in registration
register(
"colorsExperience", # nome da função
N_(INFO), # sobre o plug-in
HELP, # docstring como Help
"jpenrici", # autor
"GPL V2 License", # licença
"2020", # data de criação (ano)
N_(LABEL), # rótulo do plugin no menu
"RGB*, GRAY*", # tipos de imagens suportados
[ # parâmetros de entrada do método
(PF_IMAGE, "img", _("_Image"), None),
(PF_DRAWABLE, "drw", _("_Drawable"), None),
],
[], # parâmetros de saída do método
colorsExperience, # nome de chamada do método
menu="<Image>/Image", # caminho no menu
domain=("gimp20-python", gimp.locale_directory)
# on_query=None,
# on_run=None
)
# Main function, called after the plug-in is registered
main()
```
#### File: GIMP_Plugin_Py/GIMP_Face_Detection/gimp_plugin_faceDetection.py
```python
import os
import sys
import datetime
from array import array
# GIMP constants and symbols: gimp, pdb, register and the main function
from gimpfu import *
# Description
LABEL = "Face Detection"
INFO = "Detect face in image."
HELP = globals()["__doc__"]
# Paths
FULL_PATH = os.path.realpath(__file__)
PATH, FILENAME = os.path.split(FULL_PATH)
ENV = PATH + "/pyenv/lib/python2.7"
HAAR_MODEL = "haarcascade_frontalface_default.xml"
# Log
now = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
log = "\nGIMP: " + now
log += "\nPlug-in: " + LABEL + '\n'
logError = "Unexpected error: "
# Virtualenv
if not os.path.isdir(ENV):
logError += "pyenv/lib/python2.7 ... not found\n"
else:
sys.path.extend([ENV, ENV + "/site-packages",
ENV + "/site-packages/setuptools"])
# Dependencies
cascade_path = ""
dependencies = True
try:
import numpy as np
log += "numpy " + np.__version__ + " ... ok\n"
except ImportError as err:
logError += str(err) + " not found\n"
dependencies = False
try:
import cv2 as cv
log += "opencv " + cv.__version__ + " ... ok\n"
cascade_path = os.path.join(os.path.dirname(os.path.abspath(cv.__file__)),
"data/" + HAAR_MODEL)
except ImportError as err:
logError += str(err) + " not found\n"
dependencies = False
if not os.path.isfile(cascade_path):
logError += cascade_path + " not found\n"
cascade_path = ENV + "/site-packages/cv2/data/" + HAAR_MODEL
elif not os.path.isfile(cascade_path):
logError += cascade_path + " not found\n"
cascade_path = PATH + "/" + HAAR_MODEL
elif not os.path.isfile(cascade_path):
logError += cascade_path + " not found\n"
dependencies = False
if os.path.isfile(cascade_path):
log += cascade_path + " ... ok\n"
if (not dependencies):
log += logError
# print(log)
def message(msg):
pdb.gimp_message(msg)
def pxRgnToNumpy(layer):
rgn = layer.get_pixel_rgn(0, 0, layer.width, layer.height, False, False)
values = array("B", rgn[0:layer.width, 0:layer.height]) # uchar -> int
npArray = np.array(values, dtype=np.uint8)
return npArray.reshape(layer.height, layer.width, layer.bpp)
def createNewLayer(img, name, npArray):
newLayer = gimp.Layer(img, name, img.width, img.height,
img.active_layer.type, 100, NORMAL_MODE)
rgn = newLayer.get_pixel_rgn(0, 0, newLayer.width, newLayer.height, True)
rgn[:, :] = np.uint8(npArray).tobytes() # gimp.PixelRgn
img.add_layer(newLayer, lastLayer(img))
gimp.displays_flush()
def lastLayer(img):
pos = 0
for i in range(len(img.layers)):
if(img.layers[i] == img):
pos = i
return pos
def exportTxt(filename, text):
filename = filename.replace("-", "")
filename = filename.replace(":", "")
filename = filename.replace(" ", "_")
try:
filename = open(filename, "w")
filename.write(text)
filename.close()
except Exception:
pass
def saveLog(text):
filename = "LogGimpPlugin_" + now + ".txt"
exportTxt(filename, text)
def imageType(channels):
if channels == 3:
return "RGB (Red, Green, Blue)"
if channels == 4:
return "RGBA (Red, Green, Blue, Alpha)"
return None
def faceDetection(img, layer, option):
global log
# Check dependencies
if (not dependencies):
message(LABEL + ", error: missing dependencies ...")
saveLog(log)
return
inform = "Processing " + img.name + " ..."
gimp.progress_init(inform)
pdb.gimp_image_undo_group_start(img)
log += inform + '\n'
log += "Face " + option + " ...\n"
try:
# Convert the image to a NumPy array
img_copy = pxRgnToNumpy(layer)
log += layer.name + " to Numpy Array ...\n"
height, width, channels = img_copy.shape
img_type = imageType(channels)
if img_type is None:
message("Plugin not prepared for this analysis!")
return
log += layer.name + ": " + img_type + '\n'
# OpenCV
img_gray = cv.cvtColor(img_copy, cv.COLOR_RGB2GRAY)
log += layer.name + " to OpenCV ...\n"
log += "image RGB to GRAY ...\n"
# Detection
clf = cv.CascadeClassifier(cascade_path)
faces = clf.detectMultiScale(img_gray, 1.3, 5)
hits = len(faces)
log += str(hits) + " faces detected ...\n"
if hits > 0 and option == "detection":
# Rectangle color
color = [0 for i in range(channels)]
if channels % 2 == 0:
color[-1] = 255 # set Alpha in [R,G,B,A]
# Draw rectangles
for (x, y, w, h) in faces:
img_copy = cv.rectangle(img_copy, (x, y), (x+w, y+h),
tuple(color), 2)
name = layer.name + " faces"
createNewLayer(img, name, img_copy)
log += img.name + ": create layer " + name + " ...\n"
pdb.gimp_selection_none(img)
# export via OpenCV (optional - for testing)
# img_out = cv.cvtColor(img_copy, cv.COLOR_BGR2RGB)
# cv.imwrite("temp_" + layer.name, img_out)
elif hits > 0 and option == "selection":
pdb.gimp_selection_none(img)
for (x, y, w, h) in faces:
pdb.gimp_image_select_ellipse(img, 0, x, y, w, h)
# pdb.gimp_image_select_rectangle(img, 0, x, y , w, h)
else:
message("Detection not possible.")
pdb.gimp_image_undo_group_end(img)
gimp.displays_flush()
log += inform + " ok\n"
except Exception as err:
log += "[Error - Gimp Plugin: " + FILENAME + "]: " + str(err) + '\n'
gimp.message(LABEL + " failed.")
pdb.gimp_progress_end()
print(log) # Log to the Linux console
saveLog(log) # Optional
gettext.install("gimp20-python", gimp.locale_directory, unicode=True)
# Plug-in registration
register(
"faceDetection", # nome da função
N_(INFO), # sobre o plug-in
HELP, # docstring como Help
"jpenrici", # autor
"GPL V2 License", # licença
"2020", # data de criação (ano)
N_(LABEL), # rótulo do plugin no menu
"RGB*", # tipos de imagens suportados
[ # parâmetros de entrada do método
(PF_IMAGE, "img", _("_Image"), None),
(PF_DRAWABLE, "drw", _("_Drawable"), None),
(PF_RADIO, "option", "", "detection",
(("Only mark with a rectangular border.", "detection"),
("Only select for later copy and paste.", "selection"))),
],
[], # parâmetros de saída do método
faceDetection, # nome de chamada do método
menu="<Image>/Image", # caminho no menu
domain=("gimp20-python", gimp.locale_directory)
# on_query=None,
# on_run=None
)
# Main function, called after the plug-in is registered
main()
``` |
{
"source": "jpenrici/Maze_Tkinter",
"score": 4
} |
#### File: Maze_Tkinter/src/maze_matrix.py
```python
from random import randint, choice
class Maze():
__limit = 10 # minimum
def __init__(self, rows, cols, model="basic"):
self.size = (max(rows, self.__limit), max(cols, self.__limit))
self.matrix = [] # values: 0 = wall, 1 = free, 2 = route
self.route = [] # coordinates in matrix
if model == "spiral":
self.model = model
self.spiral()
else:
# standard model
if rows % 2 == 0:
rows += 1
self.size = (max(rows, self.__limit), max(cols, self.__limit))
self.model = "basic"
self.basic()
def basic(self):
rows, cols = self.size
begin, end = 2, cols - 3
x = randint(begin, end)
for y in range(rows):
if y % 2 == 0:
self.matrix += [int(i == x) for i in range(cols)]
self.matrix[y * cols + x] = 2
self.route += [(x, y)]
else:
self.matrix += [int(i % (cols - 1) != 0) for i in range(cols)]
if choice([True, False]):
self.matrix[y * cols + x + 1] = 0
x = randint(begin, x)
else:
self.matrix[y * cols + x - 1] = 0
x = randint(x, end)
def spiral(self):
rows, cols = self.size
self.matrix = [0 for i in range(rows * cols)]
step = 0
x, x0, x1 = 1, 1, cols - 2
y, y0, y1 = 1, 3, rows - 2
self.matrix[cols] = 2
self.route = [(0, 1)]
while True:
if x == y:
self.matrix[y * cols + x] = 2
self.route += [(x, y)]
else:
self.matrix[y * cols + x] = 1
if step % 4 == 0:
if x == x1:
x1 -= 2
step += 1
else:
x += 1
if step % 4 == 1:
if y == y1:
y1 -= 2
step += 1
else:
y += 1
if step % 4 == 2:
if x == x0:
x0 += 2
step += 1
else:
x -= 1
if step % 4 == 3:
if y == y0:
y0 += 2
step += 1
else:
y -= 1
if x0 > x1 or y0 > y1:
break
self.matrix[y * cols + x] = 2
self.route += [(x, y)]
def reverse(self):
matrix = []
rows, cols = self.size
for x in range(cols):
for y in range(rows):
matrix += [self.matrix[y * cols + x]]
self.matrix = matrix[::]
self.size = (cols, rows)
self.route = [i[::-1] for i in self.route]
def show(self):
symbol = ['#', '.', '@'] # [0 = wall, 1 = free, 2 = route]
rows, cols = self.size
out = ""
for i in range(len(self.matrix)):
if i % cols == 0:
out += "\n"
out += symbol[self.matrix[i]]
print("{}\n".format(out))
def __str__(self):
rows, cols = self.size
out = "Maze {} ({},{}) : Input {} Output {}".format(self.model,
rows, cols, self.route[0], self.route[-1])
return out
if __name__ == '__main__':
# Basic
maze = Maze(10, 10)
print(maze)
maze.reverse()
print(maze)
maze.reverse()
print(maze)
maze.show()
# Spiral
maze = Maze(10, 10, "spiral")
print(maze)
maze.reverse()
print(maze)
maze.show()
``` |
{
"source": "j-penson/gym-class-booker",
"score": 3
} |
#### File: gym-class-booker/app/api.py
```python
from flask import Flask
from flask_restx import Api, fields
from app import log_setup
app = Flask(__name__)
api = Api(app,
version='1.0',
title='Gym Booker API',
description='An API to log on to the gym website and book classes',
)
ns = api.namespace('api', description='Gym Class Booking')
gym_class = api.model('gym_class', {
'headless': fields.Boolean(default=False, description='Run Chrome in headless mode'),
'user': fields.String(required=True, description='User to log in and book as'),
'class_name': fields.String(required=True, description='Target gym class to book'),
'class_datetime': fields.DateTime(required=True, description='Target gym class date and time'),
})
def create_app():
"""Create the Flask application."""
log_setup.setup()
return app
```
#### File: gym-class-booker/app/read_secrets.py
```python
from google.cloud import secretmanager_v1beta1 as secretmanager
import ast
# Create the Secret Manager client.
client = secretmanager.SecretManagerServiceClient()
def get_creds(secret_id):
"""Read a secret from GCP secret manager"""
# Build the resource name of the secret version.
name = client.secret_version_path('607688776346', secret_id, '1')
# Access the secret version and return dictionary
response = client.access_secret_version(name)
payload = response.payload.data.decode('UTF-8')
return ast.literal_eval(payload)
```
#### File: gym-class-booker/app/website_utils.py
```python
import time
from urllib import parse
def sleep(time_secs=5):
time.sleep(time_secs)
def compare_url_paths(actual, expected):
def get_path(url):
url = parse.urlparse(url)
return url.path.replace('/', '')
actual_path = get_path(actual)
expected_path = get_path(expected)
return True if actual_path == expected_path else False
``` |
{
"source": "JPenuchot/EasyClangComplete",
"score": 3
} |
#### File: EasyClangComplete/tests/test_flag.py
```python
import imp
from unittest import TestCase
from EasyClangComplete.plugin.utils import flag
imp.reload(flag)
Flag = flag.Flag
class TestFlag(TestCase):
"""Test getting flags from CMakeLists.txt."""
def test_init(self):
"""Initialization test."""
flag = Flag("hello")
self.assertEqual(flag.as_list(), ["hello"])
self.assertEqual(flag.prefix, "")
self.assertEqual(flag.body, "hello")
self.assertEqual(str(flag), "hello")
flag = Flag("hello", "world")
self.assertEqual(flag.as_list(), ["hello", "world"])
self.assertEqual(flag.prefix, "hello")
self.assertEqual(flag.body, "world")
self.assertEqual(str(flag), "hello world")
def test_hash(self):
"""Test that hash is always the same when needed."""
flag1 = Flag("hello", "world")
flag2 = Flag("hello", "world")
flag3 = Flag("world", "hello")
self.assertEqual(hash(flag1), hash(flag2))
self.assertNotEqual(hash(flag1), hash(flag3))
def test_put_into_container(self):
"""Test adding to hashed container."""
flags_set = set()
flag1 = Flag("hello")
flag2 = Flag("world")
flag3 = Flag("hello", "world")
flag4 = Flag("world", "hello")
flags_set.add(flag1)
flags_set.add(flag2)
flags_set.add(flag3)
self.assertIn(flag1, flags_set)
self.assertIn(flag2, flags_set)
self.assertIn(flag3, flags_set)
self.assertNotIn(flag4, flags_set)
def test_tokenize(self):
"""Test tokenizing a list of all split flags."""
split_str = ["-I", "hello", "-Iblah", "-isystem", "world"]
list_of_flags = Flag.tokenize_list(split_str)
self.assertTrue(len(list_of_flags), 3)
self.assertIn(Flag("-I", "hello"), list_of_flags)
self.assertIn(Flag("-Iblah"), list_of_flags)
self.assertIn(Flag("-isystem", "world"), list_of_flags)
``` |
{
"source": "jpeone/lambdata-pt4",
"score": 2
} |
#### File: lambdata-pt4/test/another_my_mod_test.py
```python
from lambdata.my_mod import Helper
helper = Helper()
def test_enlarge():
assert helper.enlarge(5) == 500
```
#### File: lambdata-pt4/test/assignment_test.py
```python
import unittest
from lambdata.assignment import CustomFrame
class TestCustomFrame(unittest.TestCase):
def test_add_state_names(self):
custom_df = CustomFrame({'abbrev': ['ca', 'ct', 'co', 'tx', 'dc']})
self.assertTrue('name' not in list(custom_df.columns))
custom_df.add_state_name()
self.assertTrue('name' in list(custom_df.columns))
self.assertEqual(custom_df['name'][0], 'California')
self.assertEqual(custom_df['abbrev'][0], 'ca')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpeoples/jssg",
"score": 2
} |
#### File: jssg/jssg/jinja_utils.py
```python
from .execution_rule import ExecutionRule
class _RendererImpl:
def __init__(self, name=None, load_file=None, create_state=None, obj=None):
self._name = name
self._load_file = load_file
self._create_state = create_state
self._obj = obj
def _get_load(self):
if self._load_file: return self._load_file
if self._obj and hasattr(self._obj, 'load_file'):
return self._obj.load_file
def _default_load(jf, fs, inf, outf, ctx):
s = fs.read(inf)
t = jf.env.from_string(s)
return t
return _default_load
def _get_create_state(self):
if self._create_state: return self._create_state
if self._obj and hasattr(self._obj, 'create_state'):
return self._obj.create_state
def _default_create_state(jf, fs, inf, outf, ctx):
if self._name is None: return None
return dict(type=self._name, context=ctx)
return _default_create_state
def load_file(self, jf, fs, inf, outf, ctx):
return self._get_load()(jf, fs, inf, outf, ctx)
def create_state(self, jf, fs, inf, outf, ctx):
return self._get_create_state()(jf, fs, inf, outf, ctx)
class _JinjaRenderer(ExecutionRule):
def __init__(self, jf, impl):
self.jf = jf
self.impl = impl
def __call__(self, fs, inf, outf):
ctx = self.jf.immediate_context(fs, inf, outf)
template = self.impl.load_file(self.jf, fs, inf, outf, ctx)
state = self.impl.create_state(self.jf, fs, inf, outf, ctx)
execution = lambda state: fs.write(outf,
template.render(ctx, user_context=state))
return execution, state
class JinjaFile(ExecutionRule):
def __init__(self, env, ctx, hooks=None):
self.env = env
self.ctx = ctx
self.hooks = hooks if hooks is not None else []
def render_markdown_string(self, s, ctx):
# TODO: Something better than looking up in dict?
md = self.env.filters['markdown']
return md(self.env.from_string(s).render(ctx))
def immediate_context(self, fs, inf, outf):
ctx = self.ctx.copy()
if len(self.hooks) > 0:
for hook in self.hooks:
ctx.update(hook(self.ctx, inf, outf))
return ctx
def renderer(self, *, name=None, obj=None, load_file=None, create_state=None):
if load_file is not None or create_state is not None:
assert obj is None
obj = _RendererImpl(name, load_file, create_state, obj)
return _JinjaRenderer(self, obj)
def __call__(self, fs, inf, outf):
return self.renderer()(fs, inf, outf)
# Code for configuring libs, like the jinja_environment
def jinja_env(search_paths=(), prefix_paths=(), additional_loaders=None, filters=None, support_rss=True, rss_name="builtin/rss_base.xml"):
"""Initialize a jinja env that searches all load_paths for templates.
builtin templates can also be found under builtin/
search_paths are paths to search for template files. These paths will be
searched under, in order, when a template load (e.g. extends) is found.
prefix_paths will be searched, but must include the directory as a prefix.
For example if 'layouts' is in prefix_paths, and contains template 'a', then
to be found, you must use the name 'layouts/a'. To use a prefix other than the
full directory path, use a tuple (path, prefix). Continuing the example,
if ('layouts', 'x') is is prefix_paths, then the template is found via 'x/a'.
"""
import jinja2
def normalize_prefix_paths(p):
for tup in p:
try:
path, prefix = tup
yield (path, prefix)
except ValueError:
# Cannot be unpacked!
assert isinstance(tup, str)
yield (tup, tup)
if filters is None:
filters = {}
if additional_loaders is None:
additional_loaders = []
user_prefix_loader = jinja2.PrefixLoader(
{prefix: jinja2.FileSystemLoader(path)
for path, prefix in normalize_prefix_paths(prefix_paths)})
user_loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(path) for path in search_paths])
if support_rss:
additional_loaders.append(rss_loader(rss_name))
filters['rss_format_date'] = rss_date
if additional_loaders:
additional_loader = jinja2.ChoiceLoader(additional_loaders)
loader = jinja2.ChoiceLoader([user_loader, user_prefix_loader]+additional_loaders)
jinja_env = jinja2.Environment(loader=loader, undefined=jinja2.StrictUndefined)
if filters:
jinja_env.filters.update(filters)
return jinja_env
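# Usage sketch (illustrative; the paths and prefix below are hypothetical):
# env = jinja_env(search_paths=('templates',),
#                 prefix_paths=(('layouts', 'x'),),
#                 filters={'markdown': markdown_filter()})
# env.get_template('x/a.html')  # resolves to layouts/a.html via the 'x' prefix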
def markdown_filter(extensions=None, extension_configs=None):
import markdown
mdfilter = lambda x: markdown.markdown(x, extensions=extensions, extension_configs=extension_configs)
return mdfilter
_rss_base_src = """
<?xml version="1.0" encoding="UTF-8" ?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
<channel>
<title>{{rss_title}}</title>
<link>{{rss_home_page}}</link>
<atom:link href="{{rss_link}}" rel="self" type="application/rss+xml" />
<description>{{rss_description}}</description>
{% for page in pages %}
<item>
{%if page.title is defined and page.title is not none %}<title>{{page.title | striptags | e}}</title>{%endif%}
<link>{{page.fullhref}}</link>
<guid isPermaLink="true">{{page.fullhref}}</guid>
<pubDate>{{page.date | rss_format_date}}</pubDate>
<description>{{page.content | e}}</description>
</item>
{% endfor %}
</channel>
</rss>
""".strip()
def rss_date(x):
"""Format a datestr into a format acceptable for RSS"""
import dateutil.parser
import email.utils
if isinstance(x, str):
x = dateutil.parser.parse(x)
return email.utils.format_datetime(x)
def rss_loader(name='builtin/rss_base.xml'):
import jinja2
return jinja2.DictLoader({name: _rss_base_src})
def date_formatter(format_str='%B %d, %Y'):
return lambda x: format_date(x, format_str)
def format_date(x, format_str):
"""Format a datestr with a given format string"""
import dateutil.parser
if isinstance(x, str):
x = dateutil.parser.parse(x)
return x.strftime(format_str)
``` |
{
"source": "jperaltar/2015-sat-pfinal",
"score": 3
} |
#### File: 2015-sat-pfinal/cultural/xmlparser.py
```python
from xml.sax.handler import ContentHandler
from xml.sax import make_parser
import sys
"""
Parse the XML feed of cultural events and collect each activity as a dict in a list
"""
class myContentHandler(ContentHandler):
def __init__ (self):
self.inItem = False
self.inContent = False
self.Content = ""
self.Html = []
self.Activity = {}
self.inTitle = False
self.inType = False
self.inPrice = False
self.inDate = False
self.inEndDate = False
self.inHour = False
self.inDuration = False
self.inUrl = False
def startElement (self, name, attrs):
if name == 'contenido':
self.inItem = True
elif self.inItem:
if name == 'atributo':
self.inContent = True
nombre = attrs.getValue(u'nombre')
if nombre == 'TITULO':
self.inTitle = True
elif nombre == 'TIPO':
self.inType = True
elif nombre == 'PRECIO':
self.inPrice = True
elif nombre == 'FECHA-EVENTO':
self.inDate = True
elif nombre == 'FECHA-FIN-EVENTO':
self.inEndDate = True
elif nombre == 'HORA-EVENTO':
self.inHour = True
elif nombre == 'EVENTO-LARGA-DURACION':
self.inDuration = True
elif nombre == 'CONTENT-URL':
self.inUrl = True
def endElement (self, name):
if name == 'contenido':
self.inItem = False
elif self.inItem:
if name == 'atributo':
if self.inTitle:
self.Activity['title'] = self.Content
self.inTitle = False
elif self.inPrice:
self.Activity['precio'] = self.Content
self.inPrice = False
elif self.inDate:
self.Activity['fecha'] = self.Content
self.inDate = False
elif self.inEndDate:
self.Activity['final'] = self.Content
self.inEndDate = False
elif self.inHour:
self.Activity['hora'] = self.Content
self.inHour = False
elif self.inDuration:
self.Activity['duracion'] = self.Content
self.inDuration = False
elif self.inUrl:
self.Activity['url'] = self.Content
self.inUrl = False
elif self.inType:
self.Activity['tipo'] = self.Content
self.inType = False
self.Html.append(self.Activity)
self.Activity = {}
# To avoid Unicode trouble
self.inContent = False
self.Content = ""
def characters (self, chars):
if self.inContent:
self.Content = self.Content + chars
# --- Main prog
def getNews():
# Load parser and driver
theParser = make_parser()
theHandler = myContentHandler()
theParser.setContentHandler(theHandler)
# Ready, set, go!
theParser.parse("http://datos.madrid.es/portal/site/egob/" +
"menuitem.ac61933d6ee3c31cae77ae7784f1a5a0/" +
"?vgnextoid=00149033f2201410VgnVCM100000171f5" +
"a0aRCRD&format=xml&file=0&filename=206974-0-" +
"agenda-eventos-culturales-100&mgmtid=6c0b6d01" +
"df986410VgnVCM2000000c205a0aRCRD")
return theHandler.Html
``` |
{
"source": "j-peralva/AdventOfCode",
"score": 3
} |
#### File: day01_part2/model/floor.py
```python
class Floor:
counter = 0
first_time_basement = True
def __init__(self):
self.__floor = 0
self.__position = None
def move_up(self):
self.__floor += 1
self.counter += 1
def move_down(self):
self.__floor -= 1
self.counter += 1
if self.__floor < 0 and self.first_time_basement:
self.__position = self.counter
self.first_time_basement = False
@property
def floor(self) -> int:
return self.__floor
@property
def position(self) -> int:
return self.__position
```
#### File: day02_part1/model/WrappingPaper.py
```python
from typing import Generator
class WrappingPaper:
def __new__(cls, gen: Generator) -> int:
return cls.__calculate_area(gen)
@classmethod
def __calculate_area(cls, gen: Generator) -> int:
area = 0
for values in gen:
aux = (values[0] * values[1], values[0] * values[2], values[1] * values[2])
area += 2 * (aux[0] + aux[1] + aux[2]) + min(aux)
return area
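# Sanity check (sketch), using the well-known 2x3x4 example:
# surface area 2*(6+12+8) = 52 plus the smallest side area 6 gives 58.
# assert WrappingPaper(iter([(2, 3, 4)])) == 58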
```
#### File: 2015/day02_part2/main.py
```python
from model.WrappingPaper import WrappingPaper
from util.read_file import ReadFile
class Main:
@classmethod
def run(cls):
data = ReadFile('data/day02.dat')
print(WrappingPaper(data)[1])
if __name__ == '__main__':
Main.run()
```
#### File: day03_part2/model/agent.py
```python
try:
from model.grid import Grid
except ImportError:
from grid import Grid
class Agent:
__grid = Grid() # Every Agent shares the same Grid
def __init__(self):
coordinates = self.__grid.offset_adjustment[0], self.__grid.offset_adjustment[1]
self.__cur_position = coordinates # Every new agent starts at (0, 0) position of Grid
self.__grid.append(self)
def move_north(self):
line = self.__cur_position[0] - 1
column = self.__cur_position[1]
self.__cur_position = self.__grid.recalculate_grid(line, column)
def move_south(self):
line = self.__cur_position[0] + 1
column = self.__cur_position[1]
self.__cur_position = self.__grid.recalculate_grid(line, column)
def move_east(self):
line = self.__cur_position[0]
column = self.__cur_position[1] + 1
self.__cur_position = self.__grid.recalculate_grid(line, column)
def move_west(self):
line = self.__cur_position[0]
column = self.__cur_position[1] - 1
self.__cur_position = self.__grid.recalculate_grid(line, column)
def show_results(self):
print(f"Grid dimensions is: {self.__grid.grid_dimensions()}")
print(f"Number of cells of grid is: {len(self.__grid)}")
print(f"Total gifts delivered: {self.__grid.total_number_of_gifts()}")
print(f"Houses with at least one gift: {self.__grid.at_least_one_gift()}")
if __name__ == '__main__':
Agent()
``` |
{
"source": "j-peralva/PGI",
"score": 4
} |
#### File: j-peralva/PGI/simulator.py
```python
from sys import argv
from MyWorld import MyWorld
from World import World
from Actor import Actor
##
# This is the main method that sets up a virtual world
# and simulates the growth of the diseases in the world.
# If the number of iterations is given as a command line
# argument, run the simulation for that number of iterations.
# Otherwise, use the default number of iterations: 5.
# @author <NAME>
# @date 31/08/2020
def main(args = None):
numItr = 5
if len(args) > 1:
numItr = args[1]
print('Simulation of MyWorld')
world = MyWorld()
for i in range(numItr):
world.act()
objects = world.getObjects()
for each in objects:
each.act()
print('Simulation of World')
world = World(100, 100)
world.addObject(Actor(), 10, 10)
world.addObject(Actor(), 90, 90)
for i in range(numItr):
world.act()
objects = world.getObjects()
for each in objects:
each.act()
if __name__ == '__main__':
main(argv)
``` |
{
"source": "jpercent/gestalt",
"score": 2
} |
#### File: gestalt/jestalt/assembly.py
```python
import importlib
import logging
import json
__author__ = 'jpercent'
__all__ = ['construct_application']
logger = logging.getLogger(__name__)
def get_factory_method(string):
parts = string.split('.')
count = 0
if len(parts) <= 1:
return globals()[string]
cursor = importlib.import_module(parts[count])
count += 1
while count < len(parts):
cursor = getattr(cursor, parts[count])
count += 1
return cursor
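# Example (sketch): a dotted path is resolved attribute by attribute, e.g.
# get_factory_method("json.loads") returns the json.loads function.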
def spawn(qualified_type_name, args):
function = get_factory_method(qualified_type_name)
if args:
newobj = function(**args)
else:
newobj = function()
if not newobj:
logger.error("failed to create {type}; arguments = {args}".format(type=qualified_type_name, args=args))
raise Exception("cannot create object type = {type}, arguments = {args}".format(type=qualified_type_name, args=args))
return newobj
class GestaltCreateInstanceException(Exception):
pass
def create_instance(instance_name, conf, services, spawn_fn=spawn):
# print("instance = ", instance_name)
# print("services = ", services)
# print("conf =", conf.keys())
try:
objdesc = conf[instance_name]
args = None
deps = None
if not('type' in objdesc):
return objdesc
obj_type = objdesc['type']
if 'args' in objdesc:
args = objdesc['args']
for arg in args:
if args[arg] == 'reference':
args[arg] = conf['global_values'][arg]
if 'deps' in objdesc:
for key, value in objdesc['deps'].items():
if type(value) == list:
depobj = []
for element in value:
if element in services:
depobj.append(services[element])
else:
depobj.append(create_instance(element, conf, services, spawn_fn))
else:
if value in services:
depobj = services[value]
else:
depobj = create_instance(value, conf, services, spawn_fn)
if not deps:
deps = {}
deps[key] = depobj
if args and deps:
args.update(deps)
elif deps:
assert not args and len(deps.keys()) > 0
args = deps
# logger.debug("creating object of type = ", obj_type)
newobj = spawn_fn(obj_type, args)
if hasattr(newobj, '__dict__'):
#print("Instance name = ", instance_name)
newobj.__dict__['gestalt_name'] = instance_name
assert newobj
return newobj
except Exception as e:
error_msg = \
"Failed to create instance = {0} conf = {1}; services injected = {2}; spawn_fn = {3}"\
.format(instance_name, conf, services, spawn_fn)
logger.error(error_msg)
raise GestaltCreateInstanceException(error_msg)
def construct_services(conf, create_fn):
services = {}
level = 0
max_level = 0
while level <= max_level:
pop_keys = set([])
for key, value in conf.items():
# print ("VALUE = ", value)
if not ('service' in value) or not value['service']:
continue
level_value = 0
if 'service-level' in value:
level_value = value['service-level']
if level_value > max_level:
max_level = level_value
if level_value <= level:
services[key] = create_fn(key, conf, services)
pop_keys.add(key)
for key in pop_keys:
conf.pop(key)
level += 1
return services
def construct_application(conf, create_fn=create_instance):
application = {'main': None}
services = construct_services(conf, create_fn)
for key, value in conf.items():
if 'main' in conf[key]:
application['main'] = create_fn(key, conf, services)
application['services'] = services
assert application['main']
return application
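# Minimal wiring sketch (hypothetical type names, not from this project):
# conf = {
#     "db": {"type": "myapp.Database", "service": True, "args": {"dsn": "sqlite://"}},
#     "app": {"type": "myapp.App", "main": True, "deps": {"db": "db"}},
# }
# app = construct_application(conf)  # app['main'] is the App wired with the db service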
``` |
{
"source": "jpercent/phenom.io",
"score": 2
} |
#### File: doitweb/tamer/db.py
```python
import psycopg2
import copy
import cPickle
import re
from operator import itemgetter
# convert float (0 to 1) to 8 bit web color (e.g. 00 to ff)
def f2c(x):
if x > 1.0: x = 1.0
if x < 0.0: x = 0.0
c = hex(int(255*x))[2:]
if len(c) == 1:
c = '0' + c
return c
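# Examples (sketch): f2c(0.0) == '00', f2c(0.5) == '7f', f2c(1.0) == 'ff'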
# Faster than copy.deepcopy, but totally hacky:
# http://stackoverflow.com/questions/1410615/copy-deepcopy-vs-pickle
def copyhack(obj):
return cPickle.loads(cPickle.dumps(obj, -1))
class TamerDB:
conn = None
name = None
def __init__(self, dbname):
if self.conn is None:
self.conn = psycopg2.connect(database=dbname, user='django',
password='<PASSWORD>', host='localhost')
name = dbname
def source_list(self, n):
cur = self.conn.cursor()
cmd = '''SELECT id, local_id FROM local_sources LIMIT %s'''
cur.execute(cmd, (n,))
sl = []
for r in cur.fetchall():
sl.append({'id': r[0], 'name': r[1]})
return sl
def recent_sources(self, n):
cur = self.conn.cursor()
cmd = '''SELECT COUNT(*), date_added,
row_number() OVER (ORDER BY date_added) rank
FROM local_sources
GROUP BY date_added
LIMIT %s;'''
cur.execute(cmd, (n,))
return [{'date': r[1], 'count': r[0], 'rank': r[2]} for r in cur.fetchall()]
def schema_tables(self, schemaname):
cur = self.conn.cursor()
cmd = '''SELECT tablename FROM pg_tables
WHERE schemaname = %s ORDER BY tablename;'''
cur.execute(cmd, (schemaname,))
t = []
for r in cur.fetchall():
t.append(r[0])
return t
def table_attributes(self, tablename):
cur = self.conn.cursor()
cmd = '''SELECT attname FROM pg_attribute, pg_type
WHERE typname = %s
AND attrelid = typrelid
AND attname NOT IN ('cmin', 'cmax', 'ctid', 'oid', 'tableoid', 'xmin', 'xmax');'''
cur.execute(cmd, (tablename,))
a = []
for r in cur.fetchall():
a.append(r[0])
return a
def global_attributes(self):
cur = self.conn.cursor()
cmd = '''SELECT id, name FROM global_attributes;'''
cur.execute(cmd)
return [{'id': r[0], 'name': r[1]} for r in cur.fetchall()]
def global_attribute_names(self):
cur = self.conn.cursor()
cmd = '''SELECT name FROM global_attributes;'''
cur.execute(cmd)
return [r[0] for r in cur.fetchall()]
def source_name(self, sid):
cur = self.conn.cursor()
cmd = '''SELECT local_id FROM local_sources WHERE id = %s;'''
cur.execute(cmd, (sid,))
return cur.fetchone()[0]
def source_stats(self, sid):
cur = self.conn.cursor()
stats = {}
cmd = '''SELECT COUNT(*) FROM local_entities WHERE source_id = %s;'''
cur.execute(cmd, (sid,))
stats['nent'] = cur.fetchone()[0]
cmd = '''SELECT COUNT(*), COUNT(a.local_id)
FROM local_fields f
LEFT JOIN attribute_mappings a
ON f.id = a.local_id
WHERE source_id = %s;'''
cur.execute(cmd, (sid,))
r = cur.fetchone()
stats['ncol'] = r[0]
stats['nmap'] = r[1]
cmd = '''SELECT COUNT(*) FROM entity_matches
WHERE entity_a IN (SELECT id FROM local_entities WHERE source_id = %s);'''
cur.execute(cmd, (sid,))
stats['ndup'] = cur.fetchone()[0]
return stats
def config_params(self, model_name):
cur = self.conn.cursor()
cmd ='''SELECT name, COALESCE(description, name), value FROM configuration_properties
WHERE module = %s;'''
cur.execute(cmd, (model_name,))
return [{'name': r[0], 'description': r[1], 'value': r[2]} for r in cur.fetchall()]
def set_config(self, param_name, param_value):
cur = self.conn.cursor()
cmd = '''UPDATE configuration_properties SET value = %s
WHERE name = %s;'''
cur.execute(cmd, (param_value, param_name,))
self.conn.commit()
return cmd % (param_name, param_value)
def dedup_model_exists(self):
cur = self.conn.cursor()
#cmd = '''SELECT COUNT(*) FROM learning_attrs;'''
cmd = '''SELECT COUNT(weight), COUNT(*) FROM entity_field_weights;'''
cur.execute(cmd)
r = cur.fetchone()
return (int(r[0]) == int(r[1]) and int(r[0]) > 0)
##
# Major jobs
##
def import_from_pg_table(self, schemaname, tablename, eidattr, sidattr, dataattr):
cur = self.conn.cursor()
# Make a copy for importing
eidconst = 'row_number() over ()' if eidattr is None else eidattr
sidconst = "'0'" if sidattr is None else sidattr
cmd = '''CREATE TEMP TABLE import_tmp AS
SELECT %s::TEXT AS sid, %s::TEXT AS eid, %s::TEXT FROM %s.%s''' \
% (sidconst, eidconst, '::TEXT,'.join(dataattr), schemaname, tablename)
cur.execute(cmd)
# Add new source(s)
cmd = '''INSERT INTO local_sources (local_id, date_added)
SELECT DISTINCT %s || '/' || sid, NOW() FROM import_tmp;'''
cur.execute(cmd, (tablename,))
# Get new source_id(s)
cmd = '''UPDATE import_tmp i SET sid = s.id FROM local_sources s
WHERE s.local_id = %s || '/' || i.sid;'''
cur.execute(cmd, (tablename,))
# Add data columns to local_fields
cmd = '''INSERT INTO local_fields (source_id, local_name)
SELECT sid::INTEGER, %s FROM import_tmp GROUP BY sid;'''
for a in dataattr:
cur.execute(cmd, (a,))
# Add entities to local_entities
cmd = '''INSERT INTO local_entities (source_id, local_id)
SELECT sid::INTEGER, eid
FROM import_tmp
GROUP BY sid, eid;
UPDATE import_tmp i SET eid = e.id FROM local_entities e
WHERE i.eid = e.local_id;'''
cur.execute(cmd)
# Add data to local_data
for a in dataattr:
cmd = '''INSERT INTO local_data (field_id, entity_id, value)
SELECT f.id, eid::INTEGER, i.%s
FROM import_tmp i, local_fields f
WHERE i.sid::INTEGER = f.source_id
AND f.local_name = %%s
AND i.%s IS NOT NULL
AND length(i.%s) > 0;''' \
% (a, a, a)
cur.execute(cmd, (a,))
# Preprocess source(s) for map and dedup
cmd = '''SELECT DISTINCT sid::INTEGER FROM import_tmp;'''
cur.execute(cmd)
for r in cur.fetchall():
self.preprocess_source(r[0])
self.conn.commit()
def import_attribute_dictionary(self, att_id, schemaname, tablename, columnname):
cur = self.conn.cursor()
cmd = '''INSERT INTO global_data (att_id, value)
SELECT %%s, %s::TEXT FROM %s.%s;''' \
% (columnname, schemaname, tablename)
cur.execute(cmd, (att_id,))
self.conn.commit()
def import_synonym_dictionary(self, att_id, schemaname, tablename, columna, columnb):
cur = self.conn.cursor()
cmd = '''INSERT INTO global_synonyms (att_id, value_a, value_b)
SELECT %%s, %s::TEXT, %s::TEXT FROM %s.%s;''' \
% (columna, columnb, schemaname, tablename)
cur.execute(cmd, (att_id,))
self.conn.commit()
def import_attribute_template(self, templatename, schemaname, tablename, columnname):
cur = self.conn.cursor()
cmd = '''INSERT INTO templates (name) SELECT %s;'''
cur.execute(cmd, (templatename,))
cmd = '''SELECT id FROM templates WHERE name = %s;;'''
cur.execute(cmd, (templatename,))
tid = cur.fetchone()[0]
cmd = '''INSERT INTO attribute_templates (template_id, att_id)
SELECT %%s, g.id
FROM %s.%s t, global_attributes g
WHERE lower(t.%s::TEXT) = lower(g.name);''' \
% (schemaname, tablename, columnname)
cur.execute(cmd, (tid,))
self.conn.commit()
def import_global_schema(self, schemaname, tablename, columnname):
cur = self.conn.cursor()
cmd = '''INSERT INTO global_attributes (name, derived_from)
SELECT %s, 'WEB' FROM %s.%s;''' \
% (columnname, schemaname, tablename)
cur.execute(cmd)
self.conn.commit()
def preprocess_source(self, sid):
cur = self.conn.cursor()
cmd = '''SELECT preprocess_source(%s);
--SELECT extract_new_data(%s, true);
TRUNCATE entity_test_group;
INSERT INTO entity_test_group
SELECT id FROM local_entities WHERE source_id = %s;
SELECT entities_preprocess_test_group('t');'''
cur.execute(cmd, (sid, sid,sid,))
self.conn.commit()
def init_dedup(self, important, irrelevant):
cur = self.conn.cursor()
#cmd = '''INSERT INTO learning_attrs (tag_id)
# SELECT id
# FROM global_attributes
# WHERE name = %s;'''
cmd = '''TRUNCATE entity_field_weights;
INSERT INTO entity_field_weights
SELECT id, 1.0 FROM global_attributes;'''
cur.execute(cmd)
cmd = '''UPDATE entity_field_weights SET initial_bias = %s
WHERE field_id IN (SELECT id FROM global_attributes WHERE name = %s);'''
for attr in important:
cur.execute(cmd, (10.0, attr))
for attr in irrelevant:
cur.execute(cmd, (0.1, attr))
cmd = '''UPDATE entity_field_weights SET weight = initial_bias;'''
cur.execute(cmd)
self.conn.commit()
def rebuild_dedup_models(self):
cur = self.conn.cursor()
cmd = '''--SELECT learn_weights(0.05, 0.00001, 5, 1000, 0.2);
TRUNCATE entity_test_group;
INSERT INTO entity_test_group SELECT id FROM local_entities;
SELECT entities_preprocess_test_group('t');
SELECT entities_weights_from_test_group();'''
cur.execute(cmd)
self.conn.commit()
def rebuild_schema_mapping_models(self):
cur = self.conn.cursor()
cmd = '''SELECT preprocess_global();'''
cur.execute(cmd)
self.conn.commit()
def schema_map_source(self, sourceid):
cur = self.conn.cursor()
cmd = '''SELECT qgrams_results_for_source(%s);
SELECT ngrams_results_for_source(%s);
SELECT mdl_results_for_source(%s);
SELECT nr_composite_load();'''
cur.execute(cmd, (sourceid, sourceid, sourceid))
self.conn.commit()
def dedup_source(self, sid):
cur = self.conn.cursor()
self.rebuild_dedup_models()
cmd = '''TRUNCATE entity_test_group;
INSERT INTO entity_test_group
SELECT id FROM local_entities WHERE source_id = %s;
SELECT entities_preprocess_test_group('t');
SELECT entities_field_similarities_for_test_group();
SELECT entities_results_for_test_group('f');'''
#cmd = '''SELECT self_join(0.00001);
# --SELECT cluster(0.95);
# --SELECT two_way_join(0.00001);
# --SELECT incr_cluster(0.95);'''
cur.execute(cmd, (sid,))
self.conn.commit()
def dedup_all(self):
cur = self.conn.cursor()
self.rebuild_dedup_models()
cmd = '''TRUNCATE entity_test_group;
INSERT INTO entity_test_group
SELECT id FROM local_entities;
SELECT entities_preprocess_test_group('t');
SELECT entities_field_similarities_for_test_group();
SELECT entities_results_for_test_group('f');'''
cur.execute(cmd)
self.conn.commit()
# get two entites to compare
def get_entities_to_compare(self, approx_sim, sort):
cur = self.conn.cursor()
cmd = '''SELECT entity_a, entity_b, similarity
FROM entity_similarities
ORDER BY random()
LIMIT 1;'''
if sort == 'high':
cmd = '''SELECT entity_a, entity_b, similarity FROM entity_similarities
WHERE human_label IS NULL ORDER BY similarity DESC;'''
if approx_sim is not None:
cmd = '''SELECT entity_a, entity_b, similarity
FROM entity_similarities
WHERE similarity BETWEEN %s - 0.05 AND %s + 0.05
ORDER BY random()
LIMIT 1;'''
cur.execute(cmd, (approx_sim, approx_sim))
else:
cur.execute(cmd)
rec = cur.fetchone()
e1, e2, s = rec
return (e1, e2, s)
def entity_data(self, eid):
cur = self.conn.cursor()
cmd = '''SELECT g.id, lf.id, COALESCE(g.name, lf.local_name), ld.value
FROM local_data ld
INNER JOIN local_fields lf
ON ld.field_id = lf.id
LEFT JOIN (SELECT ga.id, ga.name, am.local_id
FROM global_attributes ga, attribute_mappings am
WHERE ga.id = am.global_id) g
ON lf.id = g.local_id
WHERE ld.entity_id = %s;'''
cur.execute(cmd, (int(eid),))
data = {}
for rec in cur.fetchall():
gid, fid, name, value = rec
value = '' if None else value
data[name] = value
# data.append({'global_id': gid, 'local_id': fid,
# 'name': name, 'value': value})
return data
def save_entity_comparison(self, e1id, e2id, answer):
cur = self.conn.cursor()
cmd = '''UPDATE entity_similarities SET human_label = %s
WHERE entity_a = %s AND entity_b = %s;'''
cur.execute(cmd, (answer, e1id, e2id))
if answer == 'YES':
cmd = '''INSERT INTO entity_matches SELECT %s, %s;'''
cur.execute(cmd, (e1id, e2id))
self.conn.commit()
``` |
{
"source": "jpereiran/chana",
"score": 3
} |
#### File: chana/chana/pos_tagger.py
```python
import os
from sklearn.externals import joblib
import warnings
warnings.filterwarnings("ignore")
class ShipiboPosTagger:
"""
Instance of the pre-trained Shipibo part-of-speech tagger
"""
def __init__(self):
my_path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(my_path, "files/pos_tagger/shipibo_svm_model.pkl")
self.postagger = joblib.load(path)
def features(self, sentence, tags, index):
"""
Function that returns the features of a sentence to be used in the model
"""
return{
'word': sentence[ index ],
'prevWord': '' if index == 0 else sentence[ index - 1 ],
'nextWord': '' if index == len( sentence ) -1 else sentence[ index + 1 ],
'isFirst': index == 0,
'isLast': index == len( sentence ) - 1,
'isCapitalized': sentence[index][0].upper() == sentence[ index ][ 0],
'isAllCaps': sentence[ index ].upper() == sentence[ index ],
'isAllLowers': sentence[ index ].lower() == sentence[ index ],
'prefix-1': sentence[ index ][ 0 ],
'prefix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][:2],
'prefix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][:3],
'prefix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][:4],
'suffix-1': sentence[ index ][ -1 ],
'suffix-2': '' if ( len(sentence) < 2 ) else sentence[ index ][-2:],
'suffix-3': '' if ( len(sentence) < 3 ) else sentence[ index ][-3:],
'suffix-4': '' if ( len(sentence) < 4 ) else sentence[ index ][-4:],
'tag-1': '' if index == 0 else tags[ index - 1 ],
'tag-2': '' if index < 2 else tags[ index - 2 ]
}
def pos_tag(self, sentence):
"""
Method that predicts the POS tags of a Shipibo sentence
"""
tags = []
tokens = sentence.split(" ")
for i in range(len(tokens)):
tags.append('')
for i in range(len(tokens)):
feat = []
feat.append(self.features(tokens,tags,i))
tag_predicted = self.postagger.predict(feat)[0]
tags[i] = tag_predicted
return tags
def full_pos_tag(self, sentence):
"""
Method that predicts the POS tags of a Shipibo sentence and returns the full tag names in Spanish
"""
tags = self.pos_tag(sentence)
for i in range(len(tags)):
tags[i] = self.get_complete_tag(tags[i])
return tags
def get_complete_tag(self,pos):
"""
Method that maps a POS tag to its full name in Spanish
"""
if pos == "ADJ": return "Adjetivo"
elif pos == "ADV" : return "Adverbio"
elif pos == "CONJ" : return "Conjunción"
elif pos == "DET" : return "Determinante"
elif pos == "INTJ" : return "Interjección"
elif pos == "NOUN" : return "Nombre"
elif pos == "PROPN" : return "Nombre Propio"
elif pos == "NUM" : return "Numeral"
elif pos == "ONM" : return "Onomatopeya"
elif pos == "INTW" : return "Palabra Interrogativa"
elif pos == "ADP" : return "Postposición"
elif pos == "PRON" : return "Pronombre"
elif pos == "PUNCT" : return "Puntuación"
elif pos == "SYM" : return "Símbolo"
elif pos == "VERB": return "Verbo"
elif pos == "AUX" : return "Verbo Auxiliar"
return "Desconocido"
```
#### File: chana/chana/syllabificator.py
```python
def syllabify(word):
"""
Function that returns all the syllables of a word
"""
word_vc = get_vc(word)
sibilantes = ['m','n', 's', 'sh', 'x']
syllables = []
syllable = ""
actual_pos = len(word_vc) - 1
if len(word_vc) == 1:
syllables.append(word_vc[0][0])
return syllables
while actual_pos >= 0 and word_vc:
#vowel check
if word_vc[actual_pos][1] == 'V':
syllable = word_vc[actual_pos][0]
del word_vc[-1]
actual_pos = actual_pos - 1
#long vowel
if word_vc and (word_vc[actual_pos][0] == syllable or
word_vc[actual_pos][0] == accentuate(syllable)):
if (len(word_vc) > 1):
syllables.insert(0, syllable)
syllable = ""
else:
syllables.insert(0, syllable)
syllable = ""
elif word_vc and word_vc[actual_pos][1] == 'C':
if (word_vc[actual_pos][0] == 'u' or
word_vc[actual_pos][0] == accentuate('u') or word_vc[actual_pos][0] == 'h'):
syllables.insert(0, syllable)
syllable = ""
else:
#Add the consonant to build the CV syllable
syllable = word_vc[actual_pos][0] + syllable #C
actual_pos = actual_pos - 1
del word_vc[-1]
syllables.insert(0, syllable)
syllable = ""
else:
if (len(word_vc) < 2 and actual_pos != 0): #lone syllable
syllables.insert(0, syllable)
syllable = ""
actual_pos = actual_pos - 1
if (word_vc):
del word_vc[-1]
else:
syllables.insert(0, syllable)
syllable = ""
else: #consonant check
if word_vc[actual_pos][0] in sibilantes:
syllable = word_vc[actual_pos][0] + syllable
actual_pos = actual_pos - 1
if (word_vc):
del word_vc[-1]
#first CVC
if word_vc and word_vc[actual_pos][1] == 'V':
syllable = word_vc[actual_pos][0] + syllable #V
actual_pos = actual_pos - 1
del word_vc[-1]
#syllable = VC
if len(word_vc) and word_vc[actual_pos][1] == 'C':
if word_vc[actual_pos][0] == 'u' or word_vc[
actual_pos][0] == accentuate('u') or word_vc[actual_pos][0] == 'h':
syllables.insert(0, syllable)
syllable = ""
else:
#is CVC
syllable = word_vc[actual_pos][0] + syllable #V
syllables.insert(0, syllable)
syllable = ""
actual_pos = actual_pos - 1
del word_vc[-1]
else: #is VC
syllables.insert(0, syllable)
syllable = ""
else:
if word_vc and (word_vc[actual_pos][0] == 'u' or
word_vc[actual_pos][0] == accentuate('u')):
syllables.insert(0, syllable)
syllable = ""
actual_pos = actual_pos - 1
del word_vc[-1]
else:
if (word_vc[actual_pos][0] == 'h'):
syllable = word_vc[actual_pos][0]
actual_pos = actual_pos - 1
del word_vc[-1]
elif (word_vc[actual_pos][0] == 'u' or
word_vc[actual_pos][0] == accentuate('u')):
syllable = word_vc[actual_pos][0]
syllables.insert(0, syllable)
syllable = ""
actual_pos = actual_pos - 1
if (word_vc):
del word_vc[-1]
else:
if len(syllables):
if word_vc[actual_pos][0] == 't' and syllables[0][0] == 's':
syllables[0] = word_vc[actual_pos][0] + syllables[0]
if word_vc[actual_pos][0] == 'c' and syllables[0][0] == 'h':
syllables[0] = word_vc[actual_pos][0] + syllables[0]
if word_vc[actual_pos][0] == 's' and syllables[0][0] == 'h':
syllables[0] = word_vc[actual_pos][0] + syllables[0]
actual_pos = actual_pos - 1
if (word_vc):
del word_vc[-1]
return syllables
def get_vc(word):
"""
Function that returns all the vowels and consonants of a word
"""
structure = []
vowels = ['a', 'e', 'i', 'o']
acentuado = ['á', 'é', 'í', 'ó']
specials = ['ch', 'hu', 'sh', 'ts', 'qu']
pos_special_cons = -1
transformation = {
"ch": "1",
"hu": "2",
"sh": "3",
"ts": "4",
"qu": "5"
}
for special in specials:
if special in word:
word = word.replace(special, transformation[special])
for pos in range(0, len(word)):
if (pos_special_cons != -1):
if pos != pos_special_cons + 1:
if word[pos] in vowels or word[pos] in acentuado:
structure.append([word[pos], "V"])
else:
if word[pos] == " ":
structure.append([word[pos], " "])
else:
if word[pos] == "-":
structure.append([word[pos], "-"])
else:
structure.append([word[pos], "C"])
else:
structure[pos - 1] = [word[pos - 1] + word[pos], "C"]
else:
if word[pos] in vowels or word[pos] in acentuado:
structure.append([word[pos], "V"])
else:
if word[pos] == " ":
structure.append([word[pos], " "])
else:
if word[pos] == "-":
structure.append([word[pos], "-"])
else:
structure.append([word[pos], "C"])
for syllable in structure:
syllable[0] = change(syllable[0])
return structure
def change(syllable):
"""
Function that returns the original form of a syllable
"""
if "1" in syllable:
syllable = syllable.replace("1", "ch")
elif "2" in syllable:
syllable = syllable.replace("2", "hu")
elif "3" in syllable:
syllable = syllable.replace("3", "sh")
elif "4" in syllable:
syllable = syllable.replace("4", "ts")
elif "5" in syllable:
syllable = syllable.replace("5", "qu")
else:
syllable = syllable.replace("6", "bu")
return syllable
def accentuate(letter):
"""
Function that adds the accentuation mark of a letter
"""
if letter == "a":
letter = "á"
elif letter == "e":
letter = "é"
elif letter == "i":
letter = "í"
elif letter == "o":
letter = "ó"
elif letter == "u":
letter = "ú"
return letter
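# Minimal sanity check of the syllabifier (the word choice is illustrative;
# the expected split follows from the CV rules above).
if __name__ == "__main__":
    print(syllabify("jakon"))  # -> ['ja', 'kon']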
``` |
{
"source": "jperelli/django-variable-settings",
"score": 2
} |
#### File: django-variable-settings/variable_settings/models.py
```python
from django.db import models
class Setting(models.Model):
key = models.CharField(max_length=255)
value = models.CharField(max_length=255)
def __unicode__(self):
return '{{"{}": "{}"}}'.format(self.key, self.value)
class Meta(object):
app_label = 'variable_settings'
``` |
{
"source": "jperelli/pysimdjson",
"score": 3
} |
#### File: pysimdjson/tests/test_query.py
```python
import pytest
from simdjson import parse_query, ParsedJson
def test_parse_query_get():
# Simple unquoted string with a delimiter.
assert parse_query('.test.string') == [
(10, b'test'),
(10, b'string')
]
# Quoted string with an escaped quote within it.
assert parse_query('."t\\"t"') == [
(10, b't"t')
]
# Quoted string followed by delimiter and unquoted string.
assert parse_query('."test".string') == [
(10, b'test'),
(10, b'string')
]
def test_parse_query_array():
assert parse_query('."test"[]') == [
(10, b'test'),
(20, None)
]
assert parse_query('.[]') == [
(10, b''),
(20, None)
]
# Closing bracket without an opening.
with pytest.raises(ValueError):
parse_query(']')
def test_items():
doc = b'{"simple": 1}'
pj = ParsedJson(doc)
assert pj.items('.') == {"simple": 1}
doc = b'''{
"count": 2,
"results": [
{"name": "result_a"},
{"name": "result_b"}
],
"error": {
"message": "All good captain"
}
}'''
pj = ParsedJson(doc)
assert pj.items('.count') == 2
assert pj.items('.results') == [
{'name': 'result_a'},
{'name': 'result_b'}
]
assert pj.items('.results[].name') == [
'result_a',
'result_b'
]
assert pj.items('.error.message') == 'All good captain'
assert pj.items('.results[0]')
``` |
{
"source": "jperez3/lambda-demp",
"score": 3
} |
#### File: lambda-demp/lambda/app.py
```python
def super_important_function():
""" Super Important Function """
print("WHAT IS GOING ON?!?!")
print("(╯°□°)╯︵ ┻━┻) ")
print("")
print("Oh... It's actually working!")
print("┳━┳ ヽ(ಠل͜ಠ)ノ")
print("")
def lambda_handler(event, context):
""" AWS Lambda Handler """
print("************************************")
print("EXECUTING LAMBDA HANDLER")
print("************************************")
print("")
super_important_function()
def test_handler(event, context):
""" AWS Lambda Handler """
print("************************************")
print("EXECUTING TEST HANDLER")
print("************************************")
print("")
print("TEST ALL THE THINGS!!!")
``` |
{
"source": "jperez999/cudf",
"score": 3
} |
#### File: core/column/lists.py
```python
import pyarrow as pa
import cudf
from cudf.core.column import ColumnBase
from cudf.core.column.methods import ColumnMethodsMixin
from cudf.core.dtypes import ListDtype
from cudf.utils.dtypes import is_list_dtype
from cudf.utils.utils import buffers_from_pyarrow
class ListColumn(ColumnBase):
def __init__(
self,
data,
size,
dtype,
mask=None,
offset=0,
null_count=None,
children=(),
):
super().__init__(
data,
size,
dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
@property
def base_size(self):
return self.size
@property
def elements(self):
"""
Column containing the elements of each list (may itself be a
ListColumn)
"""
return self.children[1]
@property
def offsets(self):
"""
Integer offsets to elements specifying each row of the ListColumn
"""
return self.children[0]
@classmethod
def from_arrow(cls, array):
if array.type.num_children == 0:
return cudf.core.column.as_column(array)
else:
_, _, mask, _, _ = buffers_from_pyarrow(array)
offsets = cudf.core.column.as_column(array.offsets)
return ListColumn(
data=None,
size=len(array),
dtype=ListDtype.from_arrow(array.type),
offset=array.offset,
mask=mask,
null_count=array.null_count,
children=(offsets, ListColumn.from_arrow(array.values)),
)
def to_arrow(self):
offsets = self.offsets.to_arrow()
elements = self.elements.to_arrow()
if len(elements) == elements.null_count:
elements = pa.NullArray.from_pandas([None] * len(elements))
if self.nullable:
nbuf = self.mask.to_host_array().view("int8")
nbuf = pa.py_buffer(nbuf)
buffers = (nbuf, offsets.buffers()[1])
else:
buffers = offsets.buffers()
return pa.ListArray.from_buffers(
self.dtype.to_arrow(), len(self), buffers, children=[elements],
)
def list(self, parent=None):
return ListMethods(self, parent=parent)
class ListMethods(ColumnMethodsMixin):
"""
List methods for Series
"""
def __init__(self, column, parent=None):
if not is_list_dtype(column.dtype):
raise AttributeError(
"Can only use .cat accessor with a 'list' dtype"
)
self._column = column
self._parent = parent
@property
def leaves(self):
"""
From a Series of (possibly nested) lists, obtain the elements from
the innermost lists as a flat Series (one value per row).
Returns
-------
Series
Examples
--------
>>> a = cudf.Series([[[1, None], [3, 4]], None, [[5, 6]]])
>>> a.list.leaves
0 1
1 null
2 3
3 4
4 5
5 6
dtype: int64
"""
if type(self._column.elements) is ListColumn:
return self._column.elements.list(parent=self._parent).leaves
else:
return self._return_or_inplace(
self._column.elements, retain_index=False
)
``` |
{
"source": "jperez999/models-1",
"score": 2
} |
#### File: tf/blocks/cross.py
```python
from typing import List, Optional, Tuple, Union
import tensorflow as tf
from merlin.schema import Schema, Tags
from ..core import Filter, SequentialBlock, TabularBlock
from ..utils.tf_utils import maybe_deserialize_keras_objects, maybe_serialize_keras_objects
from .mlp import DenseMaybeLowRank, InitializerType, RegularizerType
def CrossBlock(
depth: int = 1,
filter: Optional[Union[Schema, Tags, List[str], Filter]] = None,
low_rank_dim: Optional[int] = None,
use_bias: bool = True,
kernel_initializer: InitializerType = "truncated_normal",
bias_initializer: InitializerType = "zeros",
kernel_regularizer: Optional[RegularizerType] = None,
bias_regularizer: Optional[RegularizerType] = None,
inputs: Optional[tf.keras.layers.Layer] = None,
**kwargs,
) -> SequentialBlock:
"""This block provides a way to create high-order feature interactions
by a number of stacked Cross Layers, from
DCN V2: Improved Deep & Cross Network [1].
See Eq. (1) for full-rank and Eq. (2) for low-rank version.
References
----------
.. [1]. Wang, Ruoxi, et al. "DCN V2: Improved deep & cross network and
practical lessons for web-scale learning to rank systems." Proceedings
of the Web Conference 2021. 2021. https://arxiv.org/pdf/2008.13535.pdf
Parameters
----------
depth : int, optional
Number of cross-layers to be stacked, by default 1
filter : Optional[Union[Schema, Tags, List[str], Filter]], optional
Features filter to be applied on the input, by default None
low_rank_dim : Optional[int], optional
If this argument is provided, the weight (`W in R(dxd)`),
where d is the input features dimension, is factorized into a
low-rank product W = U*V where U and V have (dxr) shape and
`low_rank_dim = r`, by default None
use_bias : bool, optional
Enables or not the bias term, by default True
kernel_initializer : InitializerType, optional
Initializer to use on the kernel matrix, by default "truncated_normal"
bias_initializer : InitializerType, optional
Initializer to use on the bias vector, by default "zeros"
kernel_regularizer : Optional[RegularizerType], optional
Regularizer to use on the kernel matrix, by default None
bias_regularizer : Optional[RegularizerType], optional
Regularizer to use on the bias vector, by default None
inputs : Optional[tf.keras.layers.Layer], optional
If an `InputBlock` is provided, it is prepended and its output is
concat-aggregated, since cross layers require the input features to be
combined into a single tensor first, by default None
Returns
-------
SequentialBlock
A `SequentialBlock` with a number of stacked Cross layers
Raises
------
ValueError
Number of cross layers (depth) should be positive
"""
layers = [inputs, TabularBlock(aggregation="concat")] if inputs else []
if depth <= 0:
raise ValueError(f"Number of cross layers (depth) should be positive but is {depth}.")
for i in range(depth):
layers.append(
Cross(
low_rank_dim=low_rank_dim,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
output_x0=i < depth - 1,
)
)
return SequentialBlock(layers, filter=filter, block_name="CrossBlock", **kwargs)
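# Usage sketch (illustrative only; `schema` and the layer sizes are hypothetical,
# and the chained `connect` call assumes the Block API shown elsewhere in this package):
#     import merlin.models.tf as ml
#     inputs = ml.InputBlock(schema)
#     dcn = CrossBlock(depth=2, low_rank_dim=32, inputs=inputs).connect(ml.MLPBlock([128, 64]))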
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class Cross(tf.keras.layers.Layer):
"""Implementation of the Cross Layers from
DCN V2: Improved Deep & Cross Network [1]_ -
See Eq. (1) for full-rank and Eq. (2) for low-rank version.
This layer creates interactions of all input features. When used inside `CrossBlock`,
stacked `Cross` layers can be used to create high-order feature interactions.
The `call` method accepts `inputs` as a tuple of size 2
tensors. The first input `x0` is the base layer that contains the original
features (usually the embedding layer); the second input `xi` is the output
of the previous `Cross` layer in the stack, i.e., the i-th `Cross`
layer. For the first `Cross` layer in the stack, x0 = xi.
The output is x_{i+1} = x0 .* (W * xi + bias) + xi,
where .* designates elementwise multiplication and W can either be a
full-rank matrix or a low-rank factorization W = U*V (controlled by
`low_rank_dim`) to reduce the computational cost.
References
----------
.. [1]. Wang, Ruoxi, et al. "DCN V2: Improved deep & cross network and
practical lessons for web-scale learning to rank systems." Proceedings
of the Web Conference 2021. 2021. https://arxiv.org/pdf/2008.13535.pdf
Parameters
----------
low_rank_dim : Optional[int], optional
If this argument is provided, the weight (`W in R(dxd)`),
where d is the input features dimension, is factorized into a
low-rank product W = U*V where U and V have (dxr) shape and
`low_rank_dim = r`, by default None
use_bias : bool, optional
Enables or not the bias term, by default True
kernel_initializer : InitializerType, optional
Initializer to use on the kernel matrix, by default "truncated_normal"
bias_initializer : InitializerType, optional
Initializer to use on the bias vector, by default "zeros"
kernel_regularizer : Optional[RegularizerType], optional
Regularizer to use on the kernel matrix, by default None
bias_regularizer : Optional[RegularizerType], optional
Regularizer to use on the bias vector, by default None
output_x0 : bool
Whether to return a tuple containing the input of the first layer (`x0`),
which usually represents the input features concatenated, by default False
"""
def __init__(
self,
low_rank_dim: Optional[int] = None,
use_bias: bool = True,
kernel_initializer: InitializerType = "truncated_normal",
bias_initializer: InitializerType = "zeros",
kernel_regularizer: Optional[RegularizerType] = None,
bias_regularizer: Optional[RegularizerType] = None,
output_x0: bool = False,
**kwargs,
):
dense = kwargs.pop("dense", None)
if not dense:
dense = DenseMaybeLowRank(
low_rank_dim=low_rank_dim,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
super(Cross, self).__init__(**kwargs)
self.dense = dense
self.output_x0 = output_x0
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs: Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]], **kwargs):
if isinstance(inputs, tuple):
x0, x = inputs
else:
x0 = x = inputs
self.validate_inputs(x0, x)
projected = self.dense(x)
output = x0 * projected + x
if self.output_x0:
return x0, output
return output
def validate_inputs(self, x0, x):
tf.assert_equal(
tf.shape(x0),
tf.shape(x),
message="`x0` ({}) and `x` ({}) shapes mismatch!".format(x0.shape, x.shape),
)
def get_config(self):
config = dict()
config.update(super(Cross, self).get_config())
return maybe_serialize_keras_objects(self, config, ["dense"])
@classmethod
def from_config(cls, config):
config = maybe_deserialize_keras_objects(config, ["dense"])
return cls(**config)
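# Minimal illustration of a single Cross layer (sketch only; shapes are hypothetical):
#     x0 = tf.random.uniform((8, 16))  # e.g. the concatenated input features
#     out = Cross()((x0, x0))          # the first layer of a stack uses x == x0
#     # out == x0 * dense(x0) + x0, with the same (8, 16) shape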
```
#### File: tf/features/continuous.py
```python
from typing import List, Optional
import tensorflow as tf
from merlin.models.utils.doc_utils import docstring_parameter
from merlin.schema import Schema
from ..core import (
TABULAR_MODULE_PARAMS_DOCSTRING,
BlockType,
Filter,
TabularAggregationType,
TabularBlock,
)
@docstring_parameter(tabular_module_parameters=TABULAR_MODULE_PARAMS_DOCSTRING)
@tf.keras.utils.register_keras_serializable(package="merlin.models")
class ContinuousFeatures(TabularBlock):
"""Input block for continuous features.
Parameters
----------
features: List[str]
List of continuous features to include in this module.
{tabular_module_parameters}
"""
def __init__(
self,
features: List[str],
pre: Optional[BlockType] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
schema: Optional[Schema] = None,
name: Optional[str] = None,
**kwargs
):
super().__init__(
pre=pre,
post=post,
aggregation=aggregation,
schema=schema,
name=name,
is_input=True,
**kwargs
)
self.filter_features = Filter(features)
@classmethod
def from_features(cls, features, **kwargs):
return cls(features, **kwargs)
def call(self, inputs, *args, **kwargs):
cont_features = self.filter_features(inputs)
cont_features = {
k: tf.expand_dims(v, -1) if len(v.shape) == 1 else v for k, v in cont_features.items()
}
return cont_features
def compute_call_output_shape(self, input_shapes):
cont_features_sizes = self.filter_features.compute_output_shape(input_shapes)
cont_features_sizes = {
k: tf.TensorShape(list(v) + [1]) if len(v) == 1 else v
for k, v in cont_features_sizes.items()
}
return cont_features_sizes
def get_config(self):
config = super().get_config()
config["features"] = self.filter_features.feature_names
return config
def _get_name(self):
return "ContinuousFeatures"
def repr_ignore(self) -> List[str]:
return ["filter_features"]
def repr_extra(self):
return ", ".join(sorted(self.filter_features.feature_names))
```
#### File: tf/prediction/batch.py
```python
import tempfile
import typing as tp
import numpy as np
import tensorflow as tf
from merlin.core.dispatch import DataFrameType, concat_columns, get_lib
from merlin.schema import Schema, Tags
from ...utils.schema import select_targets
from ..core import Block, Model, RetrievalModel
from ..dataset import Dataset
class ModelEncode:
def __init__(
self,
model,
output_names,
data_iterator_func=None,
model_load_func=None,
model_encode_func=None,
output_concat_func=None,
):
super().__init__()
self._model = model
self.output_names = [output_names] if isinstance(output_names, str) else output_names
self.data_iterator_func = data_iterator_func
self.model_load_func = model_load_func
self.model_encode_func = model_encode_func
self.output_concat_func = output_concat_func
@property
def model(self):
if isinstance(self._model, str):
self._model = self.model_load_func(self._model)
return self._model
def __call__(self, df: DataFrameType) -> DataFrameType:
# Set defaults
iterator_func = self.data_iterator_func or (lambda x: [x])
encode_func = self.model_encode_func or (lambda x, y: x(y))
concat_func = self.output_concat_func or np.concatenate
# Iterate over batches of df and collect predictions
new_df = concat_columns(
[
df,
type(df)(
concat_func([encode_func(self.model, batch) for batch in iterator_func(df)]),
columns=self.output_names,
# index=_df.index,
),
]
)
# Return result
return new_df
def transform(self, col_selector, df: DataFrameType) -> DataFrameType:
return self(df[col_selector])
class TFModelEncode(ModelEncode):
def __init__(
self,
model: tp.Union[Model, tf.keras.Model],
output_names: tp.List[str] = None,
batch_size: int = 512,
save_path: tp.Optional[str] = None,
block_load_func: tp.Optional[tp.Callable[[str], Block]] = None,
schema: tp.Optional[Schema] = None,
output_concat_func=None,
):
save_path = save_path or tempfile.mkdtemp()
model.save(save_path)
model_load_func = block_load_func if block_load_func else tf.keras.models.load_model
output_names = output_names or model.block.last.task_names
if not output_concat_func:
output_concat_func = np.concatenate if len(output_names) == 1 else get_lib().concat
self.schema = schema or model.schema
super().__init__(
save_path,
output_names,
data_iterator_func=data_iterator_func(self.schema, batch_size=batch_size),
model_load_func=model_load_func,
model_encode_func=model_encode,
output_concat_func=output_concat_func,
)
# def fit_transform(self, data) -> nvt.Dataset:
# features = self.schema.column_names >> self
#
# # Fit and transform
# processor = nvt.Workflow(features)
# output = processor.fit_transform(data)
#
# return output
class ItemEmbeddings(TFModelEncode):
def __init__(
self, model: Model, dim: int, batch_size: int = 512, save_path: tp.Optional[str] = None
):
item_block = model.block.first.item_block()
schema = item_block.schema
output_names = [str(i) for i in range(dim)]
super().__init__(
item_block,
output_names,
save_path=save_path,
batch_size=batch_size,
schema=schema,
output_concat_func=np.concatenate,
)
class QueryEmbeddings(TFModelEncode):
def __init__(
self,
model: RetrievalModel,
dim: int,
batch_size: int = 512,
save_path: tp.Optional[str] = None,
):
query_block = model.block.first.query_block()
schema = query_block.schema
output_names = [str(i) for i in range(dim)]
super().__init__(
query_block,
output_names,
save_path=save_path,
batch_size=batch_size,
schema=schema,
output_concat_func=np.concatenate,
)
def model_encode(model, batch):
# TODO: How to handle list outputs?
model_outputs = model(batch[0])
if isinstance(model_outputs, dict):
return get_lib().DataFrame({key: encode_output(val) for key, val in model_outputs.items()})
return encode_output(model_outputs)
def encode_output(output: tf.Tensor):
if len(output.shape) == 2 and output.shape[1] == 1:
output = tf.squeeze(output)
return output.numpy()
def data_iterator_func(schema, batch_size: int = 512):
import merlin.io.dataset
cat_cols = schema.select_by_tag(Tags.CATEGORICAL).column_names
cont_cols = schema.select_by_tag(Tags.CONTINUOUS).column_names
targets = select_targets(schema).column_names
def data_iterator(dataset):
return Dataset(
merlin.io.dataset.Dataset(dataset),
batch_size=batch_size,
cat_names=cat_cols,
cont_names=cont_cols,
label_names=targets,
)
return data_iterator
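# Usage sketch (illustrative only; the model, column name and batch size are hypothetical):
#     encode = TFModelEncode(model, output_names=["score"], batch_size=1024)
#     scored = encode(df)  # returns `df` with an extra "score" column of model predictions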
```
#### File: tf/utils/tf_utils.py
```python
from typing import Union
import tensorflow as tf
from merlin.models.tf.typing import TabularData
def get_output_sizes_from_schema(schema, batch_size=0, max_sequence_length=None):
sizes = {}
for feature in schema:
name = feature.name
if feature.is_list:
sizes[name] = tf.TensorShape(
[
batch_size,
max_sequence_length if max_sequence_length else feature.value_count.max,
]
)
elif feature.HasField("shape"):
sizes[name] = tf.TensorShape([batch_size] + [d.size for d in feature.shape.dim])
else:
sizes[name] = tf.TensorShape([batch_size, 1])
return sizes
def calculate_batch_size_from_input_shapes(input_shapes):
values = []
for val in input_shapes.values():
if isinstance(val, tuple) and isinstance(val[0], tf.TensorShape):
values.append(val[0])
else:
values.append(val)
return values[0][0]
def maybe_serialize_keras_objects(
self,
config,
maybe_serialize_keys,
):
for key in maybe_serialize_keys:
maybe_value = getattr(self, key, None)
if maybe_value:
if isinstance(maybe_value, dict):
config[key] = {
k: tf.keras.utils.serialize_keras_object(v) for k, v in maybe_value.items()
}
elif isinstance(maybe_value, (list, tuple)):
config[key] = [tf.keras.utils.serialize_keras_object(v) for v in maybe_value]
else:
config[key] = tf.keras.utils.serialize_keras_object(maybe_value)
return config
def maybe_deserialize_keras_objects(
config, to_deserialize, deserialize_fn=tf.keras.utils.deserialize_keras_object
):
if isinstance(to_deserialize, list):
to_deserialize = {k: deserialize_fn for k in to_deserialize}
custom_objects = {}
for key, fn in to_deserialize.items():
maybe_val = config.get(key, None)
if maybe_val:
if isinstance(maybe_val, list):
config[key] = [fn(v, custom_objects=custom_objects) for v in maybe_val]
else:
config[key] = fn(maybe_val, custom_objects=custom_objects)
return config
def extract_topk(ks, scores, labels):
max_k = tf.reduce_max(ks)
topk_scores, topk_indices = tf.math.top_k(scores, max_k)
topk_labels = gather_torch_like(labels, topk_indices, max_k)
return topk_scores, topk_indices, topk_labels
def tranform_label_to_onehot(labels, vocab_size):
return tf.one_hot(tf.reshape(labels, (-1,)), vocab_size)
def create_output_placeholder(scores, ks):
return tf.Variable(tf.zeros([tf.shape(scores)[0], len(ks)], tf.float32))
def gather_torch_like(labels, indices, max_k):
# gather_indices = []
gather_indices = tf.TensorArray(tf.int32, size=tf.shape(indices)[0])
for i in range(tf.shape(indices)[0]):
gather_indices = gather_indices.write(
i,
tf.concat(
[i * tf.ones((max_k, 1), tf.int32), tf.expand_dims(indices[i, :], -1)], axis=1
),
)
all_indices = gather_indices.stack()
labels = tf.reshape(tf.gather_nd(labels, all_indices), tf.shape(indices))
return labels
def batch_ref(inputs: Union[tf.Tensor, TabularData]):
"""Get hash-code of a tensor or a dictionary of tensors."""
if isinstance(inputs, tf.Tensor):
return hash(inputs.ref())
refs = []
keys = sorted(inputs.keys())
for key in keys:
refs.append(inputs[key].ref())
return hash(tuple(refs))
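# Quick illustration of the label helper (eager mode):
#     tranform_label_to_onehot(tf.constant([[1], [3]]), vocab_size=5)
#     # -> dense one-hot tensor of shape (2, 5)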
```
#### File: tests/tf/test_dataset.py
```python
import os
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
from sklearn.metrics import roc_auc_score
import merlin.models.tf as ml
import merlin.models.tf.dataset as tf_dataloader
from merlin.core.dispatch import make_df
from merlin.io.dataset import Dataset
from merlin.models.data.synthetic import SyntheticData
def test_nested_list():
num_rows = 100
batch_size = 12
df = pd.DataFrame(
{
"data": [
np.random.rand(np.random.randint(10) + 1, 3).tolist() for i in range(num_rows)
],
"data2": [np.random.rand(np.random.randint(10) + 1).tolist() for i in range(num_rows)],
"label": [np.random.rand() for i in range(num_rows)],
}
)
train_dataset = tf_dataloader.Dataset(
Dataset(df),
cont_names=["data", "data2"],
label_names=["label"],
batch_size=batch_size,
shuffle=False,
)
batch = next(iter(train_dataset))
# [[1,2,3],[3,1],[...],[]]
nested_data_col = tf.RaggedTensor.from_row_lengths(
batch[0]["data"][0][:, 0], tf.cast(batch[0]["data"][1][:, 0], tf.int32)
).to_tensor()
true_data_col = tf.reshape(
tf.ragged.constant(df.iloc[:batch_size, 0].tolist()).to_tensor(), [batch_size, -1]
)
# [1,2,3]
multihot_data2_col = tf.RaggedTensor.from_row_lengths(
batch[0]["data2"][0][:, 0], tf.cast(batch[0]["data2"][1][:, 0], tf.int32)
).to_tensor()
true_data2_col = tf.reshape(
tf.ragged.constant(df.iloc[:batch_size, 1].tolist()).to_tensor(), [batch_size, -1]
)
assert nested_data_col.shape == true_data_col.shape
assert np.allclose(nested_data_col.numpy(), true_data_col.numpy())
assert multihot_data2_col.shape == true_data2_col.shape
assert np.allclose(multihot_data2_col.numpy(), true_data2_col.numpy())
def test_shuffling():
num_rows = 10000
batch_size = 10000
df = pd.DataFrame({"a": np.asarray(range(num_rows)), "b": np.asarray([0] * num_rows)})
train_dataset = tf_dataloader.Dataset(
Dataset(df), cont_names=["a"], label_names=["b"], batch_size=batch_size, shuffle=True
)
batch = next(iter(train_dataset))
first_batch = tf.reshape(tf.cast(batch[0]["a"].cpu(), tf.int32), (batch_size,))
in_order = tf.range(0, batch_size, dtype=tf.int32)
assert (first_batch != in_order).numpy().any()
assert (tf.sort(first_batch) == in_order).numpy().all()
@pytest.mark.parametrize("batch_size", [10, 9, 8])
@pytest.mark.parametrize("drop_last", [True, False])
@pytest.mark.parametrize("num_rows", [100])
def test_tf_drp_reset(tmpdir, batch_size, drop_last, num_rows):
df = make_df(
{
"cat1": [1] * num_rows,
"cat2": [2] * num_rows,
"cat3": [3] * num_rows,
"label": [0] * num_rows,
"cont3": [3.0] * num_rows,
"cont2": [2.0] * num_rows,
"cont1": [1.0] * num_rows,
}
)
path = os.path.join(tmpdir, "Dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = ["cont3", "cont2", "cont1"]
label_name = ["label"]
data_itr = tf_dataloader.Dataset(
path,
cat_names=cat_names,
cont_names=cont_names,
batch_size=batch_size,
label_names=label_name,
shuffle=False,
drop_last=drop_last,
)
all_len = len(data_itr) if drop_last else len(data_itr) - 1
all_rows = 0
for idx, (X, y) in enumerate(data_itr):
all_rows += len(X["cat1"])
if idx < all_len:
assert list(X["cat1"].numpy()) == [1] * batch_size
assert list(X["cat2"].numpy()) == [2] * batch_size
assert list(X["cat3"].numpy()) == [3] * batch_size
assert list(X["cont1"].numpy()) == [1.0] * batch_size
assert list(X["cont2"].numpy()) == [2.0] * batch_size
assert list(X["cont3"].numpy()) == [3.0] * batch_size
if drop_last and num_rows % batch_size > 0:
assert num_rows > all_rows
else:
assert num_rows == all_rows
def test_tf_catname_ordering(tmpdir):
df = make_df(
{
"cat1": [1] * 100,
"cat2": [2] * 100,
"cat3": [3] * 100,
"label": [0] * 100,
"cont3": [3.0] * 100,
"cont2": [2.0] * 100,
"cont1": [1.0] * 100,
}
)
path = os.path.join(tmpdir, "Dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = ["cont3", "cont2", "cont1"]
label_name = ["label"]
data_itr = tf_dataloader.Dataset(
path,
cat_names=cat_names,
cont_names=cont_names,
batch_size=10,
label_names=label_name,
shuffle=False,
)
for X, y in data_itr:
assert list(X["cat1"].numpy()) == [1] * 10
assert list(X["cat2"].numpy()) == [2] * 10
assert list(X["cat3"].numpy()) == [3] * 10
assert list(X["cont1"].numpy()) == [1.0] * 10
assert list(X["cont2"].numpy()) == [2.0] * 10
assert list(X["cont3"].numpy()) == [3.0] * 10
def test_tf_map(tmpdir):
df = make_df(
{
"cat1": [1] * 100,
"cat2": [2] * 100,
"cat3": [3] * 100,
"label": [0] * 100,
"sample_weight": [1.0] * 100,
"cont2": [2.0] * 100,
"cont1": [1.0] * 100,
}
)
path = os.path.join(tmpdir, "Dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = ["sample_weight", "cont2", "cont1"]
label_name = ["label"]
def add_sample_weight(features, labels, sample_weight_col_name="sample_weight"):
sample_weight = tf.cast(features.pop(sample_weight_col_name) > 0, tf.float32)
return features, labels, sample_weight
data_itr = tf_dataloader.Dataset(
path,
cat_names=cat_names,
cont_names=cont_names,
batch_size=10,
label_names=label_name,
shuffle=False,
).map(add_sample_weight)
for X, y, sample_weight in data_itr:
assert list(X["cat1"].numpy()) == [1] * 10
assert list(X["cat2"].numpy()) == [2] * 10
assert list(X["cat3"].numpy()) == [3] * 10
assert list(X["cont1"].numpy()) == [1.0] * 10
assert list(X["cont2"].numpy()) == [2.0] * 10
assert list(sample_weight.numpy()) == [1.0] * 10
@pytest.mark.parametrize("batch_size", [1, 2, 4])
def test_validater(batch_size):
n_samples = 9
rand = np.random.RandomState(0)
gdf = make_df({"a": rand.randn(n_samples), "label": rand.randint(2, size=n_samples)})
dataloader = tf_dataloader.Dataset(
Dataset(gdf),
batch_size=batch_size,
cat_names=[],
cont_names=["a"],
label_names=["label"],
shuffle=False,
)
input_ = tf.keras.Input(name="a", dtype=tf.float32, shape=(1,))
x = tf.keras.layers.Dense(128, "relu")(input_)
x = tf.keras.layers.Dense(1, activation="softmax")(x)
model = tf.keras.Model(inputs=input_, outputs=x)
model.compile("sgd", "binary_crossentropy", metrics=["accuracy", tf.keras.metrics.AUC()])
validater = tf_dataloader.DatasetValidator(dataloader)
model.fit(dataloader, epochs=2, verbose=0, callbacks=[validater])
predictions, labels = [], []
for X, y_true in dataloader:
y_pred = model(X)
labels.extend(y_true.numpy()[:, 0])
predictions.extend(y_pred.numpy()[:, 0])
predictions = np.array(predictions)
labels = np.array(labels)
logs = {}
validater.on_epoch_end(0, logs)
auc_key = [i for i in logs if i.startswith("val_auc")][0]
true_accuracy = (labels == (predictions > 0.5)).mean()
estimated_accuracy = logs["val_accuracy"]
assert np.isclose(true_accuracy, estimated_accuracy, rtol=1e-6)
true_auc = roc_auc_score(labels, predictions)
estimated_auc = logs[auc_key]
assert np.isclose(true_auc, estimated_auc, rtol=1e-6)
def test_model_with_sparse_inputs(music_streaming_data: SyntheticData):
item_id_schema = music_streaming_data.schema.select_by_name(["user_id", "item_genres"])
inputs = ml.InputBlock(item_id_schema)
model = inputs.connect(ml.MLPBlock([64]), context=ml.BlockContext())
df = pd.DataFrame(
{
"item_genres": np.random.randint(0, 10, (32, 20)).tolist(),
"user_id": np.random.randint(0, 10, (32,)).tolist(),
}
)
train_dataset = tf_dataloader.Dataset(
Dataset(df),
cat_names=["user_id", "item_genres"],
batch_size=3,
shuffle=False,
)
batch = next(iter(train_dataset))[0]
out = model(batch)
assert out.shape[-1] == 64
``` |
{
"source": "jperez999/NVTabular",
"score": 2
} |
#### File: NVTabular/examples/dask-nvtabular-criteo-benchmark.py
```python
import argparse
import os
import time
import cudf
from dask.distributed import Client, performance_report
from dask_cuda import LocalCUDACluster
from nvtabular import Dataset, Workflow
from nvtabular import io as nvt_io
from nvtabular import ops as ops
def setup_rmm_pool(client, pool_size):
client.run(cudf.set_allocator, pool=True, initial_pool_size=pool_size, allocator="default")
return None
def main(args):
# Input
data_path = args.data_path
out_path = args.out_path
freq_limit = args.freq_limit
out_files_per_proc = args.splits
if args.protocol == "ucx":
os.environ["UCX_TLS"] = "tcp,cuda_copy,cuda_ipc,sockcm"
# Use Criteo dataset by default (for now)
cont_names = (
args.cont_names.split(",") if args.cont_names else ["I" + str(x) for x in range(1, 14)]
)
cat_names = (
args.cat_names.split(",") if args.cat_names else ["C" + str(x) for x in range(1, 27)]
)
label_name = ["label"]
if args.cat_splits:
tree_width = {name: int(s) for name, s in zip(cat_names, args.cat_splits.split(","))}
else:
tree_width = {col: 1 for col in cat_names}
if args.cat_names is None:
# Using Criteo... Use more hash partitions for
# known high-cardinality columns
tree_width["C20"] = 8
tree_width["C1"] = 8
tree_width["C22"] = 4
tree_width["C10"] = 4
tree_width["C21"] = 2
tree_width["C11"] = 2
tree_width["C23"] = 2
tree_width["C12"] = 2
# Specify categorical caching location
cat_cache = None
if args.cat_cache:
cat_cache = args.cat_cache.split(",")
if len(cat_cache) == 1:
cat_cache = cat_cache[0]
else:
# If user is specifying a list of options,
# they must specify an option for every cat column
assert len(cat_names) == len(cat_cache)
if isinstance(cat_cache, str):
cat_cache = {col: cat_cache for col in cat_names}
elif isinstance(cat_cache, list):
cat_cache = {name: c for name, c in zip(cat_names, cat_cache)}
else:
# Criteo/DLRM Defaults
cat_cache = {col: "device" for col in cat_names}
if args.cat_names is None:
cat_cache["C20"] = "host"
cat_cache["C1"] = "host"
# Only need to cache the largest two on a dgx-2
if args.n_workers < 16:
cat_cache["C22"] = "host"
cat_cache["C10"] = "host"
# Use total device size to calculate args.device_limit_frac
device_size = nvt_io.device_mem_size(kind="total")
device_limit = int(args.device_limit_frac * device_size)
device_pool_size = int(args.device_pool_frac * device_size)
part_size = int(args.part_mem_frac * device_size)
# Setup LocalCUDACluster
if args.protocol == "tcp":
cluster = LocalCUDACluster(
protocol=args.protocol,
n_workers=args.n_workers,
CUDA_VISIBLE_DEVICES=args.devs,
device_memory_limit=device_limit,
local_directory=args.dask_workspace,
dashboard_address=":3787",
)
else:
cluster = LocalCUDACluster(
protocol=args.protocol,
n_workers=args.n_workers,
CUDA_VISIBLE_DEVICES=args.devs,
enable_nvlink=True,
device_memory_limit=device_limit,
local_directory=args.dask_workspace,
dashboard_address=":3787",
)
client = Client(cluster)
# Setup RMM pool
if not args.no_rmm_pool:
setup_rmm_pool(client, device_pool_size)
# Define Dask NVTabular "Workflow"
processor = Workflow(
cat_names=cat_names, cont_names=cont_names, label_name=label_name, client=client
)
processor.add_feature([ops.ZeroFill(), ops.LogOp()])
processor.add_preprocess(
ops.Categorify(
out_path=out_path,
tree_width=tree_width,
cat_cache=cat_cache,
freq_threshold=freq_limit,
on_host=args.cat_on_host,
)
)
processor.finalize()
dataset = Dataset(data_path, "parquet", part_size=part_size)
# Execute the dask graph
runtime = time.time()
if args.profile is not None:
with performance_report(filename=args.profile):
processor.apply(
dataset,
shuffle=nvt_io.Shuffle.PER_WORKER
if args.worker_shuffle
else nvt_io.Shuffle.PER_PARTITION,
out_files_per_proc=out_files_per_proc,
output_path=out_path,
)
else:
processor.apply(
dataset,
shuffle=nvt_io.Shuffle.PER_WORKER
if args.worker_shuffle
else nvt_io.Shuffle.PER_PARTITION,
out_files_per_proc=out_files_per_proc,
output_path=out_path,
)
runtime = time.time() - runtime
print("\nDask-NVTabular DLRM/Criteo benchmark")
print("--------------------------------------")
print(f"partition size | {part_size}")
print(f"protocol | {args.protocol}")
print(f"device(s) | {args.devs}")
print(f"rmm-pool | {(not args.no_rmm_pool)}")
print(f"out_files_per_proc | {args.splits}")
print(f"worker-shuffle | {args.worker_shuffle}")
print("======================================")
print(f"Runtime[s] | {runtime}")
print("======================================\n")
client.close()
def parse_args():
parser = argparse.ArgumentParser(description="Merge (dask/cudf) on LocalCUDACluster benchmark")
parser.add_argument(
"-d",
"--devs",
default="0,1,2,3",
type=str,
help='GPU devices to use (default "0, 1, 2, 3").',
)
parser.add_argument(
"-p",
"--protocol",
choices=["tcp", "ucx"],
default="tcp",
type=str,
help="The communication protocol to use.",
)
parser.add_argument("--no-rmm-pool", action="store_true", help="Disable the RMM memory pool")
parser.add_argument(
"--profile",
metavar="PATH",
default=None,
type=str,
help="Write dask profile report (E.g. dask-report.html)",
)
parser.add_argument("--data-path", type=str, help="Raw dataset path.")
parser.add_argument("--out-path", type=str, help="Root output path.")
parser.add_argument("--dask-workspace", default=None, type=str, help="Dask workspace path.")
parser.add_argument(
"-s", "--splits", default=24, type=int, help="Number of splits to shuffle each partition"
)
parser.add_argument(
"--part-mem-frac",
default=0.162,
type=float,
help="Fraction of device memory for each partition",
)
parser.add_argument(
"-f", "--freq-limit", default=0, type=int, help="Frequency limit on cat encodings."
)
parser.add_argument(
"--device-limit-frac",
default=0.8,
type=float,
help="Fractional device-memory limit (per worker).",
)
parser.add_argument(
"--device-pool-frac", default=0.8, type=float, help="Fractional rmm pool size (per worker)."
)
parser.add_argument(
"--worker-shuffle", action="store_true", help="Perform followup shuffle on each worker."
)
parser.add_argument(
"--cat-names", default=None, type=str, help="List of categorical column names."
)
parser.add_argument(
"--cat-cache",
default=None,
type=str,
help='Where to cache each category (Ex "device, host, disk").',
)
parser.add_argument(
"--cat-on-host",
action="store_true",
help="Whether to move categorical data to host between tasks.",
)
parser.add_argument(
"--cat-splits",
default=None,
type=str,
help='How many splits to use for each category (Ex "8, 4, 2, 1").',
)
parser.add_argument(
"--cont-names", default=None, type=str, help="List of continuous column names."
)
args = parser.parse_args()
args.n_workers = len(args.devs.split(","))
return args
if __name__ == "__main__":
main(parse_args())
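# Example invocation (paths and option values are placeholders; all flags are defined above):
#   python dask-nvtabular-criteo-benchmark.py \
#       --data-path /path/to/criteo/parquet --out-path /path/to/output \
#       --devs 0,1,2,3 --protocol tcp --freq-limit 15 --device-pool-frac 0.8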
```
#### File: NVTabular/nvtabular/categorify.py
```python
import os
from operator import getitem
import cudf
import cupy as cp
import numpy as np
from cudf._lib.nvtx import annotate
from dask.base import tokenize
from dask.dataframe.core import _concat
from dask.highlevelgraph import HighLevelGraph
from fsspec.core import get_fs_token_paths
from nvtabular.worker import fetch_table_data, get_worker_cache
def _make_name(*args):
return "_".join(args)
@annotate("top_level_groupby", color="green", domain="nvt_python")
def _top_level_groupby(gdf, cat_cols, tree_width, cont_cols, sum_sq, on_host):
# Top-level operation for category-based groupby aggregations
output = {}
k = 0
for i, cat_col in enumerate(cat_cols):
# Compile aggregation dictionary and add "squared-sum"
# column(s) (necessary when `cont_cols` is non-empty)
df_gb = gdf[[cat_col] + cont_cols].copy(deep=False)
agg_dict = {}
agg_dict[cat_col] = ["count"]
for col in cont_cols:
agg_dict[col] = ["sum"]
if sum_sq:
name = _make_name(col, "pow2")
df_gb[name] = df_gb[col].pow(2)
agg_dict[name] = ["sum"]
# Perform groupby and flatten column index
# (flattening provides better cudf support)
gb = df_gb.groupby(cat_col, dropna=False).agg(agg_dict)
gb.columns = [
_make_name(*name) if name[0] == cat_col else _make_name(*((cat_col,) + name))
for name in gb.columns.to_flat_index()
]
gb.reset_index(inplace=True, drop=False)
del df_gb
# Split the result by the hash value of the categorical column
for j, split in enumerate(
gb.partition_by_hash([cat_col], tree_width[cat_col], keep_index=False)
):
if on_host:
output[k] = split.to_pandas()
else:
output[k] = split
k += 1
del gb
return output
@annotate("mid_level_groupby", color="green", domain="nvt_python")
def _mid_level_groupby(dfs, col, cont_cols, agg_list, freq_limit, on_host):
ignore_index = True
if on_host:
gb = cudf.from_pandas(_concat(dfs, ignore_index)).groupby(col, dropna=False).sum()
else:
gb = _concat(dfs, ignore_index).groupby(col, dropna=False).sum()
gb.reset_index(drop=False, inplace=True)
name_count = _make_name(col, "count")
if freq_limit:
gb = gb[gb[name_count] >= freq_limit]
required = [col]
if "count" in agg_list:
required.append(name_count)
ddof = 1
for cont_col in cont_cols:
name_sum = _make_name(col, cont_col, "sum")
if "sum" in agg_list:
required.append(name_sum)
if "mean" in agg_list:
name_mean = _make_name(col, cont_col, "mean")
required.append(name_mean)
gb[name_mean] = gb[name_sum] / gb[name_count]
if "var" in agg_list or "std" in agg_list:
n = gb[name_count]
x = gb[name_sum]
x2 = gb[_make_name(col, cont_col, "pow2", "sum")]
result = x2 - x ** 2 / n
div = n - ddof
div[div < 1] = 1
result /= div
result[(n - ddof) == 0] = np.nan
if "var" in agg_list:
name_var = _make_name(col, cont_col, "var")
required.append(name_var)
gb[name_var] = result
if "std" in agg_list:
name_std = _make_name(col, cont_col, "std")
required.append(name_std)
gb[name_std] = np.sqrt(result)
if on_host:
gb_pd = gb[required].to_pandas()
del gb
return gb_pd
return gb[required]
@annotate("write_gb_stats", color="green", domain="nvt_python")
def _write_gb_stats(dfs, base_path, col, on_host):
ignore_index = True
df = _concat(dfs, ignore_index)
if on_host:
df = cudf.from_pandas(df)
rel_path = "cat_stats.%s.parquet" % (col)
path = os.path.join(base_path, rel_path)
if len(df):
df = df.sort_values(col, na_position="first")
df.to_parquet(path, write_index=False, compression=None)
else:
df_null = cudf.DataFrame({col: [None]})
df_null[col] = df_null[col].astype(df[col].dtype)
df_null.to_parquet(path, write_index=False, compression=None)
del df
return path
@annotate("write_uniques", color="green", domain="nvt_python")
def _write_uniques(dfs, base_path, col, on_host):
ignore_index = True
df = _concat(dfs, ignore_index)
if on_host:
df = cudf.from_pandas(df)
rel_path = "unique.%s.parquet" % (col)
path = "/".join([base_path, rel_path])
if len(df):
# Make sure first category is Null
df = df.sort_values(col, na_position="first")
if not df[col]._column.has_nulls:
df = cudf.DataFrame(
{col: _concat([cudf.Series([None], dtype=df[col].dtype), df[col]], ignore_index)}
)
df.to_parquet(path, write_index=False, compression=None)
else:
df_null = cudf.DataFrame({col: [None]})
df_null[col] = df_null[col].astype(df[col].dtype)
df_null.to_parquet(path, write_index=False, compression=None)
del df
return path
def _finish_labels(paths, cols):
return {col: paths[i] for i, col in enumerate(cols)}
def _groupby_to_disk(
ddf,
write_func,
cols,
agg_cols,
agg_list,
out_path,
freq_limit,
tree_width,
on_host,
stat_name="categories",
):
if not cols:
return {}
# Update tree_width
if tree_width is None:
tree_width = {c: 8 for c in cols}
elif isinstance(tree_width, int):
tree_width = {c: tree_width for c in cols}
else:
for col in cols:
if col not in tree_width:
tree_width[col] = 8
# Make dedicated output directory for the categories
fs = get_fs_token_paths(out_path)[0]
out_path = fs.sep.join([out_path, stat_name])
fs.mkdirs(out_path, exist_ok=True)
dsk = {}
token = tokenize(ddf, cols, out_path, freq_limit, tree_width, on_host)
level_1_name = "level_1-" + token
split_name = "split-" + token
level_2_name = "level_2-" + token
level_3_name = "level_3-" + token
finalize_labels_name = stat_name + "-" + token
for p in range(ddf.npartitions):
dsk[(level_1_name, p)] = (
_top_level_groupby,
(ddf._name, p),
cols,
tree_width,
agg_cols,
("std" in agg_list or "var" in agg_list),
on_host,
)
k = 0
for c, col in enumerate(cols):
for s in range(tree_width[col]):
dsk[(split_name, p, c, s)] = (getitem, (level_1_name, p), k)
k += 1
for c, col in enumerate(cols):
for s in range(tree_width[col]):
dsk[(level_2_name, c, s)] = (
_mid_level_groupby,
[(split_name, p, c, s) for p in range(ddf.npartitions)],
col,
agg_cols,
agg_list,
freq_limit,
on_host,
)
dsk[(level_3_name, c)] = (
write_func,
[(level_2_name, c, s) for s in range(tree_width[col])],
out_path,
col,
on_host,
)
dsk[finalize_labels_name] = (
_finish_labels,
[(level_3_name, c) for c, col in enumerate(cols)],
cols,
)
graph = HighLevelGraph.from_collections(finalize_labels_name, dsk, dependencies=[ddf])
return graph, finalize_labels_name
def _category_stats(
ddf, cols, agg_cols, agg_list, out_path, freq_limit, tree_width, on_host, stat_name="categories"
):
# Check if we only need categories
if agg_cols == [] and agg_list == []:
agg_list = ["count"]
return _groupby_to_disk(
ddf,
_write_uniques,
cols,
agg_cols,
agg_list,
out_path,
freq_limit,
tree_width,
on_host,
stat_name=stat_name,
)
# Otherwise, getting category-statistics
if isinstance(agg_cols, str):
agg_cols = [agg_cols]
if agg_list == []:
agg_list = ["count"]
return _groupby_to_disk(
ddf,
_write_gb_stats,
cols,
agg_cols,
agg_list,
out_path,
freq_limit,
tree_width,
on_host,
stat_name=stat_name,
)
def _encode(name, path, gdf, cat_cache, na_sentinel=-1, freq_threshold=0):
value = None
if path:
if cat_cache is not None:
cat_cache = cat_cache if isinstance(cat_cache, str) else cat_cache.get(name, "disk")
if len(gdf):
with get_worker_cache("cats") as cache:
value = fetch_table_data(
cache, path, columns=[name], cache=cat_cache, cats_only=True
)
else:
value = cudf.io.read_parquet(path, index=False, columns=[name])
value.index.name = "labels"
value.reset_index(drop=False, inplace=True)
vals = gdf[name].copy(deep=False)
if value is None:
value = cudf.DataFrame({name: [None]})
value[name] = value[name].astype(vals.dtype)
value.index.name = "labels"
value.reset_index(drop=False, inplace=True)
if freq_threshold > 0:
codes = cudf.DataFrame({name: vals.copy(), "order": cp.arange(len(vals))})
codes = codes.merge(value, on=name, how="left").sort_values("order")["labels"]
codes.fillna(na_sentinel, inplace=True)
return codes.values
else:
# Use `searchsorted` if we are using a "full" encoding
labels = value[name].searchsorted(vals, side="left", na_position="first")
labels[labels >= len(value[name])] = na_sentinel
return labels
def _read_groupby_stat_df(path, name, cat_cache):
if cat_cache is not None:
cat_cache = cat_cache if isinstance(cat_cache, str) else cat_cache.get(name, "disk")
with get_worker_cache("stats") as cache:
if cache:
return fetch_table_data(cache, path, cache=cat_cache)
return cudf.io.read_parquet(path, index=False)
```
#### File: NVTabular/nvtabular/io.py
```python
import collections
import enum
import functools
import json
import logging
import math
import os
import queue
import random
import threading
import warnings
from collections import defaultdict
from io import BytesIO
from uuid import uuid4
import cudf
import cupy as cp
import dask
import dask_cudf
import numba.cuda as cuda
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from cudf._lib.nvtx import annotate
from cudf.io.parquet import ParquetWriter as pwriter
from dask.base import tokenize
from dask.dataframe.core import new_dd_object
from dask.dataframe.io.parquet.utils import _analyze_paths
from dask.delayed import Delayed
from dask.highlevelgraph import HighLevelGraph
from dask.utils import natural_sort_key, parse_bytes
from fsspec.core import get_fs_token_paths
from fsspec.utils import stringify_path
from nvtabular.worker import clean_worker_cache, get_worker_cache
LOG = logging.getLogger("nvtabular")
class Shuffle(enum.Enum):
PER_PARTITION = 0
PER_WORKER = 1
FULL = 2
#
# Helper Function definitions
#
def _check_shuffle_arg(shuffle):
if shuffle is None:
return shuffle
if isinstance(shuffle, Shuffle):
if shuffle == Shuffle.FULL:
raise ValueError('`shuffle="full"` is not yet supported.')
elif shuffle is True:
shuffle = Shuffle.PER_WORKER
warnings.warn("`shuffle=True` is deprecated. Using `PER_WORKER`.", DeprecationWarning)
elif shuffle is False:
shuffle = None
else:
raise ValueError(f"`shuffle={shuffle}` not recognized.")
return shuffle
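# Behavior summary:
#   _check_shuffle_arg(True)         -> Shuffle.PER_WORKER (plus a DeprecationWarning)
#   _check_shuffle_arg(False)        -> None
#   _check_shuffle_arg(Shuffle.FULL) -> raises ValueError (not yet supported)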
def _allowable_batch_size(gpu_memory_frac, row_size):
free_mem = device_mem_size(kind="free")
gpu_memory = free_mem * gpu_memory_frac
return max(int(gpu_memory / row_size), 1)
def _shuffle_gdf(gdf, gdf_size=None):
""" Shuffles a cudf dataframe, returning a new dataframe with randomly
ordered rows """
gdf_size = gdf_size or len(gdf)
arr = cp.arange(gdf_size)
cp.random.shuffle(arr)
return gdf.iloc[arr]
def device_mem_size(kind="total"):
if kind not in ["free", "total"]:
raise ValueError("{0} not a supported option for device_mem_size.".format(kind))
try:
if kind == "free":
return int(cuda.current_context().get_memory_info()[0])
else:
return int(cuda.current_context().get_memory_info()[1])
except NotImplementedError:
import pynvml
pynvml.nvmlInit()
if kind == "free":
warnings.warn("get_memory_info is not supported. Using total device memory from NVML.")
size = int(pynvml.nvmlDeviceGetMemoryInfo(pynvml.nvmlDeviceGetHandleByIndex(0)).total)
pynvml.nvmlShutdown()
return size
def guid():
""" Simple utility function to get random hex string
"""
return uuid4().hex
def _merge_general_metadata(meta_list):
""" Combine list of "general" metadata dicts into
a single dict
"""
if not meta_list:
return {}
meta = None
for md in meta_list:
if meta:
meta["data_paths"] += md["data_paths"]
meta["file_stats"] += md["file_stats"]
else:
meta = md.copy()
return meta
def _write_pq_metadata_file(md_list, fs, path):
""" Converts list of parquet metadata objects into
a single shared _metadata file.
"""
if md_list:
metadata_path = fs.sep.join([path, "_metadata"])
_meta = cudf.io.merge_parquet_filemetadata(md_list) if len(md_list) > 1 else md_list[0]
with fs.open(metadata_path, "wb") as fil:
_meta.tofile(fil)
return
def _set_dtypes(chunk, dtypes):
for col, dtype in dtypes.items():
if type(dtype) is str:
if "hex" in dtype and chunk[col].dtype == "object":
chunk[col] = chunk[col].str.htoi()
chunk[col] = chunk[col].astype(np.int32)
else:
chunk[col] = chunk[col].astype(dtype)
return chunk
def _detect_format(data):
""" Utility to detect the format of `data`
"""
if isinstance(data, cudf.DataFrame):
return "cudf"
elif isinstance(data, pd.DataFrame):
return "pandas"
elif isinstance(data, pa.Table):
return "arrow"
else:
file_type = str(data).split(".")[-1]
if file_type not in ("parquet", "csv"):
raise ValueError("Data format not recognized.")
return file_type
#
# Writer Definitions
#
def _writer_cls_factory(output_format, output_path):
if output_format == "parquet":
writer_cls = ParquetWriter
elif output_format == "hugectr":
writer_cls = HugeCTRWriter
else:
raise ValueError("Output format not yet supported.")
fs = get_fs_token_paths(output_path)[0]
return writer_cls, fs
def writer_factory(
output_format,
output_path,
out_files_per_proc,
shuffle,
use_guid=False,
bytes_io=False,
num_threads=0,
):
if output_format is None:
return None
writer_cls, fs = _writer_cls_factory(output_format, output_path)
return writer_cls(
output_path,
num_out_files=out_files_per_proc,
shuffle=shuffle,
fs=fs,
use_guid=use_guid,
bytes_io=bytes_io,
num_threads=num_threads,
)
class Writer:
def __init__(self):
pass
def add_data(self, gdf):
raise NotImplementedError()
def package_general_metadata(self):
raise NotImplementedError()
@classmethod
def write_general_metadata(cls, data, fs, out_dir):
raise NotImplementedError()
@classmethod
def write_special_metadata(cls, data, fs, out_dir):
raise NotImplementedError()
def close(self):
pass
class ThreadedWriter(Writer):
def __init__(
self,
out_dir,
num_out_files=30,
num_threads=0,
cats=None,
conts=None,
labels=None,
shuffle=None,
fs=None,
use_guid=False,
bytes_io=False,
):
# set variables
self.out_dir = out_dir
self.cats = cats
self.conts = conts
self.labels = labels
self.shuffle = shuffle
self.column_names = None
if labels and conts:
self.column_names = labels + conts
self.col_idx = {}
self.num_threads = num_threads
self.num_out_files = num_out_files
self.num_samples = [0] * num_out_files
self.data_paths = None
self.need_cal_col_names = True
self.use_guid = use_guid
self.bytes_io = bytes_io
# Resolve file system
self.fs = fs or get_fs_token_paths(str(out_dir))[0]
# Only use threading if num_threads > 1
self.queue = None
if self.num_threads > 1:
# create thread queue and locks
self.queue = queue.Queue(num_threads)
self.write_locks = [threading.Lock() for _ in range(num_out_files)]
# signifies that end-of-data and that the thread should shut down
self._eod = object()
# create and start threads
for _ in range(num_threads):
write_thread = threading.Thread(target=self._write_thread, daemon=True)
write_thread.start()
def set_col_names(self, labels, cats, conts):
self.cats = cats
self.conts = conts
self.labels = labels
self.column_names = labels + conts
def _write_table(self, idx, data):
return
def _write_thread(self):
return
@annotate("add_data", color="orange", domain="nvt_python")
def add_data(self, gdf):
# Populate columns idxs
if not self.col_idx:
for i, x in enumerate(gdf.columns.values):
self.col_idx[str(x)] = i
# Generate `ind` array to map each row to an output file.
# This approach is certainly more optimized for shuffling
# than it is for non-shuffling, but using a single code
# path is probably worth the (possible) minor overhead.
nrows = gdf.shape[0]
typ = np.min_scalar_type(nrows * 2)
if self.shuffle:
ind = cp.random.choice(cp.arange(self.num_out_files, dtype=typ), nrows)
else:
ind = cp.arange(nrows, dtype=typ)
cp.floor_divide(ind, math.ceil(nrows / self.num_out_files), out=ind)
for x, group in enumerate(
gdf.scatter_by_map(ind, map_size=self.num_out_files, keep_index=False)
):
self.num_samples[x] += len(group)
# It seems that the `copy()` operations here are necessary
# (test_io.py::test_mulifile_parquet fails otherwise)...
if self.num_threads > 1:
self.queue.put((x, group.copy()))
else:
self._write_table(x, group.copy())
# wait for all writes to finish before exiting
# (so that we aren't using memory)
if self.num_threads > 1:
self.queue.join()
def package_general_metadata(self):
data = {}
if self.cats is None:
return data
data["data_paths"] = self.data_paths
data["file_stats"] = []
for i, path in enumerate(self.data_paths):
fn = path.split(self.fs.sep)[-1]
data["file_stats"].append({"file_name": fn, "num_rows": self.num_samples[i]})
# cats
data["cats"] = []
for c in self.cats:
data["cats"].append({"col_name": c, "index": self.col_idx[c]})
# conts
data["conts"] = []
for c in self.conts:
data["conts"].append({"col_name": c, "index": self.col_idx[c]})
# labels
data["labels"] = []
for c in self.labels:
data["labels"].append({"col_name": c, "index": self.col_idx[c]})
return data
@classmethod
def write_general_metadata(cls, data, fs, out_dir):
if not data:
return
data_paths = data.pop("data_paths", [])
num_out_files = len(data_paths)
# Write file_list
file_list_writer = fs.open(fs.sep.join([out_dir, "_file_list.txt"]), "w")
file_list_writer.write(str(num_out_files) + "\n")
for f in data_paths:
file_list_writer.write(f + "\n")
file_list_writer.close()
# Write metadata json
metadata_writer = fs.open(fs.sep.join([out_dir, "_metadata.json"]), "w")
json.dump(data, metadata_writer)
metadata_writer.close()
@classmethod
def write_special_metadata(cls, data, fs, out_dir):
pass
def _close_writers(self):
for writer in self.data_writers:
writer.close()
return None
def close(self):
if self.num_threads > 1:
# wake up all the worker threads and signal for them to exit
for _ in range(self.num_threads):
self.queue.put(self._eod)
# wait for pending writes to finish
self.queue.join()
# Close writers and collect various metadata
_general_meta = self.package_general_metadata()
_special_meta = self._close_writers()
# Move in-memory file to disk
if self.bytes_io:
self._bytesio_to_disk()
return _general_meta, _special_meta
def _bytesio_to_disk(self):
raise NotImplementedError("In-memory buffering/shuffling not implemented for this format.")
class ParquetWriter(ThreadedWriter):
def __init__(self, out_dir, **kwargs):
super().__init__(out_dir, **kwargs)
self.data_paths = []
self.data_writers = []
self.data_bios = []
for i in range(self.num_out_files):
if self.use_guid:
fn = f"{i}.{guid()}.parquet"
else:
fn = f"{i}.parquet"
path = os.path.join(out_dir, fn)
self.data_paths.append(path)
if self.bytes_io:
bio = BytesIO()
self.data_bios.append(bio)
self.data_writers.append(pwriter(bio, compression=None))
else:
self.data_writers.append(pwriter(path, compression=None))
def _write_table(self, idx, data):
self.data_writers[idx].write_table(data)
def _write_thread(self):
while True:
item = self.queue.get()
try:
if item is self._eod:
break
idx, data = item
with self.write_locks[idx]:
self._write_table(idx, data)
finally:
self.queue.task_done()
@classmethod
def write_special_metadata(cls, md, fs, out_dir):
# Sort metadata by file name and convert list of
# tuples to a list of metadata byte-blobs
md_list = [m[1] for m in sorted(list(md.items()), key=lambda x: natural_sort_key(x[0]))]
# Aggregate metadata and write _metadata file
_write_pq_metadata_file(md_list, fs, out_dir)
def _close_writers(self):
md_dict = {}
for writer, path in zip(self.data_writers, self.data_paths):
fn = path.split(self.fs.sep)[-1]
md_dict[fn] = writer.close(metadata_file_path=fn)
return md_dict
def _bytesio_to_disk(self):
for bio, path in zip(self.data_bios, self.data_paths):
gdf = cudf.io.read_parquet(bio, index=False)
bio.close()
if self.shuffle == Shuffle.PER_WORKER:
gdf = _shuffle_gdf(gdf)
gdf.to_parquet(path, compression=None, index=False)
return
class HugeCTRWriter(ThreadedWriter):
def __init__(self, out_dir, **kwargs):
super().__init__(out_dir, **kwargs)
self.data_paths = [os.path.join(out_dir, f"{i}.data") for i in range(self.num_out_files)]
self.data_writers = [open(f, "ab") for f in self.data_paths]
def _write_table(self, idx, data):
ones = np.array(([1] * data.shape[0]), dtype=np.intc)
df = data[self.column_names].to_pandas().astype(np.single)
for i in range(len(self.cats)):
df["___" + str(i) + "___" + self.cats[i]] = ones
df[self.cats[i]] = data[self.cats[i]].to_pandas().astype(np.longlong)
self.data_writers[idx].write(df.to_numpy().tobytes())
def _write_thread(self):
while True:
item = self.queue.get()
try:
if item is self._eod:
break
idx, data = item
with self.write_locks[idx]:
self._write_table(idx, data)
finally:
self.queue.task_done()
def _close_writers(self):
for i, writer in enumerate(self.data_writers):
if self.cats:
# Write HugeCTR Metadata
writer.seek(0)
# error_check (0: no error check; 1: check_num)
# num of samples in this file
# Dimension of the labels
# Dimension of the features
# slot_num for each embedding
# reserved for future use
header = np.array(
[
0,
self.num_samples[i],
len(self.labels),
len(self.conts),
len(self.cats),
0,
0,
0,
],
dtype=np.longlong,
)
writer.write(header.tobytes())
writer.close()
return None
def _bytesio_to_disk(self):
raise ValueError("hugectr binary format doesn't support PER_WORKER shuffle yet")
#
# Dask-based IO
#
@annotate("write_output_partition", color="green", domain="nvt_python")
def _write_output_partition(
gdf,
processed_path,
shuffle,
out_files_per_proc,
fs,
cat_names,
cont_names,
label_names,
output_format,
num_threads,
):
gdf_size = len(gdf)
out_files_per_proc = out_files_per_proc or 1
# Get cached writer (or create/cache a new one)
with get_worker_cache("writer") as writer_cache:
writer = writer_cache.get(processed_path, None)
if writer is None:
writer = writer_factory(
output_format,
processed_path,
out_files_per_proc,
shuffle,
use_guid=True,
bytes_io=(shuffle == Shuffle.PER_WORKER),
num_threads=num_threads,
)
writer.set_col_names(labels=label_names, cats=cat_names, conts=cont_names)
writer_cache[processed_path] = writer
# Add data
writer.add_data(gdf)
return gdf_size
def _ddf_to_dataset(
ddf,
fs,
output_path,
shuffle,
out_files_per_proc,
cat_names,
cont_names,
label_names,
output_format,
client,
num_threads,
):
# Construct graph for Dask-based dataset write
name = "write-processed"
write_name = name + tokenize(
ddf, shuffle, out_files_per_proc, cat_names, cont_names, label_names
)
task_list = []
dsk = {}
for idx in range(ddf.npartitions):
key = (write_name, idx)
dsk[key] = (
_write_output_partition,
(ddf._name, idx),
output_path,
shuffle,
out_files_per_proc,
fs,
cat_names,
cont_names,
label_names,
output_format,
num_threads,
)
task_list.append(key)
dsk[name] = (lambda x: x, task_list)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])
out = Delayed(name, graph)
# Trigger write execution
if client:
out = client.compute(out).result()
else:
out = dask.compute(out, scheduler="synchronous")[0]
# Follow-up Shuffling and _metadata creation
_finish_dataset(client, ddf, output_path, fs, output_format)
def _finish_dataset(client, ddf, output_path, fs, output_format):
# Finish data writing
if client:
client.cancel(ddf)
ddf = None
out = client.run(_worker_finish, output_path)
general_md = []
special_md = []
for (gen, spec) in out.values():
general_md.append(gen)
if spec:
special_md.append(spec)
general_md = _merge_general_metadata(general_md)
special_md = dict(collections.ChainMap(*special_md))
else:
ddf = None
general_md, special_md = _worker_finish(output_path)
# Write metadata on client
wc, fs = _writer_cls_factory(output_format, output_path)
wc.write_general_metadata(general_md, fs, output_path)
wc.write_special_metadata(special_md, fs, output_path)
# Clean writer caches
if client:
client.run(clean_worker_cache, "writer")
else:
clean_worker_cache("writer")
def _worker_finish(processed_path):
general_md, special_md = {}, {}
with get_worker_cache("writer") as writer_cache:
writer = writer_cache.get(processed_path, None)
if writer:
general_md, special_md = writer.close()
return general_md, special_md
class Dataset:
""" Dask-based Dataset Class
Converts a dataset into a dask_cudf DataFrame on demand
Parameters
-----------
path_or_source : str, list of str, or <dask.dataframe|cudf|pd>.DataFrame
Dataset path (or list of paths), or a DataFrame. If string,
should specify a specific file or directory path. If this is a
directory path, the directory structure must be flat (nested
directories are not yet supported).
engine : str or DatasetEngine
DatasetEngine object or string identifier of engine. Current
string options include: ("parquet", "csv"). This argument
is ignored if path_or_source is a DataFrame type.
part_size : str or int
Desired size (in bytes) of each Dask partition.
If None, part_mem_fraction will be used to calculate the
partition size. Note that the underlying engine may allow
other custom kwargs to override this argument. This argument
is ignored if path_or_source is a DataFrame type.
part_mem_fraction : float (default 0.125)
Fractional size of desired dask partitions (relative
to GPU memory capacity). Ignored if part_size is passed
directly. Note that the underlying engine may allow other
custom kwargs to override this argument. This argument
is ignored if path_or_source is a DataFrame type.
storage_options: None or dict
Further parameters to pass to the bytes backend. This argument
is ignored if path_or_source is a DataFrame type.
"""
def __init__(
self,
path_or_source,
engine=None,
part_size=None,
part_mem_fraction=None,
storage_options=None,
dtypes=None,
**kwargs,
):
self.dtypes = dtypes
if isinstance(path_or_source, (dask.dataframe.DataFrame, cudf.DataFrame, pd.DataFrame)):
# User is passing in a <dask.dataframe|cudf|pd>.DataFrame
# Use DataFrameDatasetEngine
if isinstance(path_or_source, cudf.DataFrame):
path_or_source = dask_cudf.from_cudf(path_or_source, npartitions=1)
elif isinstance(path_or_source, pd.DataFrame):
path_or_source = dask_cudf.from_cudf(
cudf.from_pandas(path_or_source), npartitions=1
)
elif not isinstance(path_or_source, dask_cudf.DataFrame):
path_or_source = dask_cudf.from_dask_dataframe(path_or_source)
if part_size:
warnings.warn("part_size is ignored for DataFrame input.")
if part_mem_fraction:
warnings.warn("part_mem_fraction is ignored for DataFrame input.")
self.engine = DataFrameDatasetEngine(path_or_source)
else:
if part_size:
# If a specific partition size is given, use it directly
part_size = parse_bytes(part_size)
else:
# If a fractional partition size is given, calculate part_size
part_mem_fraction = part_mem_fraction or 0.125
assert part_mem_fraction > 0.0 and part_mem_fraction < 1.0
if part_mem_fraction > 0.25:
warnings.warn(
"Using very large partitions sizes for Dask. "
"Memory-related errors are likely."
)
part_size = int(device_mem_size(kind="total") * part_mem_fraction)
# Engine-agnostic path handling
paths = path_or_source
if hasattr(paths, "name"):
paths = stringify_path(paths)
if isinstance(paths, str):
paths = [paths]
storage_options = storage_options or {}
# If engine is not provided, try to infer from end of paths[0]
if engine is None:
engine = paths[0].split(".")[-1]
if isinstance(engine, str):
if engine == "parquet":
self.engine = ParquetDatasetEngine(
paths, part_size, storage_options=storage_options, **kwargs
)
elif engine == "csv":
self.engine = CSVDatasetEngine(
paths, part_size, storage_options=storage_options, **kwargs
)
else:
raise ValueError("Only parquet and csv supported (for now).")
else:
self.engine = engine(paths, part_size, storage_options=storage_options)
def to_ddf(self, columns=None, shuffle=False, seed=None):
""" Convert `Dataset` object to `dask_cudf.DataFrame`
Parameters
-----------
columns : str or list(str); default None
Columns to include in output `DataFrame`. If not specified,
the output will contain all known columns in the Dataset.
shuffle : bool; default False
Whether to shuffle the order of partitions in the output
`dask_cudf.DataFrame`. Note that this does not shuffle
the rows within each partition. This is because the data
is not actually loaded into memory for this operation.
seed : int; Optional
The random seed to use if `shuffle=True`. If nothing
is specified, the current system time will be used by the
`random` std library.
"""
# Use DatasetEngine to create ddf
ddf = self.engine.to_ddf(columns=columns)
# Shuffle the partitions of ddf (optional)
if shuffle and ddf.npartitions > 1:
parts = ddf.to_delayed()
random.seed(seed)
random.shuffle(parts)
ddf = dask_cudf.from_delayed(parts)
# Special dtype conversion (optional)
if self.dtypes:
_meta = _set_dtypes(ddf._meta, self.dtypes)
return ddf.map_partitions(_set_dtypes, self.dtypes, meta=_meta)
return ddf
def to_iter(self, columns=None, indices=None, shuffle=False, seed=None):
""" Convert `Dataset` object to a `cudf.DataFrame` iterator.
Note that this method will use `to_ddf` to produce a
`dask_cudf.DataFrame`, and materialize a single partition for
each iteration.
Parameters
-----------
columns : str or list(str); default None
Columns to include in each `DataFrame`. If not specified,
the outputs will contain all known columns in the Dataset.
indices : list(int); default None
A specific list of partition indices to iterate over. If
nothing is specified, all partitions will be returned in
order (or the shuffled order, if `shuffle=True`).
shuffle : bool; default False
Whether to shuffle the order of `dask_cudf.DataFrame`
partitions used by the iterator. If the `indices`
argument is specified, those indices correspond to the
partition indices AFTER the shuffle operation.
seed : int; Optional
The random seed to use if `shuffle=True`. If nothing
is specified, the current system time will be used by the
`random` std library.
"""
if isinstance(columns, str):
columns = [columns]
return DataFrameIter(
self.to_ddf(columns=columns, shuffle=shuffle, seed=seed), indices=indices
)
@property
def num_rows(self):
return self.engine.num_rows
class DatasetEngine:
""" DatasetEngine Class
Base class for Dask-powered IO engines. Engines must provide
a ``to_ddf`` method.
"""
def __init__(self, paths, part_size, storage_options=None):
paths = sorted(paths, key=natural_sort_key)
self.paths = paths
self.part_size = part_size
fs, fs_token, _ = get_fs_token_paths(paths, mode="rb", storage_options=storage_options)
self.fs = fs
self.fs_token = fs_token
def to_ddf(self, columns=None):
raise NotImplementedError(""" Return a dask_cudf.DataFrame """)
@property
def num_rows(self):
raise NotImplementedError(""" Returns the number of rows in the dataset """)
class ParquetDatasetEngine(DatasetEngine):
""" ParquetDatasetEngine
Dask-based version of cudf.read_parquet.
"""
def __init__(
self,
paths,
part_size,
storage_options,
row_groups_per_part=None,
legacy=False,
batch_size=None,
):
# TODO: Improve dask_cudf.read_parquet performance so that
# this class can be slimmed down.
super().__init__(paths, part_size, storage_options)
self.batch_size = batch_size
self._metadata, self._base = self.metadata
self._pieces = None
if row_groups_per_part is None:
file_path = self._metadata.row_group(0).column(0).file_path
path0 = (
self.fs.sep.join([self._base, file_path])
if file_path != ""
else self._base # This is a single file
)
if row_groups_per_part is None:
rg_byte_size_0 = (
cudf.io.read_parquet(path0, row_groups=0, row_group=0)
.memory_usage(deep=True, index=True)
.sum()
)
row_groups_per_part = self.part_size / rg_byte_size_0
if row_groups_per_part < 1.0:
warnings.warn(
f"Row group size {rg_byte_size_0} is bigger than requested part_size "
f"{self.part_size}"
)
row_groups_per_part = 1.0
self.row_groups_per_part = int(row_groups_per_part)
assert self.row_groups_per_part > 0
@property
def pieces(self):
if self._pieces is None:
self._pieces = self._get_pieces(self._metadata, self._base)
return self._pieces
@property
@functools.lru_cache(1)
def metadata(self):
paths = self.paths
fs = self.fs
if len(paths) > 1:
# This is a list of files
dataset = pq.ParquetDataset(paths, filesystem=fs, validate_schema=False)
base, fns = _analyze_paths(paths, fs)
elif fs.isdir(paths[0]):
# This is a directory
dataset = pq.ParquetDataset(paths[0], filesystem=fs, validate_schema=False)
allpaths = fs.glob(paths[0] + fs.sep + "*")
base, fns = _analyze_paths(allpaths, fs)
else:
# This is a single file
dataset = pq.ParquetDataset(paths[0], filesystem=fs)
base = paths[0]
fns = [None]
metadata = None
if dataset.metadata:
# We have a metadata file
return dataset.metadata, base
else:
# Collect proper metadata manually
metadata = None
for piece, fn in zip(dataset.pieces, fns):
md = piece.get_metadata()
if fn:
md.set_file_path(fn)
if metadata:
metadata.append_row_groups(md)
else:
metadata = md
return metadata, base
@property
def num_rows(self):
metadata, _ = self.metadata
return metadata.num_rows
@annotate("get_pieces", color="green", domain="nvt_python")
def _get_pieces(self, metadata, data_path):
# get the number of row groups per file
file_row_groups = defaultdict(int)
for rg in range(metadata.num_row_groups):
fpath = metadata.row_group(rg).column(0).file_path
if fpath is None:
raise ValueError("metadata is missing file_path string.")
file_row_groups[fpath] += 1
# create pieces from each file, limiting the number of row_groups in each piece
pieces = []
for filename, row_group_count in file_row_groups.items():
row_groups = range(row_group_count)
for i in range(0, row_group_count, self.row_groups_per_part):
rg_list = list(row_groups[i : i + self.row_groups_per_part])
full_path = (
self.fs.sep.join([data_path, filename])
if filename != ""
else data_path # This is a single file
)
pieces.append((full_path, rg_list))
return pieces
@staticmethod
@annotate("read_piece", color="green", domain="nvt_python")
def read_piece(piece, columns):
path, row_groups = piece
return cudf.io.read_parquet(path, row_groups=row_groups, columns=columns, index=False)
def meta_empty(self, columns=None):
path, _ = self.pieces[0]
return cudf.io.read_parquet(path, row_groups=0, columns=columns, index=False).iloc[:0]
def to_ddf(self, columns=None):
pieces = self.pieces
name = "parquet-to-ddf-" + tokenize(self.fs_token, pieces, columns)
dsk = {
(name, p): (ParquetDatasetEngine.read_piece, piece, columns)
for p, piece in enumerate(pieces)
}
meta = self.meta_empty(columns=columns)
divisions = [None] * (len(pieces) + 1)
return new_dd_object(dsk, name, meta, divisions)
class CSVDatasetEngine(DatasetEngine):
""" CSVDatasetEngine
Thin wrapper around dask_cudf.read_csv.
"""
def __init__(self, paths, part_size, storage_options=None, **kwargs):
super().__init__(paths, part_size, storage_options)
self._meta = {}
self.csv_kwargs = kwargs
self.csv_kwargs["storage_options"] = storage_options
# CSV reader needs a list of files
# (Assume flat directory structure if this is a dir)
if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], "*"]))
def to_ddf(self, columns=None):
if columns:
return dask_cudf.read_csv(self.paths, chunksize=self.part_size, **self.csv_kwargs)[
columns
]
return dask_cudf.read_csv(self.paths, chunksize=self.part_size, **self.csv_kwargs)
class DataFrameDatasetEngine(DatasetEngine):
""" DataFrameDatasetEngine
Allow NVT to interact with a dask_cudf.DataFrame object
in the same way as a dataset on disk.
"""
def __init__(self, ddf):
self._ddf = ddf
def to_ddf(self, columns=None):
if isinstance(columns, list):
return self._ddf[columns]
elif isinstance(columns, str):
return self._ddf[[columns]]
return self._ddf
@property
def num_rows(self):
return len(self._ddf)
class DataFrameIter:
def __init__(self, ddf, columns=None, indices=None):
self.indices = (
indices if isinstance(indices, list) and len(indices) > 0 else range(ddf.npartitions)
)
self._ddf = ddf
self.columns = columns
def __len__(self):
return len(self.indices)
def __iter__(self):
for i in self.indices:
part = self._ddf.get_partition(i)
if self.columns:
yield part[self.columns].compute(scheduler="synchronous")
else:
yield part.compute(scheduler="synchronous")
part = None
```
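A minimal usage sketch for the `Dataset` class defined above. The data path, column names, and `part_mem_fraction` value are placeholders, and the snippet assumes the module is importable as `nvtabular.io`:
```python
# Hypothetical usage of the Dataset API above; paths and column names are made up.
from nvtabular.io import Dataset

ds = Dataset("/data/parquet_dir", engine="parquet", part_mem_fraction=0.1)
print(ds.num_rows)                    # row count taken from the parquet metadata
ddf = ds.to_ddf(columns=["x", "y"])   # lazy dask_cudf.DataFrame, partition order unchanged
for gdf in ds.to_iter(shuffle=True, seed=42):
    # each iteration materializes one partition as a cudf.DataFrame
    print(len(gdf))
    break
```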
#### File: NVTabular/nvtabular/torch_dataloader.py
```python
import math
import queue
import threading
import cudf
import cupy as cp
import torch
from torch.utils.dlpack import from_dlpack
from nvtabular.io import _shuffle_gdf
from nvtabular.ops import _get_embedding_order
class TensorItr:
"""
Tensor dataset, for data already in tensor format.
(see preproc::ds_to_tensor)
Parameters
-----------
tensors : list of tensors
batch_size: the size of each batch to return.
pin_memory: allows pinning of cpu memory, if used.
shuffle: whether to shuffle the samples before batching
"""
def __init__(self, tensors, batch_size=1, pin_memory=False, shuffle=False):
self.tensors = tensors
self.batch_size = batch_size
self.num_samples = self.tensors[0].size(0)
if shuffle:
self.shuffle()
if pin_memory:
for tensor in self.tensors:
tensor.pin_memory()
def __len__(self):
if self.num_samples % self.batch_size == 0:
return self.num_samples // self.batch_size
else:
return self.num_samples // self.batch_size + 1
def __iter__(self):
for idx in range(0, self.num_samples, self.batch_size):
tens = [tensor[idx : idx + self.batch_size] for tensor in self.tensors]
yield tens[0], tens[1], tens[2]
del tens
def shuffle(self):
idx = torch.randperm(self.num_samples, dtype=torch.int64)
self.tensors = [tensor[idx] for tensor in self.tensors]
def _to_tensor(gdf: cudf.DataFrame, dtype, to_cpu=False):
if gdf.empty:
return
g = gdf.to_dlpack()
cols = gdf.columns
t = from_dlpack(g).type(dtype)
del g, gdf
return t, cols
def create_tensors(gdf, cat_names=None, cont_names=None, label_names=None):
gdf_cats, gdf_conts, gdf_label = (
gdf[_get_embedding_order(cat_names)],
gdf[cont_names],
gdf[label_names],
)
del gdf
if len(gdf_cats) > 0:
cats = _to_tensor(gdf_cats, torch.long, to_cpu=False)
if len(gdf_conts) > 0:
conts = _to_tensor(gdf_conts, torch.float32, to_cpu=False)
if len(gdf_label) > 0:
label = _to_tensor(gdf_label, torch.float32, to_cpu=False)
del gdf_cats, gdf_conts, gdf_label
return [cats[0], conts[0], label[0]]
def _get_final_cols(preproc):
if "cols" not in preproc.columns_ctx["final"]:
preproc.create_final_cols()
cat_names = _get_embedding_order(preproc.columns_ctx["final"]["cols"]["categorical"])
cont_names = sorted(preproc.columns_ctx["final"]["cols"]["continuous"])
label_name = sorted(preproc.columns_ctx["final"]["cols"]["label"])
return cat_names, cont_names, label_name
class ChunkQueue:
def __init__(
self,
num_chunks=2,
batch_size=None,
iterator=None,
shuffle=False,
cat_cols=None,
cont_cols=None,
label_cols=None,
):
self.num_chunks = num_chunks
self.itr = iterator
self.batch_size = batch_size
self.q_out = queue.Queue(1)
self.cat_cols = cat_cols
self.cont_cols = cont_cols
self.label_cols = label_cols
self.shuffle = shuffle
self.stopped = False
def get(self):
return self.q_out.get()
def batch(self):
current = []
for value in self.itr:
current.append(value)
if len(current) == self.num_chunks:
yield current
current = []
if len(current) > 0:
yield current
def load_chunks(self):
spill = None
for chunks in self.batch():
if self.stopped:
return
if spill and not spill.empty:
chunks.insert(0, spill)
chunks = cudf.core.reshape.concat(chunks)
chunks.reset_index(drop=True, inplace=True)
chunks, spill = self.get_batch_div_chunk(chunks)
if self.shuffle:
_shuffle_gdf(chunks)
if len(chunks) > 0:
chunks = create_tensors(
chunks,
cat_names=self.cat_cols,
cont_names=self.cont_cols,
label_names=self.label_cols,
)
# chunks tensorized
self.q_out.put(chunks)
chunks = None
# takes care of the final batch, which is smaller than the batch size
if spill:
spill = create_tensors(
spill,
cat_names=self.cat_cols,
cont_names=self.cont_cols,
label_names=self.label_cols,
)
self.q_out.put(spill)
spill = None
self.q_out.put("end")
# For when an iterator is stopped before iteration is complete.
def stop(self):
self.stopped = True
self.q_out.queue.clear()
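# Splits the concatenated chunks at the largest multiple of batch_size; the
# remainder ("spill") is carried over and prepended to the next group of chunks.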
def get_batch_div_chunk(self, chunks):
spill_idx = int(chunks.shape[0] / self.batch_size) * self.batch_size
spill = cudf.DataFrame(chunks.iloc[spill_idx:])
chunks = cudf.DataFrame(chunks.iloc[:spill_idx])
if not chunks.empty:
chunks.reset_index(drop=True, inplace=True)
if not spill.empty:
spill.reset_index(drop=True, inplace=True)
return chunks, spill
class AsyncIterator:
"""
This class serves as the iterator for AsyncTensorBatchDatasetItr. It controls
iteration and allows clean-up after iteration is complete, without requiring
the destruction of the parent class.
"""
def __init__(
self, dataset=None, cats=None, conts=None, labels=None, batch_size=1, shuffle=False
):
self.buff = ChunkQueue(
iterator=TorchTensorBatchDatasetItr(dataset, shuffle=shuffle),
batch_size=batch_size,
cat_cols=cats,
cont_cols=conts,
label_cols=labels,
shuffle=shuffle,
)
def __iter__(self):
t1 = threading.Thread(target=self.buff.load_chunks)
t1.daemon = True
t1.start()
while True:
chunk = self.buff.get()
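# ChunkQueue.load_chunks enqueues the string "end" as a sentinel once all
# chunks have been tensorized, so receiving a str means iteration is done.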
if isinstance(chunk, str):
break
yield from TensorItr(chunk, batch_size=self.buff.batch_size)
chunk = None
def __del__(self):
self.buff.stop()
class AsyncTensorBatchDatasetItr(torch.utils.data.IterableDataset):
def __init__(self, dataset, cats=None, conts=None, labels=None, batch_size=1, shuffle=False):
self.batch_size = batch_size
self.cats = cats
self.conts = conts
self.labels = labels
self.shuffle = shuffle
self.data = dataset
def __iter__(self):
return iter(
AsyncIterator(
dataset=self.data,
batch_size=self.batch_size,
cats=self.cats,
conts=self.conts,
labels=self.labels,
shuffle=self.shuffle,
)
)
def __len__(self):
return math.ceil(self.data.num_rows / self.batch_size)
class TorchTensorBatchDatasetItr(torch.utils.data.IterableDataset):
"""
For Torch Only:
Batch tensor dataset. Iterates over the partitions of an NVTabular
Dataset, optionally shuffling the partition order and splitting the
partitions across PyTorch dataloader workers.
Parameters
-----------
dataset : nvtabular.io.Dataset representing the complete dataset
"""
def __init__(self, dataset, shuffle=None, **kwargs):
self.data = dataset
self.indices = cp.arange(dataset.to_ddf().npartitions)
if shuffle:
# cp.random.shuffle shuffles in place and returns None, so don't reassign
cp.random.shuffle(self.indices)
def __iter__(self):
indices = self.gather_indices()
yield from self.data.to_iter(indices=indices)
def gather_indices(self):
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
return self.indices
else:
per_worker = int(len(self.indices) // float(worker_info.num_workers)) + 1
worker_id = worker_info.id
start = worker_id * per_worker
return self.indices[start : start + per_worker]
def __len__(self):
return self.data.num_rows
class DLDataLoader(torch.utils.data.DataLoader):
def __len__(self):
return len(self.dataset)
``` |
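A minimal sketch of wiring the classes above into a PyTorch loop. The dataset path, column names, and batch size are placeholders, and the import paths assume the modules are exposed as `nvtabular.io` and `nvtabular.torch_dataloader`:
```python
# Hypothetical end-to-end wiring; all names below are illustrative only.
from nvtabular.io import Dataset
from nvtabular.torch_dataloader import AsyncTensorBatchDatasetItr, DLDataLoader

ds = Dataset("/data/parquet_dir", engine="parquet")
batches = AsyncTensorBatchDatasetItr(
    ds, cats=["cat_0"], conts=["cont_0"], labels=["label"], batch_size=4096, shuffle=True
)
# batch_size=None disables PyTorch's own batching; TensorItr already yields full batches.
loader = DLDataLoader(batches, batch_size=None, pin_memory=False, num_workers=0)
for cats, conts, labels in loader:
    print(cats.shape, conts.shape, labels.shape)
    break
```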
{
"source": "jperez999/systems-1",
"score": 2
} |
#### File: dag/ops/faiss.py
```python
import json
import os
from shutil import copy2
import faiss
import numpy as np
from merlin.dag import ColumnSelector
from merlin.schema import ColumnSchema, Schema
from merlin.systems.dag.ops.operator import InferenceDataFrame, PipelineableInferenceOperator
class QueryFaiss(PipelineableInferenceOperator):
def __init__(self, index_path, topk=10):
self.index_path = str(index_path)
self.topk = topk
self._index = None
super().__init__()
@classmethod
def from_config(cls, config):
parameters = json.loads(config.get("params", ""))
index_path = parameters["index_path"]
topk = parameters["topk"]
operator = QueryFaiss(index_path, topk=topk)
operator._index = faiss.read_index(str(index_path))
return operator
def export(self, path, input_schema, output_schema, params=None, node_id=None, version=1):
params = params or {}
# TODO: Copy the index into the export directory
self_params = {
# TODO: Write the (relative) path from inside the export directory
"index_path": self.index_path,
"topk": self.topk,
}
self_params.update(params)
index_filename = os.path.basename(os.path.realpath(self.index_path))
# set index path to new path after export
new_index_path = os.path.join(
path, f"{node_id}_{QueryFaiss.__name__.lower()}", str(version), index_filename
)
copy2(self.index_path, new_index_path)
self.index_path = new_index_path
return super().export(path, input_schema, output_schema, self_params, node_id, version)
def transform(self, df: InferenceDataFrame):
user_vector = list(df.tensors.values())[0]
_, indices = self._index.search(user_vector, self.topk)
# distances, indices = self.index.search(user_vector, self.topk)
candidate_ids = np.array(indices).T.astype(np.int32)
return InferenceDataFrame({"candidate_ids": candidate_ids})
def compute_input_schema(
self,
root_schema: Schema,
parents_schema: Schema,
deps_schema: Schema,
selector: ColumnSelector,
) -> Schema:
input_schema = super().compute_input_schema(
root_schema, parents_schema, deps_schema, selector
)
if len(input_schema.column_schemas) > 1:
raise ValueError(
"More than one input has been detected for this node,"
/ f"inputs received: {input_schema.column_names}"
)
return input_schema
def compute_output_schema(
self, input_schema: Schema, col_selector: ColumnSelector, prev_output_schema: Schema = None
) -> Schema:
return Schema(
[
ColumnSchema("candidate_ids", dtype=np.int32),
]
)
def setup_faiss(item_vector, output_path):
index = faiss.IndexFlatL2(item_vector[0].shape[0])
index.add(item_vector)
faiss.write_index(index, str(output_path))
``` |
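A minimal sketch of building an index with `setup_faiss` and querying it through `QueryFaiss`. The vectors and the index path are placeholders, and assigning `_index` directly stands in for the `from_config` path used in a real serving pipeline:
```python
# Hypothetical usage; vectors, paths, and tensor names are placeholders.
import faiss
import numpy as np

from merlin.systems.dag.ops.faiss import QueryFaiss, setup_faiss
from merlin.systems.dag.ops.operator import InferenceDataFrame

item_vectors = np.random.rand(1000, 64).astype(np.float32)
setup_faiss(item_vectors, "/tmp/item_index.faiss")

op = QueryFaiss("/tmp/item_index.faiss", topk=10)
op._index = faiss.read_index("/tmp/item_index.faiss")  # from_config() normally does this
user_vector = np.random.rand(1, 64).astype(np.float32)
result = op.transform(InferenceDataFrame({"user_vector": user_vector}))
print(result.tensors["candidate_ids"].shape)  # (topk, 1)
```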
{
"source": "jperezdiaz/flintrock",
"score": 2
} |
#### File: flintrock/flintrock/flintrock.py
```python
import os
import posixpath
import errno
import json
import resource
import sys
import shutil
import textwrap
import urllib.parse
import urllib.request
import warnings
# External modules
import click
import yaml
# Flintrock modules
from . import ec2
from .exceptions import (
UsageError,
UnsupportedProviderError,
NothingToDo,
Error)
from flintrock import __version__
from .services import HDFS, Spark # TODO: Remove this dependency.
FROZEN = getattr(sys, 'frozen', False)
if FROZEN:
THIS_DIR = sys._MEIPASS
else:
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
def format_message(*, message: str, indent: int=4, wrap: int=70):
"""
Format a lengthy message for printing to screen.
"""
return textwrap.indent(
textwrap.fill(
textwrap.dedent(text=message),
width=wrap),
prefix=' ' * indent)
def option_name_to_variable_name(option: str):
"""
Convert an option name like `--ec2-user` to the Python name it gets mapped to,
like `ec2_user`.
"""
return option.replace('--', '', 1).replace('-', '_')
def variable_name_to_option_name(variable: str):
"""
Convert a variable name like `ec2_user` to the Click option name it gets mapped to,
like `--ec2-user`.
"""
return '--' + variable.replace('_', '-')
def option_requires(
*,
option: str,
conditional_value=None,
requires_all: list=[],
requires_any: list=[],
scope: dict):
"""
Raise an exception if an option's requirements are not met.
The option's requirements are checked only if the option has a "truthy" value
(i.e. it's not a "falsy" value like '', None, or False), and if its value is
equal to conditional_value, if conditional_value is not None.
requires_all: Every option in this list must be defined.
requires_any: At least one option in this list must be defined.
This function looks for values by converting the option names to their
corresponding variable names (e.g. --option-a becomes option_a) and looking them
up in the provided scope.
"""
option_value = scope[option_name_to_variable_name(option)]
if option_value and \
(conditional_value is None or option_value == conditional_value):
if requires_all:
for required_option in requires_all:
required_name = option_name_to_variable_name(required_option)
if required_name not in scope or not scope[required_name]:
raise UsageError(
"Error: Missing option \"{missing_option}\" is required by "
"\"{option}{space}{conditional_value}\"."
.format(
missing_option=required_option,
option=option,
space=' ' if conditional_value is not None else '',
conditional_value=conditional_value if conditional_value is not None else ''))
if requires_any:
for required_option in requires_any:
required_name = option_name_to_variable_name(required_option)
if required_name in scope and scope[required_name] is not None:
break
else:
raise UsageError(
"Error: \"{option}{space}{conditional_value}\" requires at least "
"one of the following options to be set: {at_least}"
.format(
option=option,
space=' ' if conditional_value is not None else '',
conditional_value=conditional_value if conditional_value is not None else '',
at_least=', '.join(['"' + ra + '"' for ra in requires_any])))
def mutually_exclusive(*, options: list, scope: dict):
"""
Raise an exception if more than one of the provided options is specified.
This function looks for values by converting the option names to their
corresponding variable names (e.g. --option-a becomes option_a) and looking them
up in the provided scope.
"""
mutually_exclusive_names = [option_name_to_variable_name(o) for o in options]
used_options = set()
for name, value in scope.items():
if name in mutually_exclusive_names and scope[name]: # is not None:
used_options.add(name)
if len(used_options) > 1:
bad_option1 = used_options.pop()
bad_option2 = used_options.pop()
raise UsageError(
"Error: \"{option1}\" and \"{option2}\" are mutually exclusive.\n"
" {option1}: {value1}\n"
" {option2}: {value2}"
.format(
option1=variable_name_to_option_name(bad_option1),
value1=scope[bad_option1],
option2=variable_name_to_option_name(bad_option2),
value2=scope[bad_option2]))
def get_config_file() -> str:
"""
Get the path to Flintrock's default configuration file.
"""
config_dir = click.get_app_dir(app_name='Flintrock')
config_file = os.path.join(config_dir, 'config.yaml')
return config_file
@click.group()
@click.option('--config', default=get_config_file())
@click.option('--provider', default='ec2', type=click.Choice(['ec2']))
@click.version_option(version=__version__)
@click.pass_context
def cli(cli_context, config, provider):
"""
Flintrock
A command-line tool and library for launching Apache Spark clusters.
"""
cli_context.obj['provider'] = provider
if os.path.isfile(config):
with open(config) as f:
config_raw = yaml.safe_load(f)
config_map = config_to_click(normalize_keys(config_raw))
cli_context.default_map = config_map
else:
if config != get_config_file():
raise FileNotFoundError(errno.ENOENT, 'No such file', config)
@cli.command()
@click.argument('cluster-name')
@click.option('--num-slaves', type=int, required=True)
@click.option('--install-hdfs/--no-install-hdfs', default=False)
@click.option('--hdfs-version')
@click.option('--hdfs-download-source',
help="URL to download Hadoop from.",
default='http://www.apache.org/dyn/closer.lua/hadoop/common/hadoop-{v}/hadoop-{v}.tar.gz?as_json',
show_default=True)
@click.option('--install-spark/--no-install-spark', default=True)
@click.option('--spark-version',
help="Spark release version to install.")
@click.option('--spark-git-commit',
help="Git commit to build Spark from. "
"Set to 'latest' to build Spark from the latest commit on the "
"repository's default branch.")
@click.option('--spark-git-repository',
help="Git repository to clone Spark from.",
default='https://github.com/apache/spark',
show_default=True)
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.option('--ec2-key-name')
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-instance-type', default='m3.medium', show_default=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
# We set some of these defaults to empty strings because of boto3's parameter validation.
# See: https://github.com/boto/boto3/issues/400
@click.option('--ec2-availability-zone', default='')
@click.option('--ec2-ami')
@click.option('--ec2-user')
@click.option('--ec2-spot-price', type=float)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-subnet-id', default='')
@click.option('--ec2-instance-profile-name', default='')
@click.option('--ec2-placement-group', default='')
@click.option('--ec2-tenancy', default='default')
@click.option('--ec2-ebs-optimized/--no-ec2-ebs-optimized', default=False)
@click.option('--ec2-instance-initiated-shutdown-behavior', default='stop',
type=click.Choice(['stop', 'terminate']))
@click.pass_context
def launch(
cli_context,
cluster_name,
num_slaves,
install_hdfs,
hdfs_version,
hdfs_download_source,
install_spark,
spark_version,
spark_git_commit,
spark_git_repository,
assume_yes,
ec2_key_name,
ec2_identity_file,
ec2_instance_type,
ec2_region,
ec2_availability_zone,
ec2_ami,
ec2_user,
ec2_spot_price,
ec2_vpc_id,
ec2_subnet_id,
ec2_instance_profile_name,
ec2_placement_group,
ec2_tenancy,
ec2_ebs_optimized,
ec2_instance_initiated_shutdown_behavior):
"""
Launch a new cluster.
"""
provider = cli_context.obj['provider']
services = []
option_requires(
option='--install-hdfs',
requires_all=['--hdfs-version'],
scope=locals())
option_requires(
option='--install-spark',
requires_any=[
'--spark-version',
'--spark-git-commit'],
scope=locals())
mutually_exclusive(
options=[
'--spark-version',
'--spark-git-commit'],
scope=locals())
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-key-name',
'--ec2-identity-file',
'--ec2-instance-type',
'--ec2-region',
'--ec2-ami',
'--ec2-user'],
scope=locals())
# The subnet is required for non-default VPCs because EC2 does not
# support user-defined default subnets.
# See: https://forums.aws.amazon.com/thread.jspa?messageID=707417
# https://github.com/mitchellh/packer/issues/1935#issuecomment-111235752
option_requires(
option='--ec2-vpc-id',
requires_all=['--ec2-subnet-id'],
scope=locals())
if install_hdfs:
hdfs = HDFS(version=hdfs_version, download_source=hdfs_download_source)
services += [hdfs]
if install_spark:
if spark_version:
spark = Spark(version=spark_version)
elif spark_git_commit:
print(
"Warning: Building Spark takes a long time. "
"e.g. 15-20 minutes on an m3.xlarge instance on EC2.")
if spark_git_commit == 'latest':
spark_git_commit = get_latest_commit(spark_git_repository)
print("Building Spark at latest commit: {c}".format(c=spark_git_commit))
spark = Spark(
git_commit=spark_git_commit,
git_repository=spark_git_repository)
services += [spark]
if provider == 'ec2':
return ec2.launch(
cluster_name=cluster_name,
num_slaves=num_slaves,
services=services,
assume_yes=assume_yes,
key_name=ec2_key_name,
identity_file=ec2_identity_file,
instance_type=ec2_instance_type,
region=ec2_region,
availability_zone=ec2_availability_zone,
ami=ec2_ami,
user=ec2_user,
spot_price=ec2_spot_price,
vpc_id=ec2_vpc_id,
subnet_id=ec2_subnet_id,
instance_profile_name=ec2_instance_profile_name,
placement_group=ec2_placement_group,
tenancy=ec2_tenancy,
ebs_optimized=ec2_ebs_optimized,
instance_initiated_shutdown_behavior=ec2_instance_initiated_shutdown_behavior)
else:
raise UnsupportedProviderError(provider)
def get_latest_commit(github_repository: str):
"""
Get the latest commit on the default branch of a repository hosted on GitHub.
"""
parsed_url = urllib.parse.urlparse(github_repository)
repo_domain, repo_path = parsed_url.netloc, parsed_url.path.strip('/')
if repo_domain != 'github.com':
raise UsageError(
"Error: Getting the latest commit is only supported "
"for repositories hosted on GitHub. "
"Provided repository domain was: {d}".format(d=repo_domain))
url = "https://api.github.com/repos/{rp}/commits".format(rp=repo_path)
try:
with urllib.request.urlopen(url) as response:
result = json.loads(response.read().decode('utf-8'))
return result[0]['sha']
except Exception as e:
raise Exception(
"Could not get latest commit for repository: {r}"
.format(r=repo_path)) from e
@cli.command()
@click.argument('cluster-name')
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.pass_context
def destroy(cli_context, cluster_name, assume_yes, ec2_region, ec2_vpc_id):
"""
Destroy a cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
if not assume_yes:
cluster.print()
click.confirm(
text="Are you sure you want to destroy this cluster?",
abort=True)
print("Destroying {c}...".format(c=cluster.name))
cluster.destroy()
@cli.command()
@click.argument('cluster-name', required=False)
@click.option('--master-hostname-only', is_flag=True, default=False)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.pass_context
def describe(
cli_context,
cluster_name,
master_hostname_only,
ec2_region,
ec2_vpc_id):
"""
Describe an existing cluster.
Leave out the cluster name to find all Flintrock-managed clusters.
The output of this command is both human- and machine-friendly. Full cluster
descriptions are output in YAML.
"""
provider = cli_context.obj['provider']
search_area = ""
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if cluster_name:
cluster_names = [cluster_name]
else:
cluster_names = []
if provider == 'ec2':
search_area = "in region {r}".format(r=ec2_region)
clusters = ec2.get_clusters(
cluster_names=cluster_names,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
if cluster_name:
cluster = clusters[0]
if master_hostname_only:
print(cluster.master_host)
else:
cluster.print()
else:
if master_hostname_only:
for cluster in sorted(clusters, key=lambda x: x.name):
print(cluster.name + ':', cluster.master_host)
else:
print("Found {n} cluster{s}{space}{search_area}.".format(
n=len(clusters),
s='' if len(clusters) == 1 else 's',
space=' ' if search_area else '',
search_area=search_area))
if clusters:
print('---')
for cluster in sorted(clusters, key=lambda x: x.name):
cluster.print()
# TODO: Provide different command or option for going straight to Spark Shell. (?)
@cli.command()
@click.argument('cluster-name')
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
# TODO: Move identity-file to global, non-provider-specific option. (?)
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.pass_context
def login(cli_context, cluster_name, ec2_region, ec2_vpc_id, ec2_identity_file, ec2_user):
"""
Login to the master of an existing cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
# TODO: Check that master up first and error out cleanly if not
# via ClusterInvalidState.
cluster.login(user=user, identity_file=identity_file)
@cli.command()
@click.argument('cluster-name')
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
# TODO: Move identity-file to global, non-provider-specific option. (?)
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.pass_context
def start(cli_context, cluster_name, ec2_region, ec2_vpc_id, ec2_identity_file, ec2_user):
"""
Start an existing, stopped cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.start_check()
print("Starting {c}...".format(c=cluster_name))
cluster.start(user=user, identity_file=identity_file)
@cli.command()
@click.argument('cluster-name')
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--assume-yes/--no-assume-yes', default=False)
@click.pass_context
def stop(cli_context, cluster_name, ec2_region, ec2_vpc_id, assume_yes):
"""
Stop an existing, running cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
cluster.stop_check()
if not assume_yes:
cluster.print()
click.confirm(
text="Are you sure you want to stop this cluster?",
abort=True)
print("Stopping {c}...".format(c=cluster_name))
cluster.stop()
print("{c} is now stopped.".format(c=cluster_name))
@cli.command(name='run-command')
@click.argument('cluster-name')
@click.argument('command', nargs=-1)
@click.option('--master-only', help="Run on the master only.", is_flag=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.pass_context
def run_command(
cli_context,
cluster_name,
command,
master_only,
ec2_region,
ec2_vpc_id,
ec2_identity_file,
ec2_user):
"""
Run a shell command on a cluster.
Examples:
flintrock run-command my-cluster 'touch /tmp/flintrock'
flintrock run-command my-cluster -- yum install -y package
Flintrock will return a non-zero code if any of the cluster nodes raises an error
while running the command.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.run_command_check()
print("Running command on {target}...".format(
target="master only" if master_only else "cluster"))
cluster.run_command(
command=command,
master_only=master_only,
user=user,
identity_file=identity_file)
@cli.command(name='copy-file')
@click.argument('cluster-name')
@click.argument('local_path', type=click.Path(exists=True, dir_okay=False))
@click.argument('remote_path', type=click.Path())
@click.option('--master-only', help="Copy to the master only.", is_flag=True)
@click.option('--ec2-region', default='us-east-1', show_default=True)
@click.option('--ec2-vpc-id', default='', help="Leave empty for default VPC.")
@click.option('--ec2-identity-file',
type=click.Path(exists=True, dir_okay=False),
help="Path to SSH .pem file for accessing nodes.")
@click.option('--ec2-user')
@click.option('--assume-yes/--no-assume-yes', default=False, help="Prompt before large uploads.")
@click.pass_context
def copy_file(
cli_context,
cluster_name,
local_path,
remote_path,
master_only,
ec2_region,
ec2_vpc_id,
ec2_identity_file,
ec2_user,
assume_yes):
"""
Copy a local file up to a cluster.
This will copy the file to the same path on each node of the cluster.
Examples:
flintrock copy-file my-cluster /tmp/file.102.txt /tmp/file.txt
flintrock copy-file my-cluster /tmp/spark-defaults.conf /tmp/
Flintrock will return a non-zero code if any of the cluster nodes raises an error.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
# We assume POSIX for the remote path since Flintrock
# only supports clusters running CentOS / Amazon Linux.
if not posixpath.basename(remote_path):
remote_path = posixpath.join(remote_path, os.path.basename(local_path))
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.copy_file_check()
if not assume_yes and not master_only:
file_size_bytes = os.path.getsize(local_path)
num_nodes = len(cluster.slave_ips) + 1 # TODO: cluster.num_nodes
total_size_bytes = file_size_bytes * num_nodes
if total_size_bytes > 10 ** 6:
print("WARNING:")
print(
format_message(
message="""\
You are trying to upload {total_size} bytes ({size} bytes x {count}
nodes in {cluster}). Depending on your upload bandwidth, this may take
a long time.
You may be better off uploading this file to a storage service like
Amazon S3 and downloading it from there to the cluster using
`flintrock run-command ...`.
""".format(
size=file_size_bytes,
count=num_nodes,
cluster=cluster_name,
total_size=total_size_bytes),
wrap=60))
click.confirm(
text="Are you sure you want to continue?",
default=True,
abort=True)
print("Copying file to {target}...".format(
target="master only" if master_only else "cluster"))
cluster.copy_file(
local_path=local_path,
remote_path=remote_path,
master_only=master_only,
user=user,
identity_file=identity_file)
def normalize_keys(obj):
"""
Used to map keys from config files to Python parameter names.
"""
if type(obj) != dict:
return obj
else:
return {k.replace('-', '_'): normalize_keys(v) for k, v in obj.items()}
def config_to_click(config: dict) -> dict:
"""
Convert a dictionary of configurations loaded from a Flintrock config file
to a dictionary that Click can use to set default options.
"""
service_configs = {}
if 'modules' in config:
print(
"WARNING: The name `modules` is deprecated and will be removed "
"in the next version of Flintrock.\n"
"Please update your config file to use `services` instead of `modules`.\n"
"You can do this by calling `flintrock configure`.")
config['services'] = config['modules']
if 'services' in config:
for service in config['services']:
if config['services'][service]:
service_configs.update(
{service + '_' + k: v for (k, v) in config['services'][service].items()})
ec2_configs = {
'ec2_' + k: v for (k, v) in config['providers']['ec2'].items()}
click_map = {
'launch': dict(
list(config['launch'].items()) +
list(ec2_configs.items()) +
list(service_configs.items())),
'describe': ec2_configs,
'destroy': ec2_configs,
'login': ec2_configs,
'start': ec2_configs,
'stop': ec2_configs,
'run-command': ec2_configs,
'copy-file': ec2_configs,
}
return click_map
@cli.command()
@click.option('--locate', is_flag=True, default=False,
help="Don't open an editor. "
"Just open the folder containing the configuration file.")
@click.pass_context
def configure(cli_context, locate):
"""
Configure Flintrock's defaults.
This will open Flintrock's configuration file in your default YAML editor so
you can set your defaults.
"""
config_file = get_config_file()
if not os.path.isfile(config_file):
print("Initializing config file from template...")
os.makedirs(os.path.dirname(config_file), exist_ok=True)
shutil.copyfile(
src=os.path.join(THIS_DIR, 'config.yaml.template'),
dst=config_file)
os.chmod(config_file, mode=0o644)
click.launch(config_file, locate=locate)
def flintrock_is_in_development_mode() -> bool:
"""
Check if Flintrock was installed in development mode.
Use this function to toggle behavior that only Flintrock developers should
see.
"""
# This esoteric technique was pulled from pip.
# See: https://github.com/pypa/pip/pull/3258/files#diff-ab583908279e865537dec218246edcfcR310
for path_item in sys.path:
egg_link = os.path.join(path_item, 'Flintrock.egg-link')
if os.path.isfile(egg_link):
return True
else:
return False
def set_open_files_limit(desired_limit):
"""
On POSIX systems, set the open files limit to the desired number, unless
it is already equal to or higher than that.
Setting a high limit enables Flintrock to launch or interact with really
large clusters.
Background discussion: https://github.com/nchammas/flintrock/issues/81
"""
soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft_limit < desired_limit:
if desired_limit > hard_limit:
warnings.warn(
"Flintrock cannot set the open files limit to {l} "
"because the OS hard limit is {h}. Going with {h}. "
"You may have problems launching or interacting with "
"really large clusters."
.format(
l=desired_limit,
h=hard_limit),
category=RuntimeWarning,
stacklevel=2)
resource.setrlimit(
resource.RLIMIT_NOFILE,
(min(desired_limit, hard_limit), hard_limit))
def main() -> int:
if flintrock_is_in_development_mode():
warnings.simplefilter(action='always', category=DeprecationWarning)
# warnings.simplefilter(action='always', category=ResourceWarning)
set_open_files_limit(4096)
try:
# We pass in obj so we can add attributes to it, like provider, which
# get shared by all commands.
# See: http://click.pocoo.org/6/api/#click.Context
cli(obj={})
except NothingToDo as e:
print(e)
return 0
except UsageError as e:
print(e, file=sys.stderr)
return 2
except Error as e:
print(e, file=sys.stderr)
return 1
```
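A small sketch of how the validation helpers above behave. The option names and values are made up, and the import path assumes the CLI module is importable as `flintrock.flintrock`:
```python
# Hypothetical driver for option_requires / mutually_exclusive; values are made up.
from flintrock.exceptions import UsageError
from flintrock.flintrock import mutually_exclusive, option_requires

scope = {"install_hdfs": True, "hdfs_version": None}
try:
    option_requires(option="--install-hdfs", requires_all=["--hdfs-version"], scope=scope)
except UsageError as e:
    print(e)  # --hdfs-version is reported as missing but required by --install-hdfs

scope = {"spark_version": "2.0.0", "spark_git_commit": "abc123"}
try:
    mutually_exclusive(options=["--spark-version", "--spark-git-commit"], scope=scope)
except UsageError as e:
    print(e)  # both are set, so they are reported as mutually exclusive
```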
#### File: flintrock/tests/test_acceptance.py
```python
import json
import subprocess
import urllib.request
# Flintrock modules
from flintrock.exceptions import ClusterInvalidState
def test_describe_stopped_cluster(stopped_cluster):
p = subprocess.run([
'flintrock', 'describe', stopped_cluster],
stdout=subprocess.PIPE)
assert p.returncode == 0
assert p.stdout.startswith(stopped_cluster.encode())
def test_stop_stopped_cluster(stopped_cluster):
p = subprocess.run([
'flintrock', 'stop', stopped_cluster],
stdout=subprocess.PIPE)
assert p.returncode == 0
assert p.stdout == b"Cluster is already stopped.\n"
def test_try_launching_duplicate_stopped_cluster(stopped_cluster):
p = subprocess.run([
'flintrock', 'launch', stopped_cluster],
stderr=subprocess.PIPE)
assert p.returncode == 1
assert p.stderr.decode('utf-8').startswith(
"Cluster {c} already exists".format(c=stopped_cluster))
def test_start_running_cluster(running_cluster):
p = subprocess.run([
'flintrock', 'start', running_cluster],
stdout=subprocess.PIPE)
assert p.returncode == 0
assert p.stdout == b"Cluster is already running.\n"
def test_try_launching_duplicate_cluster(running_cluster):
p = subprocess.run([
'flintrock', 'launch', running_cluster],
stderr=subprocess.PIPE)
assert p.returncode == 1
assert p.stderr.decode('utf-8').startswith(
"Cluster {c} already exists".format(c=running_cluster))
def test_describe_running_cluster(running_cluster):
p = subprocess.run([
'flintrock', 'describe', running_cluster],
stdout=subprocess.PIPE)
assert p.returncode == 0
assert p.stdout.startswith(running_cluster.encode())
def test_run_command_on_running_cluster(running_cluster):
p = subprocess.run([
'flintrock', 'run-command', running_cluster, '--', 'ls', '-l'])
assert p.returncode == 0
def test_copy_file_on_running_cluster(running_cluster, local_file):
p = subprocess.run([
'flintrock', 'copy-file', running_cluster, local_file, '/tmp/copied_from_local'])
assert p.returncode == 0
def test_hdfs_on_running_cluster(running_cluster, remote_file):
hdfs_path = '/hdfs_file'
p = subprocess.run([
'flintrock', 'run-command', running_cluster, '--master-only', '--',
'./hadoop/bin/hdfs', 'dfs', '-put', remote_file, hdfs_path])
assert p.returncode == 0
p = subprocess.run([
'flintrock', 'run-command', running_cluster, '--',
'./hadoop/bin/hdfs', 'dfs', '-cat', hdfs_path])
assert p.returncode == 0
def test_spark_on_running_cluster(running_cluster, remote_file):
# TODO: Run a real query; e.g. sc.parallelize(range(10)).count()
p = subprocess.run([
'flintrock', 'run-command', running_cluster, '--',
'./spark/bin/pyspark', '--help'])
assert p.returncode == 0
p = subprocess.run([
'flintrock', 'describe', running_cluster, '--master-hostname-only'],
stdout=subprocess.PIPE)
master_address = p.stdout.strip().decode('utf-8')
assert p.returncode == 0
spark_master_ui = 'http://{m}:8080/json/'.format(m=master_address)
spark_ui_info = json.loads(
urllib.request.urlopen(spark_master_ui).read().decode('utf-8'))
assert spark_ui_info['status'] == 'ALIVE'
def test_operations_against_non_existent_cluster():
cluster_name = 'this_cluster_doesnt_exist_yo'
expected_error_message = (
b"No cluster " + cluster_name.encode('utf-8') + b" in region ")
for command in ['describe', 'stop', 'start', 'login', 'destroy']:
p = subprocess.run(
['flintrock', command, cluster_name],
stderr=subprocess.PIPE)
assert p.returncode == 1
assert p.stderr.startswith(expected_error_message)
for command in ['run-command']:
p = subprocess.run(
['flintrock', command, cluster_name, 'ls'],
stderr=subprocess.PIPE)
assert p.returncode == 1
assert p.stderr.startswith(expected_error_message)
for command in ['copy-file']:
p = subprocess.run(
['flintrock', command, cluster_name, __file__, '/remote/path'],
stderr=subprocess.PIPE)
assert p.returncode == 1
assert p.stderr.startswith(expected_error_message)
def test_operations_against_stopped_cluster(stopped_cluster):
p = subprocess.run(
['flintrock', 'run-command', stopped_cluster, 'ls'],
stderr=subprocess.PIPE)
expected_error_message = str(
ClusterInvalidState(
attempted_command='run-command',
state='stopped'))
assert p.returncode == 1
assert p.stderr.decode('utf-8').strip() == expected_error_message
p = subprocess.run(
['flintrock', 'copy-file', stopped_cluster, __file__, '/remote/path'],
stderr=subprocess.PIPE)
expected_error_message = str(
ClusterInvalidState(
attempted_command='copy-file',
state='stopped'))
assert p.returncode == 1
assert p.stderr.decode('utf-8').strip() == expected_error_message
def test_launch_with_bad_ami():
p = subprocess.run([
'flintrock', 'launch', 'whatever-cluster',
'--ec2-ami', 'ami-badbad00'],
stderr=subprocess.PIPE)
assert p.returncode == 1
assert p.stderr.startswith(b"Error: Could not find")
```
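These tests lean on `stopped_cluster`, `running_cluster`, `local_file`, and `remote_file` pytest fixtures defined elsewhere in the project (its conftest.py is not shown here). Purely as an illustration of the fixture mechanism they rely on, a hypothetical `local_file` fixture could look like this:

```python
import tempfile

import pytest


@pytest.fixture
def local_file():
    # Hypothetical stand-in: yield a path to a throwaway file,
    # which is cleaned up automatically when the test finishes.
    with tempfile.NamedTemporaryFile(suffix='.txt') as f:
        f.write(b'some test data')
        f.flush()
        yield f.name
```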
#### File: flintrock/tests/test_static.py
```python
import compileall
import os
# External modules
import pep8
import yaml
FLINTROCK_ROOT_DIR = (
os.path.dirname(
os.path.dirname(
os.path.realpath(__file__))))
TEST_TARGETS = [
'setup.py',
'flintrock/',
'tests/']
TEST_PATHS = [
os.path.join(FLINTROCK_ROOT_DIR, path) for path in TEST_TARGETS]
def test_code_compiles():
for path in TEST_PATHS:
if os.path.isdir(path):
result = compileall.compile_dir(path)
else:
result = compileall.compile_file(path)
# NOTE: This is not publicly documented, but a return of 1 means
# the compilation succeeded.
# See: http://bugs.python.org/issue25768
assert result == 1
def test_pep8_compliance():
style = pep8.StyleGuide(
config_file=os.path.join(FLINTROCK_ROOT_DIR, 'tox.ini'))
result = style.check_files(TEST_PATHS)
assert result.total_errors == 0
def test_config_template_is_valid():
config_template = os.path.join(FLINTROCK_ROOT_DIR, 'flintrock', 'config.yaml.template')
with open(config_template) as f:
yaml.safe_load(f)
``` |
{
"source": "jperezlapillo/Wave-U-Net",
"score": 3
} |
#### File: Wave-U-Net/Models/Mhe.py
```python
import tensorflow as tf
import math
"""
This script has been adapted from Liu et al. (2019)
https://github.com/wy1iu/MHE
"""
def add_thomson_constraint(W, n_filt, model, power):
"""
MHE implementation for hidden layers
:param input: a weights tensor with shape [filter_size, num_channels, num_filters]
:param model: indicates MHE model to use (standard or half-space)
:param power: alternatives for power-s parameter, Euclidean or angular distances
:return: adds the calculated thompson loss for the current layer to a tf collection
"""
W = tf.reshape(W, [-1, n_filt])
if model =='half_mhe':
W_neg = W*-1
W = tf.concat((W,W_neg), axis=1)
n_filt *= 2
W_norm = tf.sqrt(tf.reduce_sum(W*W, [0], keepdims=True) + 1e-4) # it was originally keep_dims
norm_mat = tf.matmul(tf.transpose(W_norm), W_norm)
inner_pro = tf.matmul(tf.transpose(W), W)
inner_pro /= norm_mat
if power =='0':
cross_terms = 2.0 - 2.0 * inner_pro
final = -tf.log(cross_terms + tf.diag([1.0] * n_filt))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_loss = 1 * tf.reduce_sum(final) / cnt
elif power =='1':
cross_terms = (2.0 - 2.0 * inner_pro + tf.diag([1.0] * n_filt))
final = tf.pow(cross_terms, tf.ones_like(cross_terms) * (-0.5))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_loss = 1 * tf.reduce_sum(final) / cnt
elif power =='2':
cross_terms = (2.0 - 2.0 * inner_pro + tf.diag([1.0] * n_filt))
final = tf.pow(cross_terms, tf.ones_like(cross_terms) * (-1))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_loss = 1* tf.reduce_sum(final) / cnt
elif power =='a0':
acos = tf.acos(inner_pro)/math.pi
acos += 1e-4
final = -tf.log(acos)
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_loss = 1* tf.reduce_sum(final) / cnt
elif power =='a1':
acos = tf.acos(inner_pro)/math.pi
acos += 1e-4
final = tf.pow(acos, tf.ones_like(acos) * (-1))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_loss = 1e-1 * tf.reduce_sum(final) / cnt
elif power =='a2':
acos = tf.acos(inner_pro)/math.pi
acos += 1e-4
final = tf.pow(acos, tf.ones_like(acos) * (-2))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_loss = 1e-1 * tf.reduce_sum(final) / cnt
tf.add_to_collection('thomson_loss', th_loss)
def add_thomson_constraint_final(W, n_filt, power):
"""
MHE implementation for output layer
:param input: a weights tensor with shape [filter_size, num_channels, num_filters]
:param power: alternatives for power-s parameter, Euclidean or angular distances
:return: adds the calculated thompson loss for the current layer to a tf collection
"""
W = tf.reshape(W, [-1, n_filt])
W_norm = tf.sqrt(tf.reduce_sum(W*W, [0], keepdims=True) + 1e-4) # it was originally keep_dims
norm_mat = tf.matmul(tf.transpose(W_norm), W_norm)
inner_pro = tf.matmul(tf.transpose(W), W)
inner_pro /= norm_mat
if power =='0':
cross_terms = 2.0 - 2.0 * inner_pro
final = -tf.log(cross_terms + tf.diag([1.0] * n_filt))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_final = 10 * tf.reduce_sum(final) / cnt
elif power =='1':
cross_terms = (2.0 - 2.0 * inner_pro + tf.diag([1.0] * n_filt))
final = tf.pow(cross_terms, tf.ones_like(cross_terms) * (-0.5))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_final = 10 * tf.reduce_sum(final) / cnt
elif power =='2':
cross_terms = (2.0 - 2.0 * inner_pro + tf.diag([1.0] * n_filt))
final = tf.pow(cross_terms, tf.ones_like(cross_terms) * (-1))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_final = 10 * tf.reduce_sum(final) / cnt
elif power =='a0':
acos = tf.acos(inner_pro)/math.pi
acos += 1e-4
final = -tf.log(acos)
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_final = 10 * tf.reduce_sum(final) / cnt
elif power =='a1':
acos = tf.acos(inner_pro)/math.pi
acos += 1e-4
final = tf.pow(acos, tf.ones_like(acos) * (-1))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_final = 1 * tf.reduce_sum(final) / cnt
elif power =='a2':
acos = tf.acos(inner_pro)/math.pi
acos += 1e-4
final = tf.pow(acos, tf.ones_like(acos) * (-2))
final -= tf.matrix_band_part(final, -1, 0)
cnt = n_filt * (n_filt - 1) / 2.0
th_final = 1 * tf.reduce_sum(final) / cnt
tf.add_to_collection('thomson_final', th_final)
```
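Both helpers only register their regularization terms in a TensorFlow collection; the training script is expected to read the collection back and fold it into the objective. A minimal TF1-style sketch of that wiring, with an illustrative toy kernel and weighting (none of these values come from the repository):

```python
import tensorflow as tf

from Models.Mhe import add_thomson_constraint

# Toy 1-D conv kernel: [filter_size, num_in_channels, num_filters]
W = tf.get_variable('W_demo', shape=[15, 2, 64])
add_thomson_constraint(W, 64, model='mhe', power='0')

base_loss = tf.constant(0.0)  # stands in for the separation loss
mhe_weight = 1e-2             # illustrative regularization strength
total_loss = base_loss + mhe_weight * tf.add_n(tf.get_collection('thomson_loss'))
```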
#### File: Wave-U-Net/Models/UnetAudioSeparator.py
```python
import tensorflow as tf
import Models.InterpolationLayer
import Utils
from Utils import LeakyReLU
import numpy as np
import Models.OutputLayer
import Models.Mhe
from Models.Mhe import add_thomson_constraint
class UnetAudioSeparator:
'''
U-Net separator network for singing voice separation.
    Uses valid convolutions, so it predicts for the centre part of the input - only certain input and output shapes are therefore possible (see the get_padding function)
'''
def __init__(self, model_config):
'''
        Initialize U-net
        :param model_config: configuration dict; the keys read here include
            num_layers (number of down- and upscaling layers in the network),
            mhe (whether MHE regularization will be applied),
            mhe_model (MHE model to use, standard or half-space), and
            mhe_power (Euclidean or angular distance for MHE)
'''
self.num_layers = model_config["num_layers"]
self.num_initial_filters = model_config["num_initial_filters"]
self.filter_size = model_config["filter_size"]
self.merge_filter_size = model_config["merge_filter_size"]
self.input_filter_size = model_config["input_filter_size"]
self.output_filter_size = model_config["output_filter_size"]
self.upsampling = model_config["upsampling"]
self.output_type = model_config["output_type"]
self.context = model_config["context"]
self.padding = "VALID" if model_config["context"] else "SAME" # requires capital letters here
self.source_names = model_config["source_names"]
self.num_channels = 1 if model_config["mono_downmix"] else 2
self.output_activation = model_config["output_activation"]
self.mhe = model_config["mhe"]
self.mhe_model = model_config["mhe_model"]
self.mhe_power = model_config["mhe_power"]
def get_padding(self, shape):
'''
Calculates the required amounts of padding along each axis of the input and output, so that the Unet works and has the given shape as output shape
:param shape: Desired output shape
:return: Input_shape, output_shape, where each is a list [batch_size, time_steps, channels]
'''
if self.context:
# Check if desired shape is possible as output shape - go from output shape towards lowest-res feature map
rem = float(shape[1]) # Cut off batch size number and channel
# Output filter size
rem = rem - self.output_filter_size + 1
# Upsampling blocks
for i in range(self.num_layers):
rem = rem + self.merge_filter_size - 1
                rem = (rem + 1.) / 2.  # out = in + in - 1 <=> in = (out+1)/2
# Round resulting feature map dimensions up to nearest integer
x = np.asarray(np.ceil(rem),dtype=np.int64)
assert(x >= 2)
# Compute input and output shapes based on lowest-res feature map
output_shape = x
input_shape = x
# Extra conv
input_shape = input_shape + self.filter_size - 1
# Go from centre feature map through up- and downsampling blocks
for i in range(self.num_layers):
output_shape = 2*output_shape - 1 #Upsampling
output_shape = output_shape - self.merge_filter_size + 1 # Conv
input_shape = 2*input_shape - 1 # Decimation
if i < self.num_layers - 1:
input_shape = input_shape + self.filter_size - 1 # Conv
else:
input_shape = input_shape + self.input_filter_size - 1
# Output filters
output_shape = output_shape - self.output_filter_size + 1
input_shape = np.concatenate([[shape[0]], [input_shape], [self.num_channels]])
output_shape = np.concatenate([[shape[0]], [output_shape], [self.num_channels]])
return input_shape, output_shape
else:
return [shape[0], shape[1], self.num_channels], [shape[0], shape[1], self.num_channels]
def get_output(self, input, training, return_spectrogram=False, reuse=True):
'''
Creates symbolic computation graph of the U-Net for a given input batch
        NOTE: the *tf.layers.conv1d* implementation was changed to *tf.nn.conv1d* in order to declare weights explicitly
and use them for MHE regularization. Hence, the activation function has to be declared outside the convolution
:param input: Input batch of mixtures, 3D tensor [batch_size, num_samples, num_channels]
:param reuse: Whether to create new parameter variables or reuse existing ones (JPL: doesn't change output)
:return: U-Net output: List of source estimates. Each item is a 3D tensor [batch_size, num_out_samples, num_channels]
'''
with tf.variable_scope("separator", reuse=reuse):
enc_outputs = list()
current_layer = input
# Down-convolution: Repeat strided conv
for i in range(self.num_layers):
# Variable scope corresponding to each layer
with tf.variable_scope("down_conv_"+str(i)): #, reuse=reuse):
# 1. Define weights tensor for downsampling blocks
n_filt = self.num_initial_filters + (self.num_initial_filters * i) # number of filters in the current layer
num_in_channels = current_layer.get_shape().as_list()[-1] # get number of in channels from input data
shape = [self.filter_size, num_in_channels, n_filt] # should be [kernel_size, num_in_channels, num_filters]
W = tf.get_variable('W', shape=shape) #, initializer=tf.random_normal_initializer()) # get weights for a given layer
# 2. Add MHE (thompson constraint) to the collection if in use
if self.mhe:
add_thomson_constraint(W, n_filt, self.mhe_model, self.mhe_power)
# 3. Create layer using tf.nn.conv1d instead of tf.layer.conv1d: this involves applying activation outside the function
current_layer = tf.nn.conv1d(current_layer, W, stride=1, padding=self.padding) # out = in - filter + 1
current_layer = tf.nn.leaky_relu(current_layer) # Built-in Leaky ReLu with alpha=0.2 (default) as in Utils.LeakyReLu
enc_outputs.append(current_layer) # Append the resulting feature vector to the output
current_layer = current_layer[:,::2,:] # Decimate by factor of 2 # out = (in-1)/2 + 1
# Last layer of the downsampling path to obtain features
with tf.variable_scope("down_conv_"+str(self.num_layers)): #, reuse=reuse):
n_filt = self.num_initial_filters + (self.num_initial_filters * self.num_layers) # number of filters in last layer
num_in_channels = current_layer.get_shape().as_list()[-1] # get number of in channels from input data
shape = [self.filter_size, num_in_channels, n_filt]
W = tf.get_variable('W', shape=shape) #, initializer=tf.random_normal_initializer()) # get weights
# Add MHE (thompson constraint) to the collection if in use
if self.mhe:
add_thomson_constraint(W, n_filt, self.mhe_model, self.mhe_power)
# Convolution STRIDE=1 WASNT IN THE ORIGINAL U-NET. THE SAME FOR NEXT CONV
current_layer = tf.nn.conv1d(current_layer, W, stride=1, padding=self.padding) # One more conv here since we need to compute features after last decimation
current_layer = tf.nn.leaky_relu(current_layer) # Built-in Leaky ReLu with alpha=0.2 (default) as in Utils.LeakyReLu
# Feature map here shall be X along one dimension
# Upconvolution
for i in range(self.num_layers):
#UPSAMPLING
current_layer = tf.expand_dims(current_layer, axis=1)
if self.upsampling == 'learned':
# Learned interpolation between two neighbouring time positions by using a convolution filter of width 2, and inserting the responses in the middle of the two respective inputs
current_layer = Models.InterpolationLayer.learned_interpolation_layer(current_layer, self.padding, i)
else:
if self.context:
current_layer = tf.image.resize_bilinear(current_layer, [1, current_layer.get_shape().as_list()[2] * 2 - 1], align_corners=True)
else:
current_layer = tf.image.resize_bilinear(current_layer, [1, current_layer.get_shape().as_list()[2]*2]) # out = in + in - 1
current_layer = tf.squeeze(current_layer, axis=1)
# UPSAMPLING FINISHED
assert(enc_outputs[-i-1].get_shape().as_list()[1] == current_layer.get_shape().as_list()[1] or self.context) #No cropping should be necessary unless we are using context
current_layer = Utils.crop_and_concat(enc_outputs[-i-1], current_layer, match_feature_dim=False)
# Change implementation to tf.nn.conv1d to save weights and use them to calculate MHE
with tf.variable_scope("up_conv_"+str(i)): #, reuse=reuse):
n_filt = self.num_initial_filters + (self.num_initial_filters * (self.num_layers - i - 1))
num_in_channels = current_layer.get_shape().as_list()[-1] # get number of in channels from input data
shape = [self.merge_filter_size, num_in_channels, n_filt] # merge_filter_size --> size of the upsampling filters
W = tf.get_variable('W', shape=shape) #, initializer=tf.random_normal_initializer()) # get weights
# Add MHE (thompson constraint) to the collection when in use
if self.mhe:
add_thomson_constraint(W, n_filt, self.mhe_model, self.mhe_power)
# De-Convolution
current_layer = tf.nn.conv1d(current_layer, W, stride=1, padding=self.padding) # out = in - filter + 1
current_layer = tf.nn.leaky_relu(current_layer) # Built-in Leaky ReLu with alpha=0.2 (default) as in Utils.LeakyReLu
# Last concatenation
current_layer = Utils.crop_and_concat(input, current_layer, match_feature_dim=False)
# Output layer
# Determine output activation function
if self.output_activation == "tanh":
out_activation = tf.tanh
elif self.output_activation == "linear":
out_activation = lambda x: Utils.AudioClip(x, training)
else:
raise NotImplementedError
if self.output_type == "direct":
return Models.OutputLayer.independent_outputs(current_layer, self.source_names, self.num_channels, self.output_filter_size, self.padding, out_activation)
elif self.output_type == "difference":
cropped_input = Utils.crop(input,current_layer.get_shape().as_list(), match_feature_dim=False)
#return Models.OutputLayer.difference_output(cropped_input, current_layer, self.source_names, self.num_channels, self.output_filter_size, self.padding, out_activation, training, self.mhe, self.mhe_power, reuse) # This line if MHE for Output layer is in use
return Models.OutputLayer.difference_output(cropped_input, current_layer, self.source_names, self.num_channels, self.output_filter_size, self.padding, out_activation, training) # Use this line if MHE for Output layer is not implemented
else:
raise NotImplementedError
``` |
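The constructor reads everything from a single `model_config` dictionary, so the easiest way to see which shapes the network accepts is to build one and call `get_padding`. A hedged sketch, assuming the repository's modules and a TensorFlow 1.x environment are importable; the config values below are illustrative, not the defaults shipped in the project's Config.py:

```python
from Models.UnetAudioSeparator import UnetAudioSeparator

model_config = {
    "num_layers": 12, "num_initial_filters": 24,
    "filter_size": 15, "merge_filter_size": 5,
    "input_filter_size": 15, "output_filter_size": 1,
    "upsampling": "linear", "output_type": "difference",
    "context": True, "source_names": ["accompaniment", "vocals"],
    "mono_downmix": True, "output_activation": "tanh",
    "mhe": True, "mhe_model": "mhe", "mhe_power": "a2",
}
separator = UnetAudioSeparator(model_config)
# Desired output shape: [batch_size, num_samples, num_channels]
input_shape, output_shape = separator.get_padding([16, 16384, 1])
print(input_shape, output_shape)
```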
{
"source": "jperezlatimes/p2p-python",
"score": 2
} |
#### File: p2p-python/p2p/__init__.py
```python
from builtins import str
from builtins import range
from builtins import object
import os
import re
import json
import math
import logging
import requests
import warnings
from time import mktime
from copy import deepcopy
from datetime import datetime
from datetime import date
from p2p import utils
from p2p.cache import NoCache
from p2p.decorators import retry
from .adapters import TribAdapter
from .filters import get_custom_param_value
from wsgiref.handlers import format_date_time
try:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
str = str
unicode = str
bytes = bytes
basestring = (str,bytes)
else:
# 'unicode' exists, must be Python 2
str = str
unicode = unicode
bytes = str
basestring = basestring
from .errors import (
P2PException,
P2PFileError,
P2PSlugTaken,
P2PNotFound,
P2PForbidden,
P2PSearchError,
P2PTimeoutError,
P2PRetryableError,
P2PFileURLNotFound,
P2PInvalidFileType,
P2PEncodingMismatch,
P2PUnknownAttribute,
P2PPhotoUploadError,
P2PInvalidAccessDefinition,
P2PUniqueConstraintViolated
)
log = logging.getLogger('p2p')
def get_connection():
"""
Get a connected p2p object. This function is meant to auto-discover
the settings from your shell environment or from Django.
We'll read these from your shell variables::
export P2P_API_KEY=your_p2p_api_key
export P2P_API_URL=url_of_p2p_endpoint
# Optional
export P2P_API_DEBUG=plz # display an http log
export P2P_IMAGE_SERVICES_URL=url_of_image_services_endpoint
Or those same settings from your Django settings::
P2P_API_KEY = your_p2p_api_key
P2P_API_URL = url_of_p2p_endpoint
P2P_API_DEBUG = plz # display an http log
# Optional
P2P_IMAGE_SERVICES_URL = url_of_image_services_endpoint
If you need to pass in your config, just create a new p2p object.
"""
# Try getting settings from Django
try:
from django.conf import settings
return P2P(
url=settings.P2P_API_URL,
auth_token=settings.P2P_API_KEY,
debug=settings.DEBUG,
preserve_embedded_tags=getattr(
settings,
'P2P_PRESERVE_EMBEDDED_TAGS',
True
),
image_services_url=getattr(
settings,
'P2P_IMAGE_SERVICES_URL',
None
)
)
except ImportError:
# Try getting settings from environment variables
if 'P2P_API_KEY' in os.environ:
kwargs = dict(
auth_token=os.environ['P2P_API_KEY'],
debug=os.environ.get('P2P_API_DEBUG', False),
preserve_embedded_tags=os.environ.get(
'P2P_PRESERVE_EMBEDDED_TAGS',
True
),
image_services_url=os.environ.get(
'P2P_IMAGE_SERVICES_URL',
None
)
)
if os.environ.get('P2P_API_URL', None):
kwargs['url'] = os.environ['P2P_API_URL']
return P2P(**kwargs)
raise P2PException(
"No connection settings available. Please put settings "
"in your environment variables or your Django config"
)
class P2P(object):
"""
Get a connection to the P2P Content Services API::
p2p = P2P(my_p2p_url, my_auth_token)
You can send debug messages to stderr by using the keyword::
p2p = P2P(my_p2p_url, my_auth_token, debug=True)
A P2P object can cache the API calls you make. Pass a new Cache_
object with the cache keyword::
p2p = P2P(my_p2p_url, my_auth_token, debug=True
cache=DictionaryCache())
A DictionaryCache just caches in a python variable. If you're using
Django caching::
p2p = P2P(my_p2p_url, my_auth_token, debug=True
cache=DjangoCache())
"""
def __init__(
self,
auth_token,
url="http://content-api.p2p.tribuneinteractive.com",
debug=False,
cache=NoCache(),
image_services_url=None,
product_affiliate_code='lanews',
source_code='latimes',
webapp_name='tRibbit',
state_filter='working,live,pending,copyready',
preserve_embedded_tags=True
):
self.config = {
'P2P_API_ROOT': url,
'P2P_API_KEY': auth_token,
'IMAGE_SERVICES_URL': image_services_url,
}
self.cache = cache
self.debug = debug
self.product_affiliate_code = product_affiliate_code
self.source_code = source_code
self.webapp_name = webapp_name
self.state_filter = state_filter
self.preserve_embedded_tags = preserve_embedded_tags
self.default_filter = {
'product_affiliate': self.product_affiliate_code,
'state': self.state_filter
}
self.default_content_item_query = {
'include': [
'web_url',
'section',
'related_items',
'content_topics',
'embedded_items'
],
'filter': self.default_filter
}
self.content_item_defaults = {
"content_item_type_code": "blurb",
"product_affiliate_code": self.product_affiliate_code,
"source_code": self.source_code,
"content_item_state_code": "live",
}
self.collection_defaults = {
"productaffiliate_code": self.product_affiliate_code,
}
self.s = requests.Session()
self.s.mount('https://', TribAdapter())
def get_content_item(self, slug, query=None, force_update=False):
"""
Get a single content item by slug.
        Takes an optional `query` parameter which is a dictionary containing
parameters to pass along in the API call. See the P2P API docs
for details on parameters.
Use the parameter `force_update=True` to update the cache for this
item and query.
"""
if not query:
query = self.default_content_item_query
ci = self.cache.get_content_item(slug=slug, query=query)
if ci is None:
j = self.get("/content_items/%s.json" % (slug), query)
ci = j['content_item']
self.cache.save_content_item(ci, query=query)
elif force_update:
j = self.get("/content_items/%s.json" % (slug),
query, if_modified_since=ci['last_modified_time'])
if j:
ci = j['content_item']
self.cache.save_content_item(ci, query=query)
return ci
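    # Typical call (the slug below is made up): the first request populates the
    # cache; passing force_update=True afterwards re-fetches with an
    # If-Modified-Since header built from the cached last_modified_time.
    #   item = p2p.get_content_item('la-na-example-story-20160101')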
def get_multi_content_items(self, ids, query=None, force_update=False):
"""
Get a bunch of content items at once. We need to use the content items
ids to use this API call.
The API only allows 25 items to be requested at once, so this function
breaks the list of ids into groups of 25 and makes multiple API calls.
        Takes an optional `query` parameter which is a dictionary containing
parameters to pass along in the API call. See the P2P API docs
for details on parameters.
"""
ret = list()
ids_query = list()
if_modified_since = format_date_time(
mktime(datetime(2000, 1, 1).utctimetuple()))
if not query:
query = self.default_content_item_query
# Pull as many items out of cache as possible
ret = [
self.cache.get_content_item(
id=i, query=query) for i in ids
]
assert len(ids) == len(ret)
# Go through what we had in cache and see if we need to
# retrieve anything
for i in range(len(ret)):
if ret[i] is None:
ids_query.append({
"id": ids[i],
"if_modified_since": if_modified_since,
})
elif force_update:
ids_query.append({
"id": ids[i],
"if_modified_since": format_date_time(
mktime(ret[i]['last_modified_time'].utctimetuple())),
})
if len(ids_query) > 0:
# We can only request 25 things at a time
# so we're gonna break up the list into batches
max_items = 25
# we have to use <gasp>MATH</gasp>
num_items = len(ids_query)
# how many batches of max_items do we have?
num_batches = int(
math.ceil(float(num_items) / float(max_items)))
# make a list of indices where we should break the item list
index_breaks = [j * max_items for j in range(num_batches)]
# break up the items into batches of 25
batches = [ids_query[i:i + max_items] for i in index_breaks]
resp = list()
for items in batches:
multi_query = query.copy()
multi_query['content_items'] = items
resp += self.post_json(
'/content_items/multi.json', multi_query)
new_items = list()
remove_ids = list()
for i in range(len(ret)):
if ret[i] is None or force_update:
new_item = resp.pop(0)
assert ids[i] == new_item['id']
if new_item['status'] == 200:
ret[i] = new_item['body']['content_item']
new_items.append(new_item['body']['content_item'])
elif new_item['status'] == 404:
ret[i] = None
remove_ids.append(ids[i])
elif new_item['status'] == 304:
continue
else:
raise P2PException(
'%(status)s fetching %(id)s' % new_item)
if len(new_items) > 0:
for i in new_items:
self.cache.save_content_item(i, query=query)
try:
if len(remove_ids) > 0:
for i in remove_ids:
self.cache.remove_content_item(id=i)
except NotImplementedError:
pass
return ret
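    # Worked example of the batching above (numbers are illustrative): asking
    # for 60 uncached ids gives num_items = 60, num_batches = ceil(60/25) = 3,
    # index_breaks = [0, 25, 50], so /content_items/multi.json is called three
    # times with batches of 25, 25 and 10 items respectively.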
def update_content_item(self, payload, slug=None):
"""
Update a content item.
Takes a single dictionary representing the content_item to be updated.
Refer to the P2P API docs for the content item field names.
By default this function uses the value of the 'slug' key from the
dictionary to perform the API call. It takes an optional `slug`
parameter in case the dictionary does not contain a 'slug' key or if
the dictionary contains a changed slug.
"""
content = payload.copy()
# Check if content_item is nested or if this is a flat data structure
if 'content_item' in content:
content = content['content_item'].copy()
data = payload.copy()
else:
data = {'content_item': content }
# if a slug was given, remove it from the content item
if slug is None:
slug = content.pop('slug')
try:
content.pop("web_url")
except KeyError:
pass
# Now that we've manipulated the content item, update
# the payload as well
data['content_item'] = content
url = "/content_items/%s.json"
url = url % slug
if not self.preserve_embedded_tags:
url += "?preserve_embedded_tags=false"
resp = self.put_json(url, data)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return resp
def hide_right_rail(self, slug):
"""
Hide the right rail from an HTML story. Provide the slug
of the content item you'd like to update.
"""
params = {
'custom_param_data': {'htmlstory-rhs-column-ad-enable': 'false'},
}
return self.update_content_item(params, slug=slug)
def show_right_rail(self, slug):
"""
Show the right rail on an HTML story
"""
params = {
'custom_param_data': {'htmlstory-rhs-column-ad-enable': 'true'},
}
return self.update_content_item(params, slug=slug)
def show_to_robots(self, slug):
"""
Add metadata to the item so it is seen by robots and remove any
noindex and nofollow tags.
"""
params = {
'custom_param_data': {'metadata-robots': ''},
}
return self.update_content_item(params, slug=slug)
def hide_to_robots(self, slug):
"""
Add metadata to the item so it is hidden from robots using
the noindex and nofollow tags.
"""
params = {
'custom_param_data': {'metadata-robots': 'noindex, nofollow'},
}
return self.update_content_item(params, slug=slug)
def search_topics(self, name):
"""
Searches P2P for topics starting with the given name
"""
params = {
'name': name,
'name_contains': True,
}
return self.get("/topics.json", params)
def add_topic(self, topic_id, slug=None):
"""
        Add a topic to a content item.
        Takes the topic id to add and an optional `slug` parameter naming the
        content item to update. If no slug is given, the value of the 'slug'
        key is popped from the topic_id argument and used to perform the
        API call.
"""
if slug is None:
slug = topic_id.pop('slug')
d = {'add_topic_ids': topic_id}
self.put_json("/content_items/%s.json" % slug, d)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
def remove_topic(self, topic_id, slug=None):
"""
        Remove a topic from a content item.
        Takes the topic id to remove and an optional `slug` parameter naming
        the content item to update. If no slug is given, the value of the
        'slug' key is popped from the topic_id argument and used to perform
        the API call.
"""
if slug is None:
slug = topic_id.pop('slug')
d = {'remove_topic_ids': topic_id}
self.put_json("/content_items/%s.json" % slug, d)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
def create_content_item(self, payload):
"""
Create a new content item.
Takes a single dictionary representing the new content item.
Refer to the P2P API docs for the content item field names.
"""
defaults = self.content_item_defaults.copy()
content = payload.copy()
# Check if content_item is nested or if this is a flat data structure
if 'content_item' in content:
item = content['content_item'].copy()
defaults.update(item)
content['content_item'] = defaults
data = content
else:
content = payload.copy()
defaults.update(content)
data = {'content_item': defaults}
url = '/content_items.json'
if not self.preserve_embedded_tags:
url += "?preserve_embedded_tags=false"
resp = self.post_json(url, data)
return resp
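    # Illustration (field values are made up):
    #   create_content_item({'slug': 'my-new-item', 'title': 'Hello', 'body': '<p>Hi</p>'})
    # merges those fields over content_item_defaults and POSTs
    # {'content_item': {...}} to /content_items.json.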
def clone_content_item(self, slug, clone_slug, keep_embeds=False, keep_relateds=False):
"""
Clone a P2P content item into the current market
Takes a single dict representing the content item to be cloned.
Refer to the P2P API docs for the content item field name
Flags keep_embeds and keep_relateds determines whether the embedded
and/or related items will persist in the cloned object
"""
# Extra include vars
query = {
"include": [
"contributors",
"related_items",
"embedded_items",
"programmed_custom_params",
"web_url",
"geocodes"
],
}
# Get the full fancy content item
content_item = self.get_content_item(slug, query)
# Datetime string format
fmt = '%Y-%m-%d %I:%M %p %Z'
# Format display and publish time
display_time_string = ''
publish_time_string = ''
if content_item.get('display_time'):
display_time_string = content_item.get('display_time').strftime(fmt)
# Format the corrections timestamp
corrections_date = get_custom_param_value(content_item, 'corrections_date', default_value='')
if not isinstance(corrections_date, basestring):
corrections_date = corrections_date.strftime(fmt)
# The story payload
payload = {
'slug': clone_slug,
'title': content_item.get('title'),
'titleline': content_item.get('titleline'),
'kicker_id': content_item.get('kicker_id'),
'seotitle': content_item.get('seotitle'),
'byline': '',
'body': content_item.get('body'),
'dateline': content_item.get('dateline'),
'seodescription': content_item.get('seodescription'),
'seo_keyphrase': content_item.get('seo_keyphrase'),
'content_item_state_code': 'working',
'content_item_type_code': content_item.get('content_item_type_code'),
'display_time': display_time_string,
'product_affiliate_code': self.product_affiliate_code,
'source_code': content_item.get('source_code'),
'canonical_url': content_item.get("web_url"),
}
# Update the custom param data
payload['custom_param_data'] = {
'enable-content-commenting': get_custom_param_value(content_item, 'enable-content-commenting'),
'leadart-size': get_custom_param_value(content_item, 'lead_image_size'),
'story-summary': get_custom_param_value(content_item, 'seodescription', default_value=''),
'article-correction-text': get_custom_param_value(content_item, 'corrections_text', default_value=''),
'article-correction-timestamp': corrections_date,
'snap-user-ids': get_custom_param_value(content_item, 'snap_user_ids', default_value='')
}
# HTML Story specific custom params
if payload['content_item_type_code'] == 'htmlstory':
html_params = {
'htmlstory-rhs-column-ad-enable': get_custom_param_value(content_item, 'htmlstory-rhs-column-ad-enable'),
'htmlstory-headline-enable': get_custom_param_value(content_item, 'htmlstory-headline-enable'),
'htmlstory-byline-enable': get_custom_param_value(content_item, 'htmlstory-byline-enable'),
'disable-publication-date': get_custom_param_value(content_item, 'disable-publication-date')
}
payload['custom_param_data'].update(html_params)
# Get alt_thumbnail_url and old_slug for thumbnail logic below
alt_thumbnail_url = content_item.get('alt_thumbnail_url')
# Only try to update if alt_thumbnail_url is a thing
if content_item.get('alt_thumbnail_url', None):
# data must be nested in this odd photo_upload key
# if source code is available then it will be placed on the payload, else it will
# default to the current users product affiliate source code
payload['photo_upload'] = {
'alt_thumbnail': {
'url': content_item.get('alt_thumbnail_url'),
"source_code": content_item.get('alt_thumb_source_id', self.source_code)
}
}
if keep_embeds:
# Compile the embedded items
payload['embedded_items'] = []
for item in content_item.get('embedded_items'):
embed_item = {
'embeddedcontentitem_id': item['embeddedcontentitem_id'],
'headline': item['headline'],
'subheadline': item['subheadline'],
'brief': item['brief'],
}
payload['embedded_items'].append(embed_item)
if keep_relateds:
# Compile the related items
payload['related_items'] = []
for item in content_item.get('related_items'):
related_item = {
'relatedcontentitem_id': item['relatedcontentitem_id'],
'headline': item['headline'],
'subheadline': item['subheadline'],
'brief': item['brief'],
}
payload['related_items'].append(related_item)
contributors = self._get_cloned_contributors(content_item)
if contributors:
del payload['byline']
payload['contributors'] = contributors
# Clone the thing
clone = self.create_content_item(payload)
clone = clone.get('story', clone.get('html_story'))
# if we have successfully cloned the content item, continue on
if not clone.get('id'):
raise P2PNotFound
return clone['id']
def _get_cloned_contributors(self, content_item):
"""
        Take a content item and derive its contributors.
        This function looks at the byline in a content item and
        calculates the contributors or free-form contributors from it.
"""
clone_contributors = []
# Split apart the byline string and iterate through it
if content_item.get('byline', None):
bylines = content_item.get('byline').split(',')
for byline in bylines:
# Preemptively create a freeform contributor
byline = byline.strip()
byline_item = {"free_form_name": byline}
# Search the contributors array for a matching adv byline
for contributor in content_item.get('contributors'):
# Wade through the nestedness
contributor = contributor['contributor']
if byline.lower() in contributor['title'].lower():
# If a match was found, update the entry with the staff slug
byline_item = {'slug': contributor['slug']}
# Add the final result to the clone_contributors array
clone_contributors.append(byline_item);
return clone_contributors
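    # Illustration (hypothetical data): for a byline of "Jane Doe, John Roe"
    # where the contributors list holds a staff entry titled "Jane Doe" with
    # slug "jane-doe-staff", this returns
    # [{'slug': 'jane-doe-staff'}, {'free_form_name': 'John Roe'}].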
def delete_content_item(self, slug):
"""
Delete the content item out of p2p
"""
result = self.delete(
'/content_items/%s.json' % slug)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return True if "destroyed successfully" in result else False
def create_or_update_content_item(self, content_item):
"""
Attempts to update a content item, if it doesn't exist, attempts to
create it::
create, response = p2p.create_or_update_content_item(item_dict)
TODO: swap the tuple that is returned.
"""
create = False
try:
response = self.update_content_item(content_item)
except P2PException:
response = self.create_content_item(content_item)
create = True
return (create, response)
def junk_content_item(self, slug):
"""
Sets a content item to junk status.
"""
return self.update_content_item({
'slug': slug,
'content_item_state_code': 'junk'
})
def content_item_exists(self, slug):
"""
        Checks for the existence of a slug in content services
"""
exists = True
try:
self.get("/content_items/%s/exists" % (slug))
except P2PNotFound:
exists = False
return exists
def get_kickers(self, params):
"""
Retrieves all kickers for an affiliate.
"""
return self.get("/kickers.json", params)
def search(self, params):
"""
        Searches P2P content items using the given params dictionary; see the
        P2P API docs for the supported search parameters.
"""
return self.get("/content_items/search.json", params)
def search_collections(self, search_token, limit=20, product_affiliate_code=None):
"""
Requests a list of collections from P2P based on search term and owner.
"""
# Make a copy of our collection defaults
params = deepcopy(self.collection_defaults)
# Stick this search in there
params['search_token'] = search_token
# Also add the results length cutoff
params['limit'] = limit
# And if the user has provided a product affiliate code, override that
if product_affiliate_code:
params['productaffiliate_code'] = product_affiliate_code
# Make the search and return the results
return self.get('/collections/search.json', params)['search_results']['collections']
def get_collection(self, code, query=None, force_update=False):
"""
Get the data for this collection. To get the items in a collection,
use get_collection_layout.
"""
if query is None:
query = {'filter': self.default_filter}
if force_update:
data = self.get('/collections/%s.json' % code, query)
collection = data['collection']
self.cache.save_collection(collection, query=query)
else:
collection = self.cache.get_collection(code, query=query)
if collection is None:
data = self.get('/collections/%s.json' % code, query)
collection = data['collection']
self.cache.save_collection(collection, query=query)
return collection
def create_collection(self, data):
"""
Create a new collection. Takes a single argument which should be a
dictionary of collection data.
Example:
p2p.create_collection({
'code': 'my_new_collection',
'name': 'My new collection',
'section_path': '/news/local',
// OPTIONAL PARAMS
'collection_type_code': 'misc', # default 'misc'
'last_modified_time': date, # defaults to now
'product_affiliate_code': 'chinews' # default to instance setting
})
"""
ret = self.post_json(
'/collections.json?id=%s' % data['code'],
{
'collection': {
'code': data['code'],
'name': data['name'],
'collectiontype_id': data.get('collection_type_id', 1),
'last_modified_time': data.get('last_modified_time',
datetime.utcnow()),
'sequence': 999
},
'product_affiliate_code': data.get(
'product_affiliate_code', self.product_affiliate_code),
'section_path': data['section_path']
})
if 'collection' in ret:
return ret['collection']
else:
raise P2PException(ret)
def delete_collection(self, code):
"""
Delete a collection
"""
ret = self.delete(
'/collections/%s.json' % code)
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def override_layout(self, code, content_item_slugs):
"""
Override Collection Layout
"""
ret = self.put_json(
'/collections/override_layout.json?id=%s' % code,
{
'items': content_item_slugs,
'replace_layout': 'true'
}
)
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def push_into_collection(self, code, content_item_slugs):
"""
Push a list of content item slugs onto the top of a collection
"""
# Enforce that a list of slugs is passed in (not a string)
if not isinstance(content_item_slugs, list):
log.warning("[P2P][push_into_collection] content_item_slugs is not a list: %s" % content_item_slugs)
content_item_slugs = [content_item_slugs]
ret = self.put_json(
'/collections/prepend.json?id=%s' % code,
{'items': content_item_slugs})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def suppress_in_collection(
self,
code,
content_item_slugs,
affiliates=[]
):
"""
Suppress a list of slugs in the specified collection
"""
if not affiliates:
affiliates.append(self.product_affiliate_code)
ret = self.put_json(
'/collections/suppress.json?id=%s' % code,
{'items': [{
'slug': slug, 'affiliates': affiliates
} for slug in content_item_slugs]})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def remove_from_collection(self, code, content_item_slugs):
"""
        Remove a list of content item slugs from a collection
"""
# Enforce that a list of slugs is passed in (not a string)
if not isinstance(content_item_slugs, list):
log.warning("[P2P][remove_from_collection] content_item_slugs is not a list: %s" % content_item_slugs)
content_item_slugs = [content_item_slugs]
ret = self.put_json(
'/collections/remove_items.json?id=%s' % code,
{'items': content_item_slugs})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def insert_position_in_collection(
self,
code,
slug,
affiliates=[]
):
"""
        Insert the given slug at the top (position 1) of the specified collection
"""
if not affiliates:
affiliates.append(self.product_affiliate_code)
ret = self.put_json(
'/collections/insert.json?id=%s' % code,
{'items': [{
'slug': slug, 'position': 1
}]})
try:
self.cache.remove_collection(code)
self.cache.remove_collection_layout(code)
except NotImplementedError:
pass
return ret
def append_contributors_to_content_item(self, slug, contributors):
"""
Push a list of editorial staff slugs into a content item's
contributors array for the display of advanced bylines
{
"items": [
{
"slug": "contributor_to_append_1"
},
{
"slug": "contributor_to_append_2"
}
]
}
"""
warnings.warn('append_contributors_to_content_item will be removed in version 2.1', DeprecationWarning)
ret = self.put_json(
'/content_items/%s/append_contributors.json' % slug,
{'items': contributors})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def remove_contributors_from_content_item(self, slug, contributors):
"""
Pops a list of editorial staff slugs from a content item's
contributors array
Takes an array of slugs similar to append_contributors_to_content_item()
"""
ret = self.put_json(
'/content_items/%s/remove_contributors.json' % slug,
{'items': contributors})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def get_content_item_revision_list(self, slug, page):
"""
Accepts a slug and returns a list of revision dictionaries
        Page should be an integer specifying the desired page of results
"""
ret = self.get('/content_items/%s/revisions.json?page=%d' % (slug, page))
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def get_content_item_revision_number(self, slug, number, query=None, related_items_query=None):
"""
Accepts a slug and a revision number, returns dict with
full content item information for that revision
"""
if query is None:
query = self.default_content_item_query
if related_items_query is None:
related_items_query = self.default_content_item_query
content_item = self.get(
'/content_items/%s/revisions/%d.json'
% (slug, number), query)
# Drop unnecessary outer layer
content_item = content_item['content_item']
# We have our content item, now loop through the related
# items, build a list of content item ids, and retrieve them all
ids = [item_stub['relatedcontentitem_id']
for item_stub in content_item['related_items']
]
related_items = self.get_multi_content_items(
ids, related_items_query, False)
# now that we've retrieved all the related items, embed them into
# the original content item dictionary to make it fancy
for item_stub in content_item['related_items']:
item_stub['content_item'] = None
for item in related_items:
if (
item is not None and
item_stub['relatedcontentitem_id'] == item['id']
):
item_stub['content_item'] = item
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return content_item
def push_into_content_item(self, slug, content_item_slugs):
"""
Push a list of content item slugs onto the top of the related
items list for a content item
"""
ret = self.put_json(
'/content_items/prepend_related_items.json?id=%s' % slug,
{'items': content_item_slugs})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def push_embed_into_content_item(self, slug, content_item_slugs, size="S"):
"""
Push a list of content item slugs into embedded items list
Accepts a list of slugs and an optional size, which will be applied to
all embeds.
client.push_embed_into_content_item(['slug-1', 'slug-2', 'slug-3'])
client.push_embed_into_content_item(
['slug-1', 'slug-2', 'slug-3'],
size='L'
)
Also accepts a list of dictionaries that provide a slug and custom size
for each embed.
client.push_embed_into_content_item([
dict(slug='slug-1', size='S'),
dict(slug='slug-2', size='L'),
dict(slug='slug-3', size='L'),
])
"""
items = []
for i, ci in enumerate(content_item_slugs):
if isinstance(ci, str):
d = dict(slug=ci, contentitem_size=size, position=i)
items.append(d)
elif isinstance(ci, dict):
d = dict(
slug=ci['slug'],
contentitem_size=ci.get('size', size),
position=i
)
items.append(d)
else:
raise ValueError("content_item_slugs are bad data")
ret = self.put_json(
'/content_items/append_embedded_items.json?id=%s' % slug,
{'items': items}
)
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def remove_from_content_item(self, slug, content_item_slugs):
"""
Removes related items from a content item, accepts slug of content item
and list of one or more related item slugs
"""
ret = self.put_json(
'/content_items/remove_related_items.json?id=%s' % slug,
{'items': content_item_slugs})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def remove_embed_from_content_item(self, slug, content_item_slugs):
"""
Removes embed items from a content item, accepts slug of content item
and list of one or more related item slugs
"""
ret = self.put_json(
'/content_items/remove_embedded_items.json?id=%s' % slug,
{'items': content_item_slugs})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def insert_into_content_item(self, slug, content_item_slugs, position=1):
"""
Insert a list of content item slugs into the related items list for
a content item, starting at the specified position
"""
ret = self.put_json(
'/content_items/insert_related_items.json?id=%s' % slug,
{'items': [{
'slug': content_item_slugs[i], 'position': position + i
} for i in range(len(content_item_slugs))]})
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def append_into_content_item(self, slug, content_item_slugs):
"""
Convenience function to append a list of content item slugs to the end
of the related items list for a content item
"""
ci = self.get_content_item(slug)
ret = self.insert_into_content_item(
slug, content_item_slugs, position=(len(ci['related_items']) + 1))
try:
self.cache.remove_content_item(slug)
except NotImplementedError:
pass
return ret
def get_collection_layout(self, code, query=None, force_update=False):
if not query:
query = {
'include': 'items',
'filter': self.default_filter
}
if force_update:
resp = self.get('/current_collections/%s.json' % code, query)
collection_layout = resp['collection_layout']
collection_layout['code'] = code # response is missing this
self.cache.save_collection_layout(collection_layout, query=query)
else:
collection_layout = self.cache.get_collection_layout(
code, query=query)
if collection_layout is None:
resp = self.get('/current_collections/%s.json' % code, query)
collection_layout = resp['collection_layout']
collection_layout['code'] = code # response is missing this
self.cache.save_collection_layout(
collection_layout, query=query)
return collection_layout
def get_fancy_collection(
self,
code,
with_collection=False,
limit_items=25,
content_item_query=None,
collection_query=None,
include_suppressed=False,
force_update=False
):
"""
Make a few API calls to fetch all possible data for a collection
and its content items. Returns a collection layout with
extra 'collection' key on the layout, and a 'content_item' key
on each layout item.
"""
collection_layout = self.get_collection_layout(
code, query=collection_query, force_update=force_update)
if with_collection:
# Do we want more detailed data about the collection?
collection = self.get_collection(
code, query=collection_query, force_update=force_update)
collection_layout['collection'] = collection
if limit_items:
# We're only going to fetch limit_items number of things
# so cut out the extra items in the content_layout
collection_layout['items'] = \
collection_layout['items'][:limit_items]
# Process the list of collection layout items to gather ids to fetch,
# and to remove suppressed items, if necessary.
content_item_ids = list()
remove_these = list()
for ci in collection_layout['items']:
if not include_suppressed and float(ci['suppressed']) > 0:
remove_these.append(ci)
else:
content_item_ids.append(ci['contentitem_id'])
# If we're not including suppressed items, remove them from the data
if not include_suppressed:
for ci in remove_these:
collection_layout['items'].remove(ci)
# Retrieve all the content_items, 25 at a time
content_items = self.get_multi_content_items(
content_item_ids, query=content_item_query,
force_update=force_update)
# Loop through the collection items and add the corresponding content
# item data.
for ci in collection_layout['items']:
for ci2 in content_items:
if ci['contentitem_id'] == ci2['id']:
ci['content_item'] = ci2
break
return collection_layout
def get_fancy_content_item(
self,
slug,
query=None,
related_items_query=None,
force_update=False
):
if query is None:
query = deepcopy(self.default_content_item_query)
query['include'].append('related_items')
if related_items_query is None:
related_items_query = self.default_content_item_query
content_item = self.get_content_item(
slug, query, force_update=force_update)
# We have our content item, now loop through the related
# items, build a list of content item ids, and retrieve them all
ids = [item_stub['relatedcontentitem_id']
for item_stub in content_item['related_items']]
related_items = self.get_multi_content_items(
ids, related_items_query, force_update=force_update)
# now that we've retrieved all the related items, embed them into
# the original content item dictionary to make it fancy
for item_stub in content_item['related_items']:
item_stub['content_item'] = None
for item in related_items:
if (
item is not None and
item_stub['relatedcontentitem_id'] == item['id']
):
item_stub['content_item'] = item
return content_item
def get_section(self, path, query=None, force_update=False):
if query is None:
query = {
'section_path': path,
'product_affiliate_code': self.product_affiliate_code,
'include': 'default_section_path_collections'
}
if force_update:
data = self.get('/sections/show_collections.json', query)
section = data
self.cache.save_section(path, section, query)
else:
section = self.cache.get_section(path, query)
if section is None:
data = self.get('/sections/show_collections.json', query)
section = data
self.cache.save_section(path, section, query)
return section
def get_section_configs(self, path, query=None, force_update=False):
if query is None:
query = {
'section_path': path,
'product_affiliate_code': self.product_affiliate_code,
'webapp_name': self.webapp_name
}
if force_update:
data = self.get('/sections/show_configs.json', query)
section = data
self.cache.save_section_configs(path, section, query)
else:
section = self.cache.get_section_configs(path, query)
if section is None:
data = self.get('/sections/show_configs.json', query)
section = data
self.cache.save_section_configs(path, section, query)
return section
def get_fancy_section(self, path, force_update=False):
section = self.get_section(path, force_update)
config = self.get_section_configs(path, force_update)
collections = list()
for c in section['results']['default_section_path_collections']:
collections.append({
'collection_type_code': c['collection_type_code'],
'name': c['name'],
'collection': self.get_fancy_collection(c['code'])
})
fancy_section = config['results']['section_config']
fancy_section['collections'] = collections
fancy_section['path'] = path
return fancy_section
def get_nav(self, collection_code, domain=None):
"""
get a simple dictionary of text and links for a navigation collection
"""
nav = list()
domain = domain.replace(
'http://', '').replace('https://', '').replace('/', '')
top_level = self.get_collection_layout(collection_code)
for item in top_level['items']:
fancy_item = self.get_fancy_content_item(item['slug'])
if 'url' not in fancy_item:
raise
sub_nav = list()
for sub_item in fancy_item['related_items']:
if 'url' in sub_item['content_item']:
url = sub_item['content_item']['url']
elif 'web_url' in sub_item['content_item']:
url = sub_item['content_item']['web_url']
else:
raise
if not url.startswith('http'):
url = 'http://' + domain + url
sub_nav.append({
'text': sub_item['headline'] or
sub_item['content_item']['title'],
'url': url,
'slug': sub_item['slug']
})
if fancy_item['url'].startswith('http'):
url = fancy_item['url']
path = url[url.find('/') + 1:url.rfind('/')]
else:
url = 'http://' + domain + fancy_item['url']
path = url[url.find('/', 7) + 1:url.rfind('/')]
nav.append({
'text': fancy_item['title'],
'url': url,
'slug': fancy_item['slug'],
'nav': sub_nav,
'path': path
})
return nav
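    # Shape of the returned structure (values are illustrative):
    #   [{'text': 'Sports', 'url': 'http://example.com/sports/index.html',
    #     'slug': 'top-nav-sports', 'path': 'sports',
    #     'nav': [{'text': 'Dodgers', 'url': 'http://example.com/sports/dodgers/',
    #              'slug': 'top-nav-dodgers'}]}]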
def get_source_product_affiliates(self, min_date='', max_date='', page=1):
"""
Retrieves one or more product affiliate sources that have
been modified within a designated date range.
Why a date range? Who knows.
Dates must be of the format: YYYY-MM-DDTHH:MM:SSZ
"""
# Default max_date to today if non given
if not max_date:
            max_date = date.today().strftime("%Y-%m-%dT%H:%M:%S%Z")
# Default min_date to the beginning of the epoch (1970)
if not min_date:
epoch = datetime.utcfromtimestamp(0)
            min_date = epoch.strftime("%Y-%m-%dT%H:%M:%S%Z")
params = {
'page': page,
'minimum_date': min_date,
'maximum_date': max_date
}
return self.get("/source_product_affiliates/multi.json", params)
def get_product_affiliates(self, name='', code=''):
"""
Retrieves one or more affiliate source codes.
The Content Services endpoint takes either 'code' or 'name'
as arguments but not both.
"""
if name and name != 'all':
# If a name is specified, use it
params = {
'name': str(name)
}
elif name and name == 'all':
# Special case. If name is "all" get everything
params = {
'name': ''
}
elif code:
# If there is a code specified, use it instead of name
params = {
'code': str(code)
}
elif not name and not code:
            # If the args are empty, get the default product affiliate info
params = {
'code': self.product_affiliate_code
}
return self.get("/product_affiliates/multi.json", params)
# Utilities
def http_headers(self, content_type=None, if_modified_since=None):
h = {'Authorization': 'Bearer %(P2P_API_KEY)s' % self.config}
if content_type is not None:
h['content-type'] = content_type
if type(if_modified_since) == datetime:
h['If-Modified-Since'] = format_date_time(
mktime(if_modified_since.utctimetuple()))
elif if_modified_since is not None:
h['If-Modified-Since'] = if_modified_since
return h
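    # Example of the headers produced (token and date are illustrative):
    #   {'Authorization': 'Bearer abc123',
    #    'content-type': 'application/json',
    #    'If-Modified-Since': 'Sat, 01 Jan 2000 00:00:00 GMT'}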
def _check_for_errors(self, resp, req_url):
"""
Parses the P2P response, scanning and raising for exceptions. When an
exception is raised, its message will contain the response url, a curl
string of the request and a dictionary of response data.
"""
curl = ''
request_log = {
'REQ_URL': req_url,
'REQ_HEADERS': self.http_headers(),
'RESP_URL': resp.url,
'STATUS': resp.status_code,
'RESP_BODY': resp.content,
'RESP_HEADERS': resp.headers,
# The time taken between sending the first byte of
# the request and finishing parsing the response headers
'SECONDS_ELAPSED': resp.elapsed.total_seconds()
}
if self.debug:
curl = utils.request_to_curl(resp.request)
log.debug("[P2P][RESPONSE] %s" % request_log)
resp_content = self.convert_response_bytes_to_string(resp)
if resp.status_code >= 500:
try:
if u'ORA-00001: unique constraint' in resp_content:
raise P2PUniqueConstraintViolated(resp.url, request_log, \
curl)
elif u'incompatible encoding regexp match' in resp_content:
raise P2PEncodingMismatch(resp.url, request_log, curl)
elif u'unknown attribute' in resp_content:
raise P2PUnknownAttribute(resp.url, request_log, curl)
elif u"Invalid access definition" in resp_content:
raise P2PInvalidAccessDefinition(resp.url, request_log, \
curl)
elif u"solr.tila.trb" in resp_content:
raise P2PSearchError(resp.url, request_log, curl)
elif u"Request Timeout" in resp_content:
raise P2PTimeoutError(resp.url, request_log, curl)
elif u'Duplicate entry' in resp_content:
raise P2PUniqueConstraintViolated(resp.url, request_log, \
curl)
elif (u'Failed to upload image to the photo service'
in resp_content):
raise P2PPhotoUploadError(resp.url, request_log, curl)
elif u"This file type is not supported" in resp_content:
raise P2PInvalidFileType(resp.url, request_log, curl)
elif re.search(r"The URL (.*) does not exist", resp_content):
raise P2PFileURLNotFound(resp.url, request_log)
data = resp.json()
except (ValueError, TypeError):
pass
raise P2PException(resp.url, request_log, curl)
elif resp.status_code == 404:
raise P2PNotFound(resp.url, request_log, curl)
elif resp.status_code >= 400:
if u'{"slug":["has already been taken"]}' in resp_content:
raise P2PSlugTaken(resp.url, request_log, curl)
elif u'{"code":["has already been taken"]}' in resp_content:
raise P2PSlugTaken(resp.url, request_log, curl)
elif resp.status_code == 403:
raise P2PForbidden(resp.url, request_log, curl)
try:
resp.json()
except ValueError:
pass
raise P2PException(resp_content, request_log, curl)
return request_log
def convert_response_bytes_to_string(self, response):
vartype = str(type(response.content))
if vartype == "<class 'bytes'>":
# Convert to str
return response.content.decode("utf-8")
elif vartype == "<class 'str'>":
# It's already a str, just return it
return response.content
# It is not a string type, return empty
return ''
@retry(P2PRetryableError)
def get(self, url, query=None, if_modified_since=None):
if query is not None:
url += '?' + utils.dict_to_qs(query)
resp = self.s.get(
self.config['P2P_API_ROOT'] + url,
headers=self.http_headers(if_modified_since=if_modified_since),
verify=True
)
# Log the request curl if debug is on
if self.debug:
log.debug("[P2P][GET] %s" % utils.request_to_curl(resp.request))
        # If debug is off, store a lightweight log
else:
log.debug("[P2P][GET] %s" % url)
resp_log = self._check_for_errors(resp, url)
# The API returns "Content item exists" when the /exists endpoint is called
# causing everything to go bonkers, Why do you do this!!!
resp_content = self.convert_response_bytes_to_string(resp)
if resp_content == "Content item exists":
return resp_content
try:
ret = utils.parse_response(resp.json())
if 'ETag' in resp.headers:
ret['etag'] = resp.headers['ETag']
if 'X-Total-Hits' in resp.headers:
ret['total-hits'] = resp.headers['X-Total-Hits']
return ret
except ValueError:
log.error('[P2P][GET] JSON VALUE ERROR ON SUCCESSFUL RESPONSE %s' % resp_log)
raise
@retry(P2PRetryableError)
def delete(self, url):
resp = self.s.delete(
self.config['P2P_API_ROOT'] + url,
headers=self.http_headers(),
verify=True)
# Log the request curl if debug is on
if self.debug:
log.debug("[P2P][DELETE] %s" % utils.request_to_curl(resp.request))
        # If debug is off, store a lightweight log
else:
log.debug("[P2P][DELETE] %s" % url)
resp_content = self.convert_response_bytes_to_string(resp)
self._check_for_errors(resp, url)
return utils.parse_response(resp_content)
@retry(P2PRetryableError)
def post_json(self, url, data):
payload = json.dumps(utils.parse_request(data))
resp = self.s.post(
self.config['P2P_API_ROOT'] + url,
data=payload,
headers=self.http_headers('application/json'),
verify=True
)
# Log the request curl if debug is on
if self.debug:
log.debug("[P2P][POST] %s" % utils.request_to_curl(resp.request))
        # If debug is off, store a lightweight log
else:
log.debug("[P2P][POST] %s" % url)
resp_content = self.convert_response_bytes_to_string(resp)
resp_log = self._check_for_errors(resp, url)
if resp_content == "" and resp.status_code < 400:
return {}
else:
try:
return utils.parse_response(resp.json())
except Exception:
log.error('[P2P][POST] EXCEPTION IN JSON PARSE: %s' % resp_log)
raise
@retry(P2PRetryableError)
def put_json(self, url, data):
payload = json.dumps(utils.parse_request(data))
resp = self.s.put(
self.config['P2P_API_ROOT'] + url,
data=payload,
headers=self.http_headers('application/json'),
verify=True
)
# Log the request curl if debug is on
if self.debug:
log.debug("[P2P][PUT] %s" % utils.request_to_curl(resp.request))
        # If debug is off, store a lightweight log
else:
log.debug("[P2P][PUT] %s" % url)
resp_content = self.convert_response_bytes_to_string(resp)
resp_log = self._check_for_errors(resp, url)
if resp_content == "" and resp.status_code < 400:
return {}
else:
try:
return utils.parse_response(resp.json())
except Exception:
log.error('[P2P][POST] EXCEPTION IN JSON PARSE: %s' % resp_log)
raise
``` |
{
"source": "jperezsan/meraki-extended-automation-webapp",
"score": 2
} |
#### File: app/auth/__init__.py
```python
from flask import Blueprint, current_app
from ..models import Permission
auth = Blueprint('auth', __name__)
from . import views
from ..models import User
@auth.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
```
#### File: meraki-extended-automation-webapp/app/helpers.py
```python
from app import organization_data
def get_networks_dict(networks):
networks_dict = {}
for network in networks:
networks_dict[network.get("id")] = network
return networks_dict
def get_devices_dict(devices):
devices_dict = {}
for device in devices:
devices_dict[device.get("serial")] = device
return devices_dict
# Determine the loss average of each uplink
def get_loss_average(uplink):
_sum = 0
count = 0
for item in uplink["timeSeries"]:
loss_percent = item["lossPercent"]
if loss_percent is not None:
_sum = _sum + loss_percent
count += 1
if count > 0:
return _sum / count
return 0
# Determine the latency average of each uplink
def get_latency_average(uplink):
_sum = 0
count = 0
for item in uplink["timeSeries"]:
latency = item["latencyMs"]
if latency is not None:
_sum = _sum + latency
count += 1
if count > 0:
return int(_sum / count)
return 0
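# --- Added illustration, not part of the original module ---
# Minimal sketch of how the two averaging helpers behave on a hand-built uplink
# dict shaped like the Meraki loss-and-latency time series; the sample values
# are assumptions used purely for illustration.
def _example_uplink_averages():
    sample_uplink = {
        "timeSeries": [
            {"lossPercent": 0.0, "latencyMs": 20},
            {"lossPercent": 2.0, "latencyMs": 40},
            {"lossPercent": None, "latencyMs": None},  # missing samples are skipped
        ]
    }
    # Returns (1.0, 30): loss is the mean of 0.0 and 2.0, latency is int((20 + 40) / 2)
    return get_loss_average(sample_uplink), get_latency_average(sample_uplink)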
# Organization MX devices with uplinks and uplink status
def get_mx_devices_with_uplinks(organization_id, devices, networks_dict):
uplinks = organization_data.dashboard.organizations.getOrganizationDevicesUplinksLossAndLatency(organization_id)
devices_dict = get_devices_dict(devices)
# MX devices with uplinks information
filtered_devices = {}
for uplink in uplinks:
if uplink["serial"] in devices_dict:
if uplink["serial"] in filtered_devices:
filtered_devices[uplink["serial"]]["uplinks"].append(
{
"uplink": uplink["uplink"],
"loss_average": get_loss_average(uplink),
"latency_average": get_latency_average(uplink),
"ip": uplink["ip"]
})
else:
device = devices_dict[uplink["serial"]]
device["uplinks"] = []
device["uplinks"].append({
"uplink": uplink["uplink"],
"loss_average": get_loss_average(uplink),
"latency_average": get_latency_average(uplink),
"ip": uplink["ip"]
})
if device["networkId"] in networks_dict:
device["networkName"] = networks_dict[device["networkId"]]
filtered_devices[device["serial"]] = device
return filtered_devices
def get_hubs(org_id):
hubs = []
appliance_vpn_statuses = organization_data.dashboard.appliance.getOrganizationApplianceVpnStatuses(
org_id, total_pages="all")
try:
for item in appliance_vpn_statuses:
if str(item['vpnMode']) == "hub":
temp = {"hubName": item["networkName"],
"hubId": item["networkId"], "hubSerial": item["deviceSerial"]}
hubs.append(temp)
return hubs
    except Exception:
print("Error!")
print("Site-to-site VPN needs to be enabled for this organization")
print("Map Monitoring and DC Switchover modules will not work")
return None
# Assign color to network depending on latency, loss and active wans
def get_colored_networks(clean_networks, loss_tolerance, latency_tolerance, only_critical=False):
if only_critical:
critical_networks = clean_networks.copy()
for key, network in clean_networks.items():
uplinks = network["uplinks"]
network["color"] = "Green"
dead_uplinks = 0
packet_loss_uplinks = 0
high_latency_uplinks = 0
for uplink in uplinks:
# Restrict ip testing to 8.8.8.8 (Google) and Cisco Umbrella dns
# TODO: Add testing IPs in the configuration
if '8.8.8.8' not in uplink["ip"] and '208.67.222.222' not in uplink["ip"] and '208.67.220.220' not in uplink["ip"]:
continue
loss_value = uplink["loss_average"]
if loss_value is not None:
if loss_value < loss_tolerance:
uplink["loss_status"] = "Good"
uplink["color"] = "Green"
elif loss_value == 100:
uplink["loss_status"] = "Dead"
uplink["color"] = "Red"
dead_uplinks += 1
continue
else:
uplink["loss_status"] = "Bad"
uplink["color"] = "Orange"
packet_loss_uplinks += 1
continue
latency_value = uplink["latency_average"]
if latency_value is not None:
if latency_value > latency_tolerance:
uplink["latency_status"] = "Bad"
uplink["color"] = "Yellow"
high_latency_uplinks += 1
else:
uplink["latency_status"] = "Good"
uplink["color"] = "Green"
if dead_uplinks == len(uplinks):
network["color"] = "Red"
continue
elif dead_uplinks > 0:
network["color"] = "Blue"
continue
if packet_loss_uplinks == 0 and high_latency_uplinks == 0:
if only_critical:
del critical_networks[key]
continue
if packet_loss_uplinks > high_latency_uplinks:
network["color"] = "Orange"
elif high_latency_uplinks > 0:
network["color"] = "Yellow"
if only_critical:
return critical_networks
return clean_networks
```
#### File: app/sdwan/views.py
```python
from flask.helpers import flash
from . import sdwan
from flask import render_template, Response, request
from flask_login import login_required
import json
from .. import organization_data
from .. import helpers
from ..models import Permission
from ..decorators import permission_required
import os
@sdwan.route('/', methods=['GET'])
@login_required
@permission_required(Permission.MAP_MONITORING)
def index():
return render_template('sdwan/index.html', title='SDWAN module')
@sdwan.route('/mapMonitoring', methods=['GET'])
@login_required
@permission_required(Permission.MAP_MONITORING)
def map_monitoring():
faulty_filter = request.args.get('filter')
if faulty_filter is not None:
if "True" in faulty_filter:
flash("Showing only faulty networks")
organization_data.colored_networks = helpers.get_colored_networks(organization_data.mx_devices_with_uplinks,
organization_data.loss_tolerance,
organization_data.latency_tolerance, True)
else:
organization_data.colored_networks = helpers.get_colored_networks(organization_data.mx_devices_with_uplinks,
organization_data.loss_tolerance,
organization_data.latency_tolerance, False)
return render_template('sdwan/mapMonitoring.html', title='SDWAN - Map Monitoring', here_maps_api_key = os.getenv('HERE_MAPS_API_KEY'))
@sdwan.route('/mx-devices', methods=['GET'])
@login_required
@permission_required(Permission.MAP_MONITORING)
def get_mx_devices():
return Response(json.dumps(organization_data.colored_networks), mimetype='application/json')
``` |
{
"source": "jperezvisaires/tfg-intphys",
"score": 3
} |
#### File: modules/old/model_convlstm.py
```python
import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import ConvLSTM2D, Conv2D
from tensorflow.keras.layers import MaxPooling2D, UpSampling2D, MaxPooling3D, UpSampling3D, Concatenate
from tensorflow.keras.layers import BatchNormalization, Activation, Dropout, TimeDistributed
# Conv layer.
def conv_layer(x, filters, kernel_size=5, activation="relu", batch_norm=True):
x = Conv2D(filters=filters,
kernel_size=kernel_size,
strides=1,
padding="same",
activation=activation)(x)
if batch_norm:
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
return x
# ConvLSTM layer.
def convlstm_layer(x, filters, kernel_size=5, strides=1, activation="tanh", return_sequences=True, batch_norm=True):
x = ConvLSTM2D(filters=filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
activation=activation,
dropout=0.1,
recurrent_dropout=0.15,
go_backwards=False,
return_sequences=return_sequences)(x)
if batch_norm:
x = BatchNormalization()(x)
return x
# ConvLSTM prediction model.
def convlstm_model(input_size,
scale,
input_frames,
final_filter,
final_activation,
dropout,
batch_norm):
scaled_input = (input_frames, int(input_size[0] * scale), int(input_size[1] * scale), input_size[2])
convlstm_input = Input(shape=(scaled_input))
convlstm1 = convlstm_layer(x=convlstm_input, filters=32, kernel_size=5)
pool1 = MaxPooling3D(pool_size=(1,2,2), padding="same")(convlstm1)
convlstm2 = convlstm_layer(x=pool1, filters=32, kernel_size=5)
pool2 = MaxPooling3D(pool_size=(1,2,2), padding="same")(convlstm2)
convlstm3 = convlstm_layer(x=pool2, filters=64, kernel_size=5)
pool3 = MaxPooling3D(pool_size=(1,2,2), padding="same")(convlstm3)
convlstm4 = convlstm_layer(x=pool3, filters=64, kernel_size=5)
pool4 = MaxPooling3D(pool_size=(1,2,2), padding="same")(convlstm4)
convlstm5 = convlstm_layer(x=pool4, filters=128, kernel_size=5)
up5 = UpSampling3D(size=(1,2,2))(convlstm5)
convlstm6 = convlstm_layer(x=up5, filters=64, kernel_size=5)
up6 = UpSampling3D(size=(1,2,2))(convlstm6)
convlstm7 = convlstm_layer(x=up6, filters=64, kernel_size=5)
up7 = UpSampling3D(size=(1,2,2))(convlstm7)
convlstm8 = convlstm_layer(x=up7, filters=32, kernel_size=5)
up8 = UpSampling3D(size=(1,2,2))(convlstm8)
convlstm9 = convlstm_layer(x=up8, filters=32, kernel_size=5, return_sequences=False)
conv10 = conv_layer(x=convlstm9, filters=final_filter, kernel_size=1, activation=final_activation)
convlstm_output = conv10
model = Model(inputs=convlstm_input, outputs=convlstm_output)
return model
def convlstm_model_skip(input_size,
scale,
input_frames,
final_filter,
final_activation,
batch_norm):
scaled_input = (input_frames, int(input_size[0] * scale), int(input_size[1] * scale), input_size[2])
convlstm_input = Input(shape=(scaled_input))
convlstm1 = convlstm_layer(x=convlstm_input, filters=32, kernel_size=7)
pool1 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm1)
convlstm2 = convlstm_layer(x=pool1, filters=32, kernel_size=7)
pool2 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm2)
convlstm3 = convlstm_layer(x=pool2, filters=64, kernel_size=5)
pool3 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm3)
convlstm4 = convlstm_layer(x=pool3, filters=64, kernel_size=5)
pool4 = TimeDistributed(MaxPooling2D(pool_size=2, padding="same"))(convlstm4)
convlstm5_1 = convlstm_layer(x=pool4, filters=128, kernel_size=3)
convlstm5_2 = convlstm_layer(x=convlstm5_1, filters=128, kernel_size=3)
up5 = TimeDistributed(UpSampling2D(size=2))(convlstm5_2)
convlstm6 = convlstm_layer(x=up5, filters=64, kernel_size=5)
concat6 = Concatenate(axis=-1)([convlstm4, convlstm6])
up6 = TimeDistributed(UpSampling2D(size=2))(concat6)
convlstm7 = convlstm_layer(x=up6, filters=64, kernel_size=5)
concat7 = Concatenate(axis=-1)([convlstm3, convlstm7])
up7 = TimeDistributed(UpSampling2D(size=2))(concat7)
convlstm8 = convlstm_layer(x=up7, filters=32, kernel_size=7)
concat8 = Concatenate(axis=-1)([convlstm2, convlstm8])
up8 = TimeDistributed(UpSampling2D(size=2))(concat8)
convlstm9_1 = convlstm_layer(x=up8, filters=32, kernel_size=7)
concat9 = Concatenate(axis=-1)([convlstm1, convlstm9_1])
convlstm9_2 = convlstm_layer(x=concat9, filters=32, kernel_size=7, return_sequences=False)
conv10 = conv_layer(x=convlstm9_2, filters=final_filter, kernel_size=1, activation=final_activation)
convlstm_output = conv10
model = Model(inputs=convlstm_input, outputs=convlstm_output)
return model
def convlstm_model_simple(input_size,
scale,
input_frames,
final_filter,
final_activation,
batch_norm):
scaled_input = (input_frames, int(input_size[0] * scale), int(input_size[1] * scale), input_size[2])
convlstm_input = Input(shape=(scaled_input))
convlstm1 = convlstm_layer(x=convlstm_input, filters=64, kernel_size=5)
convlstm2 = convlstm_layer(x=convlstm1, filters=64, kernel_size=5)
convlstm3 = convlstm_layer(x=convlstm2, filters=64, kernel_size=5)
convlstm4 = convlstm_layer(x=convlstm3, filters=64, kernel_size=5)
convlstm5 = convlstm_layer(x=convlstm4, filters=64, kernel_size=5, return_sequences=False)
conv6 = conv_layer(x=convlstm5, filters=final_filter, kernel_size=1, activation=final_activation)
convlstm_output = conv6
model = Model(inputs=convlstm_input, outputs=convlstm_output)
return model
# Get ConvLSTM model.
def get_convlstm_skip():
params = {'input_size': (288, 288, 1),
'scale': 0.5,
'input_frames': 4,
'final_filter': 1,
'final_activation': "sigmoid",
'batch_norm': True}
model = convlstm_model_skip(**params)
return model
def get_convlstm_simple():
params = {'input_size': (288, 288, 1),
'scale': 0.5,
'input_frames': 4,
'final_filter': 1,
'final_activation': "sigmoid",
'batch_norm': True}
model = convlstm_model_simple(**params)
return model
def get_convlstm():
params = {'input_size': (288, 288, 2),
'scale': 0.5,
'input_frames': 4,
'final_filter': 1,
'final_activation': "tanh",
'dropout': 0.0,
'batch_norm': True}
model = convlstm_model(**params)
return model
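# --- Added illustration, not part of the original module ---
# Hedged sketch of how the factory functions above might be exercised; guarded so
# that importing the module stays side-effect free. Building the model assumes a
# working TensorFlow installation and uses the 144x144 scaled input described above.
if __name__ == "__main__":
    demo_model = get_convlstm_skip()
    demo_model.summary()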
```
#### File: tfg-intphys/modules/prediction_losses.py
```python
import tensorflow as tf
import numpy as np
# Dice Loss + Cross Entropy Loss.
def entropy_dice():
def dice_loss(y_true, y_pred):
numerator = 2 * tf.math.reduce_sum(y_true * y_pred, axis=(1,2,3))
denominator = tf.math.reduce_sum(y_true + y_pred, axis=(1,2,3))
return tf.reshape(1 - numerator / denominator, (-1,1,1))
def entropy_dice_loss(y_true, y_pred):
return tf.keras.losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return entropy_dice_loss
# Dice Loss (F1 Score).
def dice():
def dice_loss(y_true, y_pred):
numerator = 2 * tf.math.reduce_sum(y_true * y_pred, axis=(1,2,3))
denominator = tf.math.reduce_sum(y_true + y_pred, axis=(1,2,3))
return 1 - numerator / denominator
def dice_coefficient(y_true, y_pred):
return 1 - dice_loss(y_true, y_pred)
return dice_loss, dice_coefficient
# Jaccard Loss (IoU).
def jaccard():
def dice_loss(y_true, y_pred):
numerator = 2 * tf.math.reduce_sum(y_true * y_pred, axis=(1,2,3))
denominator = tf.math.reduce_sum(y_true + y_pred, axis=(1,2,3))
return 1 - numerator / denominator
def dice_coefficient(y_true, y_pred):
return 1 - dice_loss(y_true, y_pred)
def jaccard_loss(y_true, y_pred):
return 1 - (dice_coefficient(y_true, y_pred)/(2 - dice_coefficient(y_true, y_pred)))
def jaccard_coefficient(y_true, y_pred):
return 1 - jaccard_loss(y_true, y_pred)
return jaccard_loss, jaccard_coefficient
# Intensity Loss (L1 and L2 combined implementation).
def intensity(l_num=1):
"""
Computes L1 and L2 losses (MAE and MSE)
Adapted from https://github.com/dyelax/Adversarial_Video_Generation/blob/master/Code/loss_functions.py
"""
def intensity_loss(y_true, y_pred):
return tf.math.reduce_mean(tf.math.abs(y_true - y_pred)**l_num)
return intensity_loss
# Gradient Difference Loss.
def gradient_difference(alpha=1):
"""
Computes the Gradient Difference Loss (GDL)
Adapted from https://github.com/dyelax/Adversarial_Video_Generation/blob/master/Code/loss_functions.py
"""
def gradient_loss(y_true, y_pred):
pred_dx, pred_dy = tf.image.image_gradients(y_pred)
true_dx, true_dy = tf.image.image_gradients(y_true)
grad_diff_x = tf.math.abs(true_dx - pred_dx)
grad_diff_y = tf.math.abs(true_dy - pred_dy)
return tf.math.reduce_mean(grad_diff_x ** alpha + grad_diff_y ** alpha)
return gradient_loss
# Loss combination of Intensity Loss and Gradient Difference Loss
def intensity_gradient(l_num=1, alpha=1):
intensity_loss = intensity(l_num)
gradient_loss = gradient_difference(alpha)
def intensity_gradient_loss(y_true, y_pred):
return (intensity_loss(y_true, y_pred) + gradient_loss(y_true, y_pred))
return intensity_gradient_loss
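# --- Added illustration, not part of the original module ---
# Hedged sketch of how these loss factories plug into a Keras model; `some_model`
# is a hypothetical tf.keras.Model and the l_num / alpha values are assumptions:
#
#   loss_fn = intensity_gradient(l_num=1, alpha=1)
#   some_model.compile(optimizer="adam", loss=loss_fn)
#
# Each factory returns a closure so hyper-parameters are fixed at build time while
# the returned function keeps the standard (y_true, y_pred) Keras loss signature.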
```
#### File: tfg-intphys/modules/segmentation_partition.py
```python
import numpy as np
import h5py
base_path = "train"
def partition_image2seg(num_scenes, path_hdf5, num_initial=1, test=False, base_path=base_path):
frames = 100
list_samples = []
list_targets_mask = []
for scene in range(num_initial, num_initial+num_scenes):
scene_path = "{}/{:05d}".format(base_path, scene)
with h5py.File(path_hdf5, "r") as f:
if scene_path in f:
for i in range(frames):
path_image = scene_path + "/scene/scene_{:03d}".format(i+1)
list_samples.append(path_image)
path_mask = scene_path + "/masks/masks_{:03d}".format(i+1)
list_targets_mask.append(path_mask)
partition = partition_dictionary(list_samples, num_scenes, test)
targets = targets_dictionary(list_samples, list_targets_mask)
return partition, targets
def partition_dictionary(list_samples, num_scenes, test):
frames = 100
if test:
train_scenes = int(num_scenes * 0.8)
train_frames = train_scenes * frames
vali_scenes = int(num_scenes * 0.1)
vali_frames = vali_scenes * frames
list_samples_train = list_samples[:train_frames]
list_samples_vali = list_samples[train_frames:(train_frames + vali_frames)]
list_samples_test = list_samples[(train_frames + vali_frames):]
partition = {"train": list_samples_train,
"validation": list_samples_vali,
"test": list_samples_test}
else:
train_scenes = int(num_scenes * 0.9)
train_frames = train_scenes * frames
list_samples_train = list_samples[:train_frames]
list_samples_vali = list_samples[train_frames:]
partition = {"train": list_samples_train,
"validation": list_samples_vali}
return partition
def targets_dictionary(list_samples, list_targets):
targets = {}
for i in range(len(list_samples)):
targets[list_samples[i]] = list_targets[i]
return targets
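# --- Added illustration, not part of the original module ---
# Hedged usage sketch; the HDF5 path is an assumption, and the file must already
# contain groups laid out as train/<scene>/scene/scene_### and
# train/<scene>/masks/masks_### for the lookups above to resolve.
#
#   partition, targets = partition_image2seg(
#       num_scenes=10, path_hdf5="dataset-intphys-01000.hdf5", test=True)
#   # partition["train"] / ["validation"] / ["test"] hold image dataset paths;
#   # targets maps each image path to its mask path.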
```
#### File: tfg-intphys/modules/train_unet_pred.py
```python
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # Avoid most Tensorflow warning errors.
import sys
# Maths and utilites.
import numpy as np
import h5py
# Keras and Tensorflow.
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger
# Import my modules.
sys.path.append("./modules")
from prediction_unet import get_unet_prediction
from prediction_partition import partition_seg2seg, partition_image2seg, partition_image2image, partition_depth2seg
from prediction_generators import unet_seg2seg_generator, unet_image2seg_generator, unet_image2image_generator, unet_depth2seg_generator
from prediction_losses import intensity, entropy_dice, dice, jaccard
def train_unet_image2seg_short(loss_name,
block_number,
load_loss_name="entropy_dice",
load_block_number=1,
num_scenes=1000,
epochs=3,
learning_rate=0,
model_loading=False,
model_summary=False,
check_partition=False):
# Check if GPU is being used.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
print('GPU device not found')
USE_GPU = False
else:
print('Found GPU at: {}'.format(device_name))
# !nvidia-smi # GPU details.
USE_GPU = True
# Filename.
filename_unet = "unet-pred-image2seg-short-{}-{:02}".format(loss_name, block_number)
print("Filename: " + filename_unet)
# Create Unet model.
input_frames = 4
num_channels = 3
final_filters = 1
space_frames = 2
prediction_frame = 5
unet_model = get_unet_prediction(input_frames, num_channels, final_filters)
if model_summary:
unet_model.summary()
# Define losses and metrics
entropy_dice_loss = entropy_dice()
dice_loss, dice_coeff = dice()
jaccard_loss, jaccard_coeff = jaccard()
L1_loss = intensity(l_num=1)
L2_loss = intensity(l_num=2)
# Compile model
if loss_name == "L1":
print("Selected Loss: " + loss_name)
loss = L1_loss
metrics_list = [dice_coeff, jaccard_coeff]
elif loss_name == "L2":
print("Selected Loss: " + loss_name)
loss = L2_loss
metrics_list = [dice_coeff, jaccard_coeff]
elif loss_name == "entropy_dice":
print("Selected Loss: " + loss_name)
loss = entropy_dice_loss
metrics_list = [dice_coeff, jaccard_coeff, "mse"]
else:
print("Select a valid loss.")
if learning_rate:
print("Learning Rate: " + str(learning_rate))
optimizer = Adam(learning_rate=learning_rate)
else:
print("Learning Rate: " + str(1e-3))
optimizer = Adam()
    unet_model.compile(loss=loss, optimizer=optimizer, metrics=metrics_list)
# Create partition dictionaries
path_hdf5 = "/content/temp_dataset/dataset-intphys-{:02}000.hdf5".format(block_number)
partition, targets = partition_image2seg(num_initial=(block_number - 1) * 1000 + 1,
num_scenes=num_scenes,
input_frames=input_frames,
space_frames=space_frames,
prediction_frame=prediction_frame,
path_hdf5=path_hdf5)
# Parameters.
params = {'dim': (288, 288),
'path_hdf5': path_hdf5,
'scale' : 0.5,
'batch_size': 32,
'input_frames': input_frames,
'num_channels': num_channels,
'shuffle': True}
# Generators.
training_generator = unet_image2seg_generator(partition["train"], targets, **params)
validation_generator = unet_image2seg_generator(partition["validation"], targets, **params)
# Check partition integrity.
if check_partition:
print(partition)
print(targets)
# Select model for training.
model = unet_model
filename = filename_unet
model_clean_weights = model.get_weights()
# Configure Keras callbacks for training.
model_checkpoint = ModelCheckpoint(filepath="./models/{}.h5".format(filename),
save_best_only=False,
verbose=1)
csv_log = CSVLogger(filename="./logs/{}.csv".format(filename),
separator=";")
cb_list = [model_checkpoint, csv_log]
    # Load previous model.
if model_loading:
model = load_model("./models/unet-pred-image2seg-short-{}-{:02}.h5".format(load_loss_name, load_block_number), compile=False)
print("Loaded model: " + "unet-pred-image2seg-short-{}-{:02}.h5".format(load_loss_name, load_block_number))
saved_weights = model.get_weights()
model.compile(loss=loss, optimizer=optimizer)
model.set_weights(saved_weights)
# Clean weights before training
if not model_loading:
model.set_weights(model_clean_weights)
# Generator training.
train_history = model.fit(x=training_generator,
validation_data=validation_generator,
callbacks=cb_list,
epochs=epochs)
return model
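# --- Added illustration, not part of the original module ---
# Hedged call sketch for the trainer above; the block number and epoch count are
# assumptions, and the data block is expected at the hard-coded
# /content/temp_dataset/dataset-intphys-01000.hdf5 location used inside the function.
#
#   model = train_unet_image2seg_short(loss_name="entropy_dice", block_number=1,
#                                      epochs=3, model_summary=True)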
def train_unet_seg2seg_short(loss_name, block_number, load_loss_name="mse", load_block_number=1, num_scenes=1000, epochs=4, learning_rate=0, model_loading=False, model_summary=False, check_partition=False):
# Check if GPU is being used.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
print('GPU device not found')
USE_GPU = False
else:
print('Found GPU at: {}'.format(device_name))
# !nvidia-smi # GPU details.
USE_GPU = True
# Filename.
filename_unet = "unet-pred-seg2seg-short-{}-{:02}".format(loss_name, block_number)
print("Filename: " + filename_unet)
# Create Unet model.
input_frames = 5
num_channels = 1
final_filters = 1
space_frames = 3
prediction_frame = 3
unet_model = get_unet_prediction(input_frames, num_channels, final_filters)
if model_summary:
unet_model.summary()
# Define losses and metrics
entropy_dice_loss = entropy_dice()
dice_loss, dice_coeff = dice()
jaccard_loss, jaccard_coeff = jaccard()
# Compile model
if loss_name == "mse":
print("Selected Loss: " + loss_name)
loss = "mse"
metrics_list = [dice_coeff, "mae", "binary_crossentropy"]
elif loss_name == "mae":
print("Selected Loss: " + loss_name)
loss = "mae"
metrics_list = [dice_coeff, "mse", "binary_crossentropy"]
elif loss_name == "binary_crossentropy":
print("Selected Loss: " + loss_name)
loss = "binary_crossentropy"
metrics_list = [dice_coeff, "mse", "mae"]
elif loss_name == "entropy_dice":
print("Selected Loss: " + loss_name)
loss = entropy_dice_loss
metrics_list = [dice_coeff, "mse", "mae", "binary_crossentropy"]
if learning_rate:
print("Learning Rate: " + str(learning_rate))
optimizer = Adam(learning_rate=learning_rate)
else:
print("Learning Rate: " + str(1e-3))
optimizer = Adam()
unet_model.compile(loss=loss, optimizer=optimizer, metrics=metrics_list)
# Create partition dictionaries
path_hdf5 = "/content/temp_dataset/dataset-intphys-{:02}000.hdf5".format(block_number)
partition, targets = partition_seg2seg(num_initial=(block_number - 1) * 1000 + 1,
num_scenes=num_scenes,
input_frames=input_frames,
space_frames=space_frames,
prediction_frame=prediction_frame,
path_hdf5=path_hdf5)
# Parameters.
params = {'dim': (288, 288),
'path_hdf5': path_hdf5,
'scale' : 0.5,
'batch_size': 32,
'input_frames': input_frames,
'num_channels': num_channels,
'shuffle': True}
# Generators.
training_generator = unet_seg2seg_generator(partition["train"], targets, **params)
validation_generator = unet_seg2seg_generator(partition["validation"], targets, **params)
# Check partition integrity.
if check_partition:
print(partition)
print(targets)
# Select model for training.
model = unet_model
filename = filename_unet
model_clean_weights = model.get_weights()
# Configure Keras callbacks for training.
model_checkpoint = ModelCheckpoint(filepath="./models/{}.h5".format(filename),
save_best_only=True,
verbose=1)
csv_log = CSVLogger(filename="./logs/{}.csv".format(filename),
separator=";")
cb_list = [model_checkpoint, csv_log]
    # Load previous model.
if model_loading:
model = load_model("./models/unet-pred-seg2seg-short-{}-{:02}.h5".format(load_loss_name, load_block_number), compile=False)
print("Loaded model: " + "unet-pred-seg2seg-short-{}-{:02}.h5".format(load_loss_name, load_block_number))
saved_weights = model.get_weights()
model.compile(loss=loss, optimizer=optimizer, metrics=metrics_list)
model.set_weights(saved_weights)
# Clean weights before training
if not model_loading:
model.set_weights(model_clean_weights)
# Generator training.
train_history = model.fit(x=training_generator,
validation_data=validation_generator,
callbacks=cb_list,
epochs=epochs)
return model
def train_unet_image2image_short(loss_name, block_number, load_loss_name="mse", load_block_number=1, num_scenes=1000, epochs=3, learning_rate=0, model_loading=False, model_summary=False, check_partition=False):
# Check if GPU is being used.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
print('GPU device not found')
USE_GPU = False
else:
print('Found GPU at: {}'.format(device_name))
# !nvidia-smi # GPU details.
USE_GPU = True
# Filename.
filename_unet = "unet-pred-image2image-short-{}-{:02}".format(loss_name, block_number)
print("Filename: " + filename_unet)
# Create Unet model.
input_frames = 5
num_channels = 3
final_filters = 3
space_frames = 3
prediction_frame = 3
unet_model = get_unet_prediction(input_frames, num_channels, final_filters)
if model_summary:
unet_model.summary()
# Define losses and metrics
entropy_dice_loss = entropy_dice()
dice_loss, dice_coeff = dice()
jaccard_loss, jaccard_coeff = jaccard()
# Compile model
if loss_name == "mse":
print("Selected Loss: " + loss_name)
loss = "mse"
metrics_list = [dice_coeff, "mae", "binary_crossentropy"]
elif loss_name == "mae":
print("Selected Loss: " + loss_name)
loss = "mae"
metrics_list = [dice_coeff, "mse", "binary_crossentropy"]
elif loss_name == "binary_crossentropy":
print("Selected Loss: " + loss_name)
loss = "binary_crossentropy"
metrics_list = [dice_coeff, "mse", "mae"]
elif loss_name == "entropy_dice":
print("Selected Loss: " + loss_name)
loss = entropy_dice_loss
metrics_list = [dice_coeff, "mse", "mae", "binary_crossentropy"]
if learning_rate:
print("Learning Rate: " + str(learning_rate))
optimizer = Adam(learning_rate=learning_rate)
else:
print("Learning Rate: " + str(1e-3))
optimizer = Adam()
unet_model.compile(loss=loss, optimizer=optimizer, metrics=metrics_list)
# Create partition dictionaries
path_hdf5 = "/content/temp_dataset/dataset-intphys-{:02}000.hdf5".format(block_number)
partition, targets = partition_image2image(num_initial=(block_number - 1) * 1000 + 1,
num_scenes=num_scenes,
input_frames=input_frames,
space_frames=space_frames,
prediction_frame=prediction_frame,
path_hdf5=path_hdf5)
# Parameters.
params = {'dim': (288, 288),
'path_hdf5': path_hdf5,
'scale' : 0.5,
'batch_size': 32,
'input_frames': input_frames,
'num_channels': num_channels,
'shuffle': True}
# Generators.
training_generator = unet_image2image_generator(partition["train"], targets, **params)
validation_generator = unet_image2image_generator(partition["validation"], targets, **params)
# Check partition integrity.
if check_partition:
print(partition)
print(targets)
# Select model for training.
model = unet_model
filename = filename_unet
model_clean_weights = model.get_weights()
# Configure Keras callbacks for training.
model_checkpoint = ModelCheckpoint(filepath="./models/{}.h5".format(filename),
save_best_only=True,
verbose=1)
csv_log = CSVLogger(filename="./logs/{}.csv".format(filename),
separator=";")
cb_list = [model_checkpoint, csv_log]
    # Load previous model.
if model_loading:
model = load_model("./models/unet-pred-image2image-short-{}-{:02}.h5".format(load_loss_name, load_block_number), compile=False)
print("Loaded model: " + "unet-pred-image2image-short-{}-{:02}.h5".format(load_loss_name, load_block_number))
saved_weights = model.get_weights()
model.compile(loss=loss, optimizer=optimizer, metrics=metrics_list)
model.set_weights(saved_weights)
# Clean weights before training
if not model_loading:
model.set_weights(model_clean_weights)
# Generator training.
train_history = model.fit(x=training_generator,
validation_data=validation_generator,
callbacks=cb_list,
epochs=epochs)
return model
def train_unet_seg2seg_long(loss_name, block_number, load_loss_name="mse", load_block_number=1, num_scenes=1000, epochs=4, learning_rate=0, model_loading=False, model_summary=False, check_partition=False):
# Check if GPU is being used.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
print('GPU device not found')
USE_GPU = False
else:
print('Found GPU at: {}'.format(device_name))
# !nvidia-smi # GPU details.
USE_GPU = True
# Filename.
filename_unet = "unet-pred-seg2seg-long-{}-{:02}".format(loss_name, block_number)
print("Filename: " + filename_unet)
# Create Unet model.
input_frames = 5
num_channels = 1
final_filters = 1
space_frames = 3
prediction_frame = 15
unet_model = get_unet_prediction(input_frames, num_channels, final_filters)
if model_summary:
unet_model.summary()
# Define losses and metrics
entropy_dice_loss = entropy_dice()
dice_loss, dice_coeff = dice()
jaccard_loss, jaccard_coeff = jaccard()
# Compile model
if loss_name == "mse":
print("Selected Loss: " + loss_name)
loss = "mse"
metrics_list = [dice_coeff, "mae", "binary_crossentropy"]
elif loss_name == "mae":
print("Selected Loss: " + loss_name)
loss = "mae"
metrics_list = [dice_coeff, "mse", "binary_crossentropy"]
elif loss_name == "binary_crossentropy":
print("Selected Loss: " + loss_name)
loss = "binary_crossentropy"
metrics_list = [dice_coeff, "mse", "mae"]
elif loss_name == "entropy_dice":
print("Selected Loss: " + loss_name)
loss = entropy_dice_loss
metrics_list = [dice_coeff, "mse", "mae", "binary_crossentropy"]
if learning_rate:
print("Learning Rate: " + str(learning_rate))
optimizer = Adam(learning_rate=learning_rate)
else:
print("Learning Rate: " + str(1e-3))
optimizer = Adam()
unet_model.compile(loss=loss, optimizer=optimizer, metrics=metrics_list)
# Create partition dictionaries
path_hdf5 = "/content/temp_dataset/dataset-intphys-{:02}000.hdf5".format(block_number)
partition, targets = partition_seg2seg(num_initial=(block_number - 1) * 1000 + 1,
num_scenes=num_scenes,
input_frames=input_frames,
space_frames=space_frames,
prediction_frame=prediction_frame,
path_hdf5=path_hdf5)
# Parameters.
params = {'dim': (288, 288),
'path_hdf5': path_hdf5,
'scale' : 0.5,
'batch_size': 32,
'input_frames': input_frames,
'num_channels': num_channels,
'shuffle': True}
# Generators.
training_generator = unet_seg2seg_generator(partition["train"], targets, **params)
validation_generator = unet_seg2seg_generator(partition["validation"], targets, **params)
# Check partition integrity.
if check_partition:
print(partition)
print(targets)
# Select model for training.
model = unet_model
filename = filename_unet
model_clean_weights = model.get_weights()
# Configure Keras callbacks for training.
model_checkpoint = ModelCheckpoint(filepath="./models/{}.h5".format(filename),
save_best_only=True,
verbose=1)
csv_log = CSVLogger(filename="./logs/{}.csv".format(filename),
separator=";")
cb_list = [model_checkpoint, csv_log]
    # Load previous model.
if model_loading:
model = load_model("./models/unet-pred-seg2seg-long-{}-{:02}.h5".format(load_loss_name, load_block_number), compile=False)
print("Loaded model: " + "unet-pred-seg2seg-long-{}-{:02}.h5".format(load_loss_name, load_block_number))
saved_weights = model.get_weights()
model.compile(loss=loss, optimizer=optimizer, metrics=metrics_list)
model.set_weights(saved_weights)
# Clean weights before training
if not model_loading:
model.set_weights(model_clean_weights)
# Generator training.
train_history = model.fit(x=training_generator,
validation_data=validation_generator,
callbacks=cb_list,
epochs=epochs)
return model
def train_unet_depth2seg_short(loss_name, block_number, load_loss_name, load_block_number, num_scenes=1000, epochs=3, learning_rate=False, model_loading=False, model_summary=False, check_partition=False):
# Check if GPU is being used.
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
print('GPU device not found')
USE_GPU = False
else:
print('Found GPU at: {}'.format(device_name))
# !nvidia-smi # GPU details.
USE_GPU = True
# Filename.
filename_unet = "unet-pred-depth2seg-short-{}-{:02}".format(loss_name, block_number)
print("Filename: " + filename_unet)
# Create Unet model.
input_frames = 5
num_channels = 1
final_filters = 1
space_frames = 3
prediction_frame = 3
unet_model = get_unet_prediction(input_frames, num_channels, final_filters)
if model_summary:
unet_model.summary()
# Define losses and metrics
entropy_dice_loss = entropy_dice()
dice_loss, dice_coeff = dice()
jaccard_loss, jaccard_coeff = jaccard()
# Compile model
if loss_name == "mse":
print("Selected Loss: " + loss_name)
loss = "mse"
metrics_list = [dice_coeff, "mae", "binary_crossentropy"]
elif loss_name == "mae":
print("Selected Loss: " + loss_name)
loss = "mae"
metrics_list = [dice_coeff, "mse", "binary_crossentropy"]
elif loss_name == "binary_crossentropy":
print("Selected Loss: " + loss_name)
loss = "binary_crossentropy"
metrics_list = [dice_coeff, "mse", "mae"]
elif loss_name == "entropy_dice":
print("Selected Loss: " + loss_name)
loss = entropy_dice_loss
metrics_list = [dice_coeff, "mse", "mae", "binary_crossentropy"]
if learning_rate:
print("Learning Rate: " + str(learning_rate))
optimizer = Adam(learning_rate=learning_rate)
else:
print("Learning Rate: " + str(1e-3))
optimizer = Adam()
unet_model.compile(loss=loss, optimizer=optimizer, metrics=metrics_list)
# Create partition dictionaries
path_hdf5 = "/content/temp_dataset/dataset-intphys-{:02}000.hdf5".format(block_number)
partition, targets = partition_depth2seg(num_initial=(block_number - 1) * 1000 + 1,
num_scenes=num_scenes,
input_frames=input_frames,
space_frames=space_frames,
prediction_frame=prediction_frame,
path_hdf5=path_hdf5)
# Parameters.
params = {'dim': (288, 288),
'path_hdf5': path_hdf5,
'scale' : 0.5,
'batch_size': 32,
'input_frames': input_frames,
'num_channels': num_channels,
'shuffle': True}
# Generators.
training_generator = unet_depth2seg_generator(partition["train"], targets, **params)
validation_generator = unet_depth2seg_generator(partition["validation"], targets, **params)
# Check partition integrity.
if check_partition:
print(partition)
print(targets)
# Select model for training.
model = unet_model
filename = filename_unet
model_clean_weights = model.get_weights()
# Configure Keras callbacks for training.
model_checkpoint = ModelCheckpoint(filepath="./models/{}.h5".format(filename),
save_best_only=True,
verbose=1)
csv_log = CSVLogger(filename="./logs/{}.csv".format(filename),
separator=";")
cb_list = [model_checkpoint, csv_log]
    # Load previous model.
if model_loading:
model = load_model("./models/unet-pred-depth2seg-short-{}-{:02}.h5".format(loss_name, block_number-1), compile=False)
print("Loaded model: " + "unet-pred-depth2seg-short-{}-{:02}.h5".format(loss_name, block_number-1))
saved_weights = model.get_weights()
model.compile(loss=loss, optimizer=optimizer, metrics=metrics_list)
model.set_weights(saved_weights)
# Clean weights before training
if not model_loading:
model.set_weights(model_clean_weights)
# Generator training.
train_history = model.fit(x=training_generator,
validation_data=validation_generator,
callbacks=cb_list,
epochs=epochs)
return model
``` |
{
"source": "jperier/Opti_QAP",
"score": 4
} |
#### File: Opti_QAP/.ipynb_checkpoints/emplacement-checkpoint.py
```python
class Emplacement:
"""
    Represents one location (emplacement) of the QAP instance: stores its
    distances to every other location and the equipment assigned to it (None
    until an assignment is made).
"""
def __init__(self, id, line, n):
self.id = id
line = line.split(' ')
distances = {}
i = 0
for elt in line:
if elt != '':
i+=1
if i != id:
distances[i] = int(elt.replace('\n', ''))
if len(distances) != n-1:
print("ERROR: len(distances) != n-1")
self.distances = distances
self.equipment = None
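# --- Added illustration, not part of the original module ---
# Hedged usage sketch: builds the first of three emplacements from a distance-matrix
# row such as "0 2 5" (the distance to itself is skipped).
#
#   emp = Emplacement(1, "0 2 5", 3)
#   emp.distances   # {2: 2, 3: 5}
#   emp.equipment   # None until an equipment is assigned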
```
#### File: jperier/Opti_QAP/param_search.py
```python
from solution import Solution
from utils import extract, obj_simple, obj_simple_2, v_permute_one, recuit, tabou, brute_force, descente, get_best_voisin, get_best_voisin_small, nb_voisins
import json
from os import listdir
from os.path import isfile, join
import os
import datetime
import matplotlib.pyplot as plt
def logprint(*args, sep=' ', end='\n'):
for a in args:
s = str(a)
if log_file is not None:
log_file.write(s + sep)
        print(s, end=sep)
if log_file is not None:
log_file.write(end)
print(end=end)
def make_graph(file, nb_steps, value_at_steps, min_at_steps):
if len(value_at_steps) == 0 or len(min_at_steps) == 0:
return
gap = nb_steps/len(value_at_steps)
axis = [i*gap for i in range(len(value_at_steps))]
plt.plot(axis, value_at_steps, 'b')
plt.plot(axis, min_at_steps, 'r')
plt.ylabel('Fitness')
plt.xlabel('Step')
graph_path = 'logs/'+file+'/'
try:
os.mkdir(graph_path)
except FileExistsError:
pass
plt.savefig(graph_path + str(now.day)+'_'+str(now.hour)+'h'+str(now.minute)+'.png')
plt.close()
# *********************************************************************************************
TEST_MODE = False
plt.rcParams['figure.figsize'] = [25, 10] # Pour que ce soit plus grand
conf_path = 'configs/'
files = [f for f in listdir(conf_path) if isfile(join(conf_path, f))]
if "main.json" in files:
files.remove("main.json")
# Compute total runtime and initialise the solutions
tps_total = 0
solutions = {}
for f in files:
with open(conf_path+f, 'r') as file:
d = json.load(file)
for t in d['runtimes']:
if d['algo'] == 'recuit':
n = len(d['mus'])
elif d['algo'] == 'tabou':
n = len(d['list_sizes'])
elif d['algo'] == 'descente' and d['f_voisin_small']:
n = 2
else:
n = 1
tps_total += n*t
emplacements, equipments = extract('data/'+d['file'])
solutions[d['file']] = Solution(emplacements, equipments)
print('Total runtime :', tps_total/60, 'mins (', tps_total/3600, 'h )')
now = datetime.datetime.now()
log_file = None
for f in files:
with open(conf_path+f, 'r') as file:
d = json.load(file)
path = 'data/' + d['file']
s = solutions[d['file']]
for max_time in d["runtimes"]:
if log_file is not None:
log_file.close()
log_file = open('logs/'+f[:-5]+'.log', 'a')
now = datetime.datetime.now()
logprint("\n\n**************************************************")
logprint(now, '\n')
if TEST_MODE:
max_time = 1
logprint('----- TEST MODE ------')
logprint('Config : ', f, '\n')
logprint(d)
logprint('runtime = ', max_time)
logprint("Fitness initiale : ", obj_simple(s))
logprint("Nombre de voisins : ", nb_voisins(s))
            # Recuit (simulated annealing)
if d['algo'] == 'recuit':
for mu in d['mus']:
s_recuit, f_min, nb_steps, value_at_steps, min_at_steps = recuit(f_voisin=v_permute_one,
f_obj=obj_simple_2,
s=s,
mu=mu,
max_time=max_time,
return_stats=True)
logprint('mu = ', mu)
logprint("Fitness recuit : ", f_min)
logprint("nb_steps = ", nb_steps)
            # Tabu search (méthode tabou)
elif d['algo'] == 'tabou':
for list_size in d['list_sizes']:
if d['f_voisin_small']:
f_voisin = get_best_voisin_small
else:
f_voisin = get_best_voisin
s_tabou, f_min, nb_steps, value_at_steps, min_at_steps = tabou(f_voisin=f_voisin,
f_obj=obj_simple_2,
s=s,
list_size=list_size,
max_time=max_time,
return_stats=True)
logprint('\nlist_size = ', list_size)
logprint('f_voisin = ', f_voisin)
logprint("Fitness tabou : ", obj_simple_2(s_tabou))
logprint("nb_steps = ", nb_steps)
elif d['algo'] == 'descente':
if d['f_voisin_small']:
f_voisin = get_best_voisin_small
else:
f_voisin = get_best_voisin
s_d, f_min, nb_steps, value_at_steps, min_at_steps, nb_restart = descente(f_voisin,
obj_simple_2,
s,
max_time=max_time,
return_stats=True)
logprint('f_voisin = ', f_voisin)
logprint("Fitness descente: ", f_min)
logprint("nb_steps = ", nb_steps)
logprint('nb_restart = ', nb_restart)
elif d['algo'] == 'brute_force':
s_brute_force, f_min, nb_steps, value_at_steps, min_at_steps = brute_force(obj_simple_2,
s,
max_time,
return_stats=True)
logprint("Fitness Brute Force: ", f_min)
logprint("nb_steps = ", nb_steps)
            # Plot the graph and free memory
make_graph(f[:-5], nb_steps, value_at_steps, min_at_steps)
del value_at_steps, min_at_steps
``` |
{
"source": "jperkelens/pants",
"score": 2
} |
#### File: backend/project_info/filedeps_test.py
```python
from typing import List, Optional, Set
from pants.backend.codegen.protobuf.target_types import ProtobufLibrary
from pants.backend.project_info import filedeps
from pants.engine.target import Dependencies, Sources, Target
from pants.testutil.goal_rule_test_base import GoalRuleTestBase
class MockTarget(Target):
alias = "tgt"
core_fields = (Sources, Dependencies)
class FiledepsTest(GoalRuleTestBase):
goal_cls = filedeps.Filedeps
@classmethod
def rules(cls):
return (*super().rules(), *filedeps.rules())
@classmethod
def target_types(cls):
return [MockTarget, ProtobufLibrary]
def setup_target(
self,
path: str,
*,
sources: Optional[List[str]] = None,
dependencies: Optional[List[str]] = None,
) -> None:
if sources:
self.create_files(path, sources)
self.add_to_build_file(
path, f"tgt(sources={sources or []}, dependencies={dependencies or []})",
)
def assert_filedeps(
self,
*,
targets: List[str],
expected: Set[str],
transitive: bool = False,
globs: bool = False,
) -> None:
args = []
if globs:
args.append("--filedeps-globs")
if transitive:
args.append("--filedeps-transitive")
self.assert_console_output(*expected, args=(*args, *targets))
def test_no_target(self) -> None:
self.assert_filedeps(targets=[], expected=set())
def test_one_target_no_source(self) -> None:
self.setup_target("some/target")
self.assert_filedeps(targets=["some/target"], expected={"some/target/BUILD"})
def test_one_target_one_source(self) -> None:
self.setup_target("some/target", sources=["file.py"])
self.assert_filedeps(
targets=["some/target"], expected={"some/target/BUILD", "some/target/file.py"}
)
def test_one_target_multiple_source(self) -> None:
self.setup_target("some/target", sources=["file1.py", "file2.py"])
self.assert_filedeps(
targets=["some/target"],
expected={"some/target/BUILD", "some/target/file1.py", "some/target/file2.py"},
)
def test_one_target_no_source_one_dep(self) -> None:
self.setup_target("dep/target", sources=["file.py"])
self.setup_target("some/target", dependencies=["dep/target"])
self.assert_filedeps(targets=["some/target"], expected={"some/target/BUILD"})
self.assert_filedeps(
targets=["some/target"],
transitive=True,
expected={"some/target/BUILD", "dep/target/BUILD", "dep/target/file.py"},
)
def test_one_target_one_source_with_dep(self) -> None:
self.setup_target("dep/target", sources=["file.py"])
self.setup_target("some/target", sources=["file.py"], dependencies=["dep/target"])
direct_files = {"some/target/BUILD", "some/target/file.py"}
self.assert_filedeps(
targets=["some/target"], expected=direct_files,
)
self.assert_filedeps(
targets=["some/target"],
transitive=True,
expected={*direct_files, "dep/target/BUILD", "dep/target/file.py",},
)
def test_multiple_targets_one_source(self) -> None:
self.setup_target("some/target", sources=["file.py"])
self.setup_target("other/target", sources=["file.py"])
self.assert_filedeps(
targets=["some/target", "other/target"],
expected={
"some/target/BUILD",
"some/target/file.py",
"other/target/BUILD",
"other/target/file.py",
},
)
def test_multiple_targets_one_source_with_dep(self) -> None:
self.setup_target("dep1/target", sources=["file.py"])
self.setup_target("dep2/target", sources=["file.py"])
self.setup_target("some/target", sources=["file.py"], dependencies=["dep1/target"])
self.setup_target("other/target", sources=["file.py"], dependencies=["dep2/target"])
direct_files = {
"some/target/BUILD",
"some/target/file.py",
"other/target/BUILD",
"other/target/file.py",
}
self.assert_filedeps(
targets=["some/target", "other/target"], expected=direct_files,
)
self.assert_filedeps(
targets=["some/target", "other/target"],
transitive=True,
expected={
*direct_files,
"dep1/target/BUILD",
"dep1/target/file.py",
"dep2/target/BUILD",
"dep2/target/file.py",
},
)
def test_multiple_targets_one_source_overlapping(self) -> None:
self.setup_target("dep/target", sources=["file.py"])
self.setup_target("some/target", sources=["file.py"], dependencies=["dep/target"])
self.setup_target("other/target", sources=["file.py"], dependencies=["dep/target"])
direct_files = {
"some/target/BUILD",
"some/target/file.py",
"other/target/BUILD",
"other/target/file.py",
}
self.assert_filedeps(targets=["some/target", "other/target"], expected=direct_files)
self.assert_filedeps(
targets=["some/target", "other/target"],
transitive=True,
expected={*direct_files, "dep/target/BUILD", "dep/target/file.py"},
)
def test_globs(self) -> None:
self.create_files("some/target", ["test1.py", "test2.py"])
self.add_to_build_file("some/target", target="tgt(sources=['test*.py'])")
self.assert_filedeps(
targets=["some/target"],
expected={"some/target/BUILD", "some/target/test*.py"},
globs=True,
)
def test_build_with_file_ext(self) -> None:
self.create_file("some/target/BUILD.ext", contents="tgt()")
self.assert_filedeps(targets=["some/target"], expected={"some/target/BUILD.ext"})
def test_codegen_targets_use_protocol_files(self) -> None:
# That is, don't output generated files.
self.create_file("some/target/f.proto")
self.add_to_build_file("some/target", "protobuf_library()")
self.assert_filedeps(
targets=["some/target"], expected={"some/target/BUILD", "some/target/f.proto"}
)
```
#### File: python/dependency_inference/module_mapper_test.py
```python
from pathlib import PurePath
from textwrap import dedent
from typing import Optional
import pytest
from pants.backend.python.dependency_inference.module_mapper import (
FirstPartyModuleToAddressMapping,
PythonModule,
PythonModuleOwner,
ThirdPartyModuleToAddressMapping,
map_first_party_modules_to_addresses,
map_module_to_address,
map_third_party_modules_to_addresses,
)
from pants.backend.python.target_types import PythonLibrary, PythonRequirementLibrary
from pants.core.util_rules import source_files, stripped_source_files
from pants.engine.addresses import Address
from pants.engine.rules import RootRule
from pants.testutil.engine.util import Params
from pants.testutil.option.util import create_options_bootstrapper
from pants.testutil.test_base import TestBase
from pants.util.frozendict import FrozenDict
@pytest.mark.parametrize(
"stripped_path,expected",
[
(PurePath("top_level.py"), "top_level"),
(PurePath("dir", "subdir", "__init__.py"), "dir.subdir"),
(PurePath("dir", "subdir", "app.py"), "dir.subdir.app"),
(
PurePath("src", "python", "project", "not_stripped.py"),
"src.python.project.not_stripped",
),
],
)
def test_create_module_from_path(stripped_path: PurePath, expected: str) -> None:
assert PythonModule.create_from_stripped_path(stripped_path) == PythonModule(expected)
def test_first_party_modules_mapping() -> None:
util_addr = Address.parse("src/python/util:strutil")
test_addr = Address.parse("tests/python/project_test:test")
mapping = FirstPartyModuleToAddressMapping(
FrozenDict({"util.strutil": util_addr, "project_test.test": test_addr})
)
assert mapping.address_for_module("util.strutil") == util_addr
assert mapping.address_for_module("util.strutil.ensure_text") == util_addr
assert mapping.address_for_module("util") is None
assert mapping.address_for_module("project_test.test") == test_addr
assert mapping.address_for_module("project_test.test.TestDemo") == test_addr
assert mapping.address_for_module("project_test.test.TestDemo.method") is None
assert mapping.address_for_module("project_test") is None
assert mapping.address_for_module("project.test") is None
def test_third_party_modules_mapping() -> None:
colors_addr = Address.parse("//:ansicolors")
pants_addr = Address.parse("//:pantsbuild")
mapping = ThirdPartyModuleToAddressMapping(
FrozenDict({"colors": colors_addr, "pants": pants_addr})
)
assert mapping.address_for_module("colors") == colors_addr
assert mapping.address_for_module("colors.red") == colors_addr
assert mapping.address_for_module("pants") == pants_addr
assert mapping.address_for_module("pants.task") == pants_addr
assert mapping.address_for_module("pants.task.task") == pants_addr
assert mapping.address_for_module("pants.task.task.Task") == pants_addr
class ModuleMapperTest(TestBase):
@classmethod
def rules(cls):
return (
*super().rules(),
*stripped_source_files.rules(),
*source_files.rules(),
map_first_party_modules_to_addresses,
map_module_to_address,
map_third_party_modules_to_addresses,
RootRule(PythonModule),
)
@classmethod
def target_types(cls):
return [PythonLibrary, PythonRequirementLibrary]
def test_map_first_party_modules_to_addresses(self) -> None:
options_bootstrapper = create_options_bootstrapper(
args=["--source-root-patterns=['src/python', 'tests/python', 'build-support']"]
)
# Two modules belonging to the same target. We should generate subtargets for each file.
self.create_files("src/python/project/util", ["dirutil.py", "tarutil.py"])
self.add_to_build_file("src/python/project/util", "python_library()")
# A module with two owners, meaning that neither should be resolved.
self.create_file("src/python/two_owners.py")
self.add_to_build_file("src/python", "python_library()")
self.create_file("build-support/two_owners.py")
self.add_to_build_file("build-support", "python_library()")
# A package module. Because there's only one source file belonging to the target, we should
# not generate subtargets.
self.create_file("tests/python/project_test/demo_test/__init__.py")
self.add_to_build_file("tests/python/project_test/demo_test", "python_library()")
result = self.request_single_product(FirstPartyModuleToAddressMapping, options_bootstrapper)
assert result.mapping == FrozenDict(
{
"project.util.dirutil": Address(
"src/python/project/util", relative_file_path="dirutil.py", target_name="util",
),
"project.util.tarutil": Address(
"src/python/project/util", relative_file_path="tarutil.py", target_name="util",
),
"project_test.demo_test": Address(
"tests/python/project_test/demo_test",
relative_file_path="__init__.py",
target_name="demo_test",
),
}
)
def test_map_third_party_modules_to_addresses(self) -> None:
self.add_to_build_file(
"3rdparty/python",
dedent(
"""\
python_requirement_library(
name='ansicolors',
requirements=['ansicolors==1.21'],
module_mapping={'ansicolors': ['colors']},
)
python_requirement_library(
name='req1',
requirements=['req1', 'two_owners'],
)
python_requirement_library(
name='un_normalized',
requirements=['Un-Normalized-Project>3', 'two_owners'],
)
"""
),
)
result = self.request_single_product(
ThirdPartyModuleToAddressMapping, Params(create_options_bootstrapper())
)
assert result.mapping == FrozenDict(
{
"colors": Address.parse("3rdparty/python:ansicolors"),
"req1": Address.parse("3rdparty/python:req1"),
"un_normalized_project": Address.parse("3rdparty/python:un_normalized"),
}
)
def test_map_module_to_address(self) -> None:
options_bootstrapper = create_options_bootstrapper(
args=["--source-root-patterns=['source_root1', 'source_root2', '/']"]
)
def get_owner(module: str) -> Optional[Address]:
return self.request_single_product(
PythonModuleOwner, Params(PythonModule(module), options_bootstrapper)
).address
# First check that we can map 3rd-party modules.
self.add_to_build_file(
"3rdparty/python",
dedent(
"""\
python_requirement_library(
name='ansicolors',
requirements=['ansicolors==1.21'],
module_mapping={'ansicolors': ['colors']},
)
"""
),
)
assert get_owner("colors.red") == Address.parse("3rdparty/python:ansicolors")
# Check a first party module using a module path.
self.create_file("source_root1/project/app.py")
self.create_file("source_root1/project/file2.py")
self.add_to_build_file("source_root1/project", "python_library()")
assert get_owner("project.app") == Address(
"source_root1/project", relative_file_path="app.py", target_name="project"
)
# Check a package path
self.create_file("source_root2/project/subdir/__init__.py")
self.add_to_build_file("source_root2/project/subdir", "python_library()")
assert get_owner("project.subdir") == Address(
"source_root2/project/subdir", relative_file_path="__init__.py", target_name="subdir",
)
# Test a module with no owner (stdlib). This also sanity checks that we can handle when
# there is no parent module.
assert get_owner("typing") is None
# Test a module with a single owner with a top-level source root of ".". Also confirm we
# can handle when the module includes a symbol (like a class name) at the end.
self.create_file("script.py")
self.add_to_build_file("", "python_library(name='script')")
assert get_owner("script.Demo") == Address(
"", relative_file_path="script.py", target_name="script"
)
```
#### File: backend/python/register.py
```python
from pants.backend.python.dependency_inference import rules as dependency_inference_rules
from pants.backend.python.pants_requirement import PantsRequirement
from pants.backend.python.python_artifact import PythonArtifact
from pants.backend.python.python_requirements import PythonRequirements
from pants.backend.python.rules import (
ancestor_files,
coverage,
create_python_binary,
pex,
pex_cli,
pex_environment,
pex_from_targets,
pytest_runner,
python_sources,
repl,
run_python_binary,
run_setup_py,
)
from pants.backend.python.subsystems import python_native_code, subprocess_environment
from pants.backend.python.target_types import (
PythonBinary,
PythonDistribution,
PythonLibrary,
PythonRequirementLibrary,
PythonRequirementsFile,
PythonTests,
)
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.python.python_requirement import PythonRequirement
def build_file_aliases():
return BuildFileAliases(
objects={
"python_requirement": PythonRequirement,
"python_artifact": PythonArtifact,
"setup_py": PythonArtifact,
},
context_aware_object_factories={
"python_requirements": PythonRequirements,
PantsRequirement.alias: PantsRequirement,
},
)
def rules():
return (
*coverage.rules(),
*ancestor_files.rules(),
*python_sources.rules(),
*dependency_inference_rules.rules(),
*pex.rules(),
*pex_cli.rules(),
*pex_environment.rules(),
*pex_from_targets.rules(),
*pytest_runner.rules(),
*create_python_binary.rules(),
*python_native_code.rules(),
*repl.rules(),
*run_python_binary.rules(),
*run_setup_py.rules(),
*subprocess_environment.rules(),
)
def target_types():
return [
PythonBinary,
PythonDistribution,
PythonLibrary,
PythonRequirementLibrary,
PythonRequirementsFile,
PythonTests,
]
```
#### File: python/rules/pytest_runner_integration_test.py
```python
import os
import re
from pathlib import Path, PurePath
from textwrap import dedent
from typing import List, Optional
from pants.backend.python.dependency_inference import rules as dependency_inference_rules
from pants.backend.python.rules import pex, pex_from_targets, pytest_runner, python_sources
from pants.backend.python.rules.coverage import create_coverage_config
from pants.backend.python.rules.pytest_runner import PythonTestFieldSet
from pants.backend.python.target_types import PythonLibrary, PythonRequirementLibrary, PythonTests
from pants.core.goals.test import TestDebugRequest, TestResult
from pants.core.util_rules import source_files, stripped_source_files
from pants.engine.addresses import Address
from pants.engine.fs import DigestContents, FileContent
from pants.engine.process import InteractiveRunner
from pants.engine.rules import RootRule
from pants.testutil.engine.util import Params
from pants.testutil.external_tool_test_base import ExternalToolTestBase
from pants.testutil.interpreter_selection_utils import skip_unless_python27_and_python3_present
from pants.testutil.option.util import create_options_bootstrapper
class PytestRunnerIntegrationTest(ExternalToolTestBase):
source_root = "tests/python"
package = os.path.join(source_root, "pants_test")
good_source = FileContent(path="test_good.py", content=b"def test():\n pass\n")
bad_source = FileContent(path="test_bad.py", content=b"def test():\n assert False\n")
py3_only_source = FileContent(path="test_py3.py", content=b"def test() -> None:\n pass\n")
library_source = FileContent(path="library.py", content=b"def add_two(x):\n return x + 2\n")
conftest_source = FileContent(
path="conftest.py",
content=b"def pytest_runtest_setup(item):\n" b" print('In conftest!')\n",
)
def write_file(self, file_content: FileContent) -> None:
self.create_file(
relpath=PurePath(self.package, file_content.path).as_posix(),
contents=file_content.content.decode(),
)
def create_python_library(
self,
source_files: List[FileContent],
*,
name: str = "library",
dependencies: Optional[List[str]] = None,
) -> None:
for source_file in source_files:
self.write_file(source_file)
source_globs = [PurePath(source_file.path).name for source_file in source_files] + [
"__init__.py"
]
self.add_to_build_file(
self.package,
dedent(
f"""\
python_library(
name={repr(name)},
sources={source_globs},
dependencies={[*(dependencies or ())]},
)
"""
),
)
self.create_file(os.path.join(self.package, "__init__.py"))
def create_python_test_target(
self,
source_files: List[FileContent],
*,
dependencies: Optional[List[str]] = None,
interpreter_constraints: Optional[str] = None,
) -> None:
self.add_to_build_file(
relpath=self.package,
target=dedent(
f"""\
python_tests(
name='target',
dependencies={dependencies or []},
compatibility={[interpreter_constraints] if interpreter_constraints else []},
)
"""
),
)
for source_file in source_files:
self.write_file(source_file)
def setup_thirdparty_dep(self) -> None:
self.add_to_build_file(
relpath="3rdparty/python",
target=dedent(
"""\
python_requirement_library(
name='ordered-set',
requirements=['ordered-set==3.1.1'],
)
"""
),
)
@classmethod
def target_types(cls):
return [PythonLibrary, PythonTests, PythonRequirementLibrary]
@classmethod
def rules(cls):
return (
*super().rules(),
create_coverage_config,
*pytest_runner.rules(),
*python_sources.rules(),
*pex.rules(),
*pex_from_targets.rules(),
*source_files.rules(),
*stripped_source_files.rules(),
RootRule(PythonTestFieldSet),
# For conftest detection.
*dependency_inference_rules.rules(),
)
def run_pytest(
self,
*,
address: Optional[Address] = None,
passthrough_args: Optional[str] = None,
junit_xml_dir: Optional[str] = None,
use_coverage: bool = False,
execution_slot_var: Optional[str] = None,
) -> TestResult:
args = [
"--backend-packages=pants.backend.python",
f"--source-root-patterns={self.source_root}",
# pin to lower versions so that we can run Python 2 tests
"--pytest-version=pytest>=4.6.6,<4.7",
"--pytest-pytest-plugins=['zipp==1.0.0', 'pytest-cov>=2.8.1,<2.9']",
]
if passthrough_args:
args.append(f"--pytest-args='{passthrough_args}'")
if junit_xml_dir:
args.append(f"--pytest-junit-xml-dir={junit_xml_dir}")
if use_coverage:
args.append("--test-use-coverage")
if execution_slot_var:
args.append(f"--pytest-execution-slot-var={execution_slot_var}")
if not address:
address = Address(self.package, target_name="target")
params = Params(
PythonTestFieldSet.create(PythonTests({}, address=address)),
create_options_bootstrapper(args=args),
)
test_result = self.request_single_product(TestResult, params)
debug_request = self.request_single_product(TestDebugRequest, params)
if debug_request.process is not None:
debug_result = InteractiveRunner(self.scheduler).run(debug_request.process)
assert test_result.exit_code == debug_result.exit_code
return test_result
def test_single_passing_test(self) -> None:
self.create_python_test_target([self.good_source])
result = self.run_pytest()
assert result.exit_code == 0
assert f"{self.package}/test_good.py ." in result.stdout
def test_single_failing_test(self) -> None:
self.create_python_test_target([self.bad_source])
result = self.run_pytest()
assert result.exit_code == 1
assert f"{self.package}/test_bad.py F" in result.stdout
def test_mixed_sources(self) -> None:
self.create_python_test_target([self.good_source, self.bad_source])
result = self.run_pytest()
assert result.exit_code == 1
assert f"{self.package}/test_good.py ." in result.stdout
assert f"{self.package}/test_bad.py F" in result.stdout
def test_absolute_import(self) -> None:
self.create_python_library([self.library_source])
source = FileContent(
path="test_absolute_import.py",
content=dedent(
"""\
from pants_test.library import add_two
def test():
assert add_two(2) == 4
"""
).encode(),
)
self.create_python_test_target([source], dependencies=[":library"])
result = self.run_pytest()
assert result.exit_code == 0
assert f"{self.package}/test_absolute_import.py ." in result.stdout
def test_relative_import(self) -> None:
self.create_python_library([self.library_source])
source = FileContent(
path="test_relative_import.py",
content=dedent(
"""\
from .library import add_two
def test():
assert add_two(2) == 4
"""
).encode(),
)
self.create_python_test_target([source], dependencies=[":library"])
result = self.run_pytest()
assert result.exit_code == 0
assert f"{self.package}/test_relative_import.py ." in result.stdout
def test_transitive_dep(self) -> None:
self.create_python_library([self.library_source])
transitive_dep_fc = FileContent(
path="transitive_dep.py",
content=dedent(
"""\
from pants_test.library import add_two
def add_four(x):
return add_two(x) + 2
"""
).encode(),
)
self.create_python_library(
[transitive_dep_fc], name="transitive_dep", dependencies=[":library"]
)
source = FileContent(
path="test_transitive_dep.py",
content=dedent(
"""\
from pants_test.transitive_dep import add_four
def test():
assert add_four(2) == 6
"""
).encode(),
)
self.create_python_test_target([source], dependencies=[":transitive_dep"])
result = self.run_pytest()
assert result.exit_code == 0
assert f"{self.package}/test_transitive_dep.py ." in result.stdout
def test_thirdparty_dep(self) -> None:
self.setup_thirdparty_dep()
source = FileContent(
path="test_3rdparty_dep.py",
content=dedent(
"""\
from ordered_set import OrderedSet
def test():
assert OrderedSet((1, 2)) == OrderedSet([1, 2])
"""
).encode(),
)
self.create_python_test_target([source], dependencies=["3rdparty/python:ordered-set"])
result = self.run_pytest()
assert result.exit_code == 0
assert f"{self.package}/test_3rdparty_dep.py ." in result.stdout
def test_thirdparty_transitive_dep(self) -> None:
self.setup_thirdparty_dep()
library_fc = FileContent(
path="library.py",
content=dedent(
"""\
import string
from ordered_set import OrderedSet
alphabet = OrderedSet(string.ascii_lowercase)
"""
).encode(),
)
self.create_python_library(
[library_fc], dependencies=["3rdparty/python:ordered-set"],
)
source = FileContent(
path="test_3rdparty_transitive_dep.py",
content=dedent(
"""\
from pants_test.library import alphabet
def test():
assert 'a' in alphabet and 'z' in alphabet
"""
).encode(),
)
self.create_python_test_target([source], dependencies=[":library"])
result = self.run_pytest()
assert result.exit_code == 0
assert f"{self.package}/test_3rdparty_transitive_dep.py ." in result.stdout
@skip_unless_python27_and_python3_present
def test_uses_correct_python_version(self) -> None:
self.create_python_test_target(
[self.py3_only_source], interpreter_constraints="CPython==2.7.*"
)
py2_result = self.run_pytest()
assert py2_result.exit_code == 2
assert "SyntaxError: invalid syntax" in py2_result.stdout
Path(
self.build_root, self.package, "BUILD"
).unlink() # Cleanup in order to recreate the target
self.create_python_test_target(
[self.py3_only_source], interpreter_constraints="CPython>=3.6"
)
py3_result = self.run_pytest()
assert py3_result.exit_code == 0
assert f"{self.package}/test_py3.py ." in py3_result.stdout
def test_respects_passthrough_args(self) -> None:
source = FileContent(
path="test_config.py",
content=dedent(
"""\
def test_run_me():
pass
def test_ignore_me():
pass
"""
).encode(),
)
self.create_python_test_target([source])
result = self.run_pytest(passthrough_args="-k test_run_me")
assert result.exit_code == 0
assert f"{self.package}/test_config.py ." in result.stdout
assert "collected 2 items / 1 deselected / 1 selected" in result.stdout
def test_junit(self) -> None:
self.create_python_test_target([self.good_source])
result = self.run_pytest(junit_xml_dir="dist/test-results")
assert result.exit_code == 0
assert f"{self.package}/test_good.py ." in result.stdout
assert result.xml_results is not None
digest_contents = self.request_single_product(DigestContents, result.xml_results)
assert len(digest_contents) == 1
file = digest_contents[0]
assert file.path.startswith("dist/test-results")
assert b"pants_test.test_good" in file.content
def test_coverage(self) -> None:
self.create_python_test_target([self.good_source])
result = self.run_pytest(use_coverage=True)
assert result.exit_code == 0
assert f"{self.package}/test_good.py ." in result.stdout
assert result.coverage_data is not None
def test_conftest_handling(self) -> None:
"""Tests that we a) inject a dependency on conftest.py and b) skip running directly on
conftest.py."""
self.create_python_test_target([self.good_source])
self.create_file(
PurePath(self.source_root, self.conftest_source.path).as_posix(),
self.conftest_source.content.decode(),
)
self.add_to_build_file(self.source_root, "python_tests()")
result = self.run_pytest(passthrough_args="-s")
assert result.exit_code == 0
assert f"{self.package}/test_good.py In conftest!\n." in result.stdout
result = self.run_pytest(
address=Address(self.source_root, relative_file_path="conftest.py")
)
assert result.exit_code is None
def test_execution_slot_variable(self) -> None:
source = FileContent(
path="test_concurrency_slot.py",
content=dedent(
"""\
import os
def test_fail_printing_slot_env_var():
slot = os.getenv("SLOT")
print(f"Value of slot is {slot}")
# Deliberately fail the test so the SLOT output gets printed to stdout
assert 1 == 2
"""
).encode(),
)
self.create_python_test_target([source])
result = self.run_pytest(execution_slot_var="SLOT")
assert result.exit_code == 1
assert re.search(r"Value of slot is \d+", result.stdout)
```
#### File: pants/base/exiter_integration_test.py
```python
from pants.testutil.pants_integration_test import PantsIntegrationTest, ensure_daemon
class ExiterIntegrationTest(PantsIntegrationTest):
"""Tests that "interesting" exceptions are properly rendered."""
@ensure_daemon
def test_unicode_containing_exception(self):
"""Test whether we can run a single target without special flags."""
pants_run = self.run_pants(["run", "testprojects/src/python/unicode/compilation_failure"])
self.assert_failure(pants_run)
self.assertIn("import sys¡", pants_run.stderr)
```
#### File: pants/engine/desktop.py
```python
from abc import ABC
from dataclasses import dataclass
from pathlib import PurePath
from typing import ClassVar, Iterable, Iterator
from pants.engine.console import Console
from pants.engine.process import InteractiveProcess, InteractiveRunner
from pants.util.osutil import get_os_name
from pants.util.strutil import safe_shlex_join
@dataclass(frozen=True)
class _Opener(ABC):
console: Console
runner: InteractiveRunner
program: ClassVar[str]
def _iter_openers(self, files: Iterable[PurePath]) -> Iterator[InteractiveProcess]:
# N.B.: We cannot mark this method @abc.abstractmethod due to:
# https://github.com/python/mypy/issues/5374
raise NotImplementedError()
def open(self, files: Iterable[PurePath]) -> None:
for request in self._iter_openers(files):
open_command = safe_shlex_join(request.argv)
try:
result = self.runner.run(request)
if result.exit_code != 0:
self.console.print_stderr(
f"Failed to open files for viewing using `{open_command}` - received exit "
f"code {result.exit_code}."
)
except Exception as e:
self.console.print_stderr(
f"Failed to open files for viewing using " f"`{open_command}`: {e}"
)
self.console.print_stderr(
f"Ensure {self.program} is installed on your `PATH` and " f"re-run this goal."
)
class _DarwinOpener(_Opener):
program = "open"
def _iter_openers(self, files: Iterable[PurePath]) -> Iterator[InteractiveProcess]:
yield InteractiveProcess(
argv=(self.program, *(str(f) for f in files)), run_in_workspace=True
)
class _LinuxOpener(_Opener):
program = "xdg-open"
def _iter_openers(self, files: Iterable[PurePath]) -> Iterator[InteractiveProcess]:
for f in files:
yield InteractiveProcess(argv=(self.program, str(f)), run_in_workspace=True)
_OPENERS_BY_OSNAME = {"darwin": _DarwinOpener, "linux": _LinuxOpener}
def ui_open(console: Console, runner: InteractiveRunner, files: Iterable[PurePath]) -> None:
"""Opens the given files with the appropriate application for the current operating system.
Any failures to either locate an appropriate application to open the files with or else execute
that program are reported to the console stderr.
"""
osname = get_os_name()
opener_type = _OPENERS_BY_OSNAME.get(osname)
if opener_type is None:
console.print_stderr(f"Could not open {' '.join(map(str, files))} for viewing.")
console.print_stderr(
f"Opening files for viewing is currently not supported for the "
f"{osname} operating system."
)
return
opener = opener_type(console, runner)
opener.open(files)
```
#### File: pants/engine/engine_aware.py
```python
from abc import ABC
from typing import Dict, Optional
from pants.engine.fs import Digest
from pants.util.logging import LogLevel
class EngineAware(ABC):
"""This is a marker class used to indicate that the output of an `@rule` can send metadata about
the rule's output to the engine.
EngineAware defines abstract methods on the class, all of which return an Optional[T], and which
are expected to be overridden by concrete types implementing EngineAware.
"""
def level(self) -> Optional[LogLevel]:
"""Overrides the level of the workunit associated with this type."""
return None
def message(self) -> Optional[str]:
"""Sets an optional result message on the workunit."""
return None
def artifacts(self) -> Optional[Dict[str, Digest]]:
"""Sets a map of names to `Digest`s to appear as artifacts on the workunit."""
return None
```
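A hedged sketch of a rule output type that overrides these hooks. The `LintReport` class, its fields, and the returned values are illustrative assumptions and are not part of the Pants codebase.

```python
# Hypothetical example: a rule output type that opts into the EngineAware hooks.
# The class name and returned values are illustrative assumptions only.
from typing import Dict, Optional

from pants.engine.engine_aware import EngineAware
from pants.engine.fs import Digest
from pants.util.logging import LogLevel


class LintReport(EngineAware):
    def __init__(self, exit_code: int, report_digest: Digest) -> None:
        self.exit_code = exit_code
        self.report_digest = report_digest

    def level(self) -> Optional[LogLevel]:
        # Surface failing runs more prominently in the workunit stream.
        return LogLevel.WARN if self.exit_code != 0 else LogLevel.DEBUG

    def message(self) -> Optional[str]:
        return f"lint finished with exit code {self.exit_code}"

    def artifacts(self) -> Optional[Dict[str, Digest]]:
        return {"report": self.report_digest}
```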
#### File: engine/internals/uuid_test.py
```python
from uuid import UUID
from pants.engine.internals.uuid import UUIDRequest
from pants.engine.internals.uuid import rules as uuid_rules
from pants.engine.rules import RootRule
from pants.testutil.test_base import TestBase
class UUIDTest(TestBase):
@classmethod
def rules(cls):
return (
*super().rules(),
*uuid_rules(),
RootRule(UUIDRequest),
)
def test_distinct_uuids(self):
uuid1 = self.request_single_product(UUID, UUIDRequest())
uuid2 = self.request_single_product(UUID, UUIDRequest())
assert uuid1 != uuid2
def test_identical_uuids(self):
uuid1 = self.request_single_product(UUID, UUIDRequest(randomizer=0.0))
uuid2 = self.request_single_product(UUID, UUIDRequest(randomizer=0.0))
assert uuid1 == uuid2
```
#### File: pants/init/logging.py
```python
import http.client
import logging
import os
import sys
import warnings
from logging import Formatter, Handler, LogRecord, StreamHandler
from typing import List, Optional, TextIO, Tuple
import pants.util.logging as pants_logging
from pants.base.exception_sink import ExceptionSink
from pants.engine.internals.native import Native
from pants.util.dirutil import safe_mkdir
from pants.util.logging import LogLevel
# Although logging supports the WARN level, it's not documented and could conceivably be yanked.
# Since pants has supported 'warn' since inception, leave the 'warn' choice as-is but explicitly
# set up a 'WARN' logging level name that maps to 'WARNING'.
logging.addLevelName(logging.WARNING, "WARN")
logging.addLevelName(pants_logging.TRACE, "TRACE")
def init_rust_logger(log_level: LogLevel, log_show_rust_3rdparty: bool, use_color: bool) -> None:
Native().init_rust_logging(log_level.level, log_show_rust_3rdparty, use_color)
class NativeHandler(StreamHandler):
"""This class is installed as a Python logging module handler (using the logging.addHandler
method) and proxies logs to the Rust logging infrastructure."""
def __init__(
self,
log_level: LogLevel,
stream: Optional[TextIO] = None,
native_filename: Optional[str] = None,
):
if stream is not None and native_filename is not None:
raise RuntimeError("NativeHandler must output to either a stream or a file, not both")
super().__init__(stream)
self.native = Native()
self.native_filename = native_filename
self.setLevel(log_level.level)
if stream:
try:
self.native.setup_stderr_logger(log_level.level)
except Exception as e:
print(f"Error setting up pantsd logger: {e!r}", file=sys.stderr)
raise e
def emit(self, record: LogRecord) -> None:
self.native.write_log(
msg=self.format(record), level=record.levelno, target=f"{record.name}:pid={os.getpid()}"
)
def flush(self) -> None:
self.native.flush_log()
def __repr__(self) -> str:
return (
f"NativeHandler(id={id(self)}, level={self.level}, filename={self.native_filename}, "
f"stream={self.stream})"
)
class ExceptionFormatter(Formatter):
"""Uses the `--print-exception-stacktrace` option to decide whether to render stacktraces."""
def formatException(self, exc_info):
if ExceptionSink.should_print_exception_stacktrace:
return super().formatException(exc_info)
return "\n(Use --print-exception-stacktrace to see more error details.)"
def clear_logging_handlers():
logger = logging.getLogger(None)
for handler in get_logging_handlers():
logger.removeHandler(handler)
def get_logging_handlers() -> Tuple[Handler, ...]:
logger = logging.getLogger(None)
return tuple(logger.handlers)
def set_logging_handlers(handlers: Tuple[Handler, ...]):
clear_logging_handlers()
logger = logging.getLogger(None)
for handler in handlers:
logger.addHandler(handler)
def _common_logging_setup(level: LogLevel, warnings_filter_regexes: Optional[List[str]]) -> None:
def trace_fn(self, message, *args, **kwargs):
if self.isEnabledFor(LogLevel.TRACE.level):
self._log(LogLevel.TRACE.level, message, *args, **kwargs)
logging.Logger.trace = trace_fn # type: ignore[attr-defined]
logger = logging.getLogger(None)
level.set_level_for(logger)
# This routes warnings through our loggers instead of straight to raw stderr.
logging.captureWarnings(True)
for message_regexp in warnings_filter_regexes or ():
warnings.filterwarnings(action="ignore", message=message_regexp)
if logger.isEnabledFor(LogLevel.TRACE.level):
http.client.HTTPConnection.debuglevel = 1 # type: ignore[attr-defined]
requests_logger = logging.getLogger("requests.packages.urllib3")
LogLevel.TRACE.set_level_for(requests_logger)
requests_logger.propagate = True
def setup_logging(global_bootstrap_options):
"""Sets up logging for a pants run.
This is called in two contexts: 1) PantsRunner, 2) DaemonPantsRunner. In the latter case, the
loggers are saved and restored around this call, so in both cases it runs with no handlers
configured (and asserts so!).
"""
if get_logging_handlers():
raise AssertionError("setup_logging should not be called while Handlers are installed.")
ignores = global_bootstrap_options.ignore_pants_warnings
global_level = global_bootstrap_options.level
log_dir = global_bootstrap_options.logdir
log_show_rust_3rdparty = global_bootstrap_options.log_show_rust_3rdparty
use_color = global_bootstrap_options.colors
init_rust_logger(global_level, log_show_rust_3rdparty, use_color)
setup_logging_to_stderr(global_level, warnings_filter_regexes=ignores)
if log_dir:
setup_logging_to_file(global_level, log_dir=log_dir, warnings_filter_regexes=ignores)
def setup_logging_to_stderr(
level: LogLevel, *, warnings_filter_regexes: Optional[List[str]] = None
) -> None:
"""Sets up Python logging to stderr, proxied to Rust via a NativeHandler.
We deliberately set the most verbose logging possible (i.e., the TRACE log level) here, and let
the Rust logging facilities take care of filtering.
"""
_common_logging_setup(level, warnings_filter_regexes)
python_logger = logging.getLogger(None)
handler = NativeHandler(level, stream=sys.stderr)
handler.setFormatter(ExceptionFormatter())
python_logger.addHandler(handler)
LogLevel.TRACE.set_level_for(python_logger)
def setup_logging_to_file(
level: LogLevel,
*,
log_dir: str,
log_filename: str = "pants.log",
warnings_filter_regexes: Optional[List[str]] = None,
) -> NativeHandler:
native = Native()
logger = logging.getLogger(None)
_common_logging_setup(level, warnings_filter_regexes)
safe_mkdir(log_dir)
log_path = os.path.join(log_dir, log_filename)
fd = native.setup_pantsd_logger(log_path, level.level)
ExceptionSink.reset_interactive_output_stream(os.fdopen(os.dup(fd), "a"))
handler = NativeHandler(level, native_filename=log_path)
logger.addHandler(handler)
return handler
```
#### File: scm/subsystems/changed.py
```python
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Tuple, cast
from pants.backend.project_info import dependees
from pants.backend.project_info.dependees import Dependees, DependeesRequest
from pants.base.build_environment import get_buildroot
from pants.base.deprecated import resolve_conflicting_options
from pants.engine.addresses import Address
from pants.engine.collection import Collection
from pants.engine.internals.graph import Owners, OwnersRequest
from pants.engine.rules import Get, RootRule, collect_rules, rule
from pants.option.option_value_container import OptionValueContainer
from pants.option.subsystem import Subsystem
from pants.scm.scm import Scm
class DependeesOption(Enum):
NONE = "none"
DIRECT = "direct"
TRANSITIVE = "transitive"
@dataclass(frozen=True)
class ChangedRequest:
sources: Tuple[str, ...]
dependees: DependeesOption
class ChangedAddresses(Collection[Address]):
pass
@rule
async def find_changed_owners(request: ChangedRequest) -> ChangedAddresses:
owners = await Get(Owners, OwnersRequest(request.sources))
if request.dependees == DependeesOption.NONE:
return ChangedAddresses(owners)
dependees_with_roots = await Get(
Dependees,
DependeesRequest(
owners, transitive=request.dependees == DependeesOption.TRANSITIVE, include_roots=True,
),
)
return ChangedAddresses(dependees_with_roots)
@dataclass(frozen=True)
class ChangedOptions:
"""A wrapper for the options from the `Changed` Subsystem.
This is necessary because parsing of these options happens before conventional subsystems are
configured, so the normal mechanisms like `SubsystemRule` would not work properly.
"""
since: Optional[str]
diffspec: Optional[str]
dependees: DependeesOption
@classmethod
def from_options(cls, options: OptionValueContainer) -> "ChangedOptions":
since = resolve_conflicting_options(
old_option="changes_since",
new_option="since",
old_scope="changed",
new_scope="changed",
old_container=options,
new_container=options,
)
dependees = resolve_conflicting_options(
old_option="include_dependees",
new_option="dependees",
old_scope="changed",
new_scope="changed",
old_container=options,
new_container=options,
)
return cls(since, options.diffspec, dependees)
@property
def provided(self) -> bool:
return bool(self.since) or bool(self.diffspec)
def changed_files(self, *, scm: Scm) -> List[str]:
"""Determines the files changed according to SCM/workspace and options."""
if self.diffspec:
return cast(List[str], scm.changes_in(self.diffspec, relative_to=get_buildroot()))
changes_since = self.since or scm.current_rev_identifier
return cast(
List[str],
scm.changed_files(
from_commit=changes_since, include_untracked=True, relative_to=get_buildroot()
),
)
class Changed(Subsystem):
"""Tell Pants to detect what files and targets have changed from Git.
See https://www.pantsbuild.org/docs/advanced-target-selection.
"""
options_scope = "changed"
@classmethod
def register_options(cls, register):
register(
"--since",
type=str,
default=None,
help="Calculate changes since this Git spec (commit range/SHA/ref).",
)
register(
"--changes-since",
"--parent",
type=str,
default=None,
removal_version="2.1.0.dev0",
removal_hint=(
"Use `--changed-since` instead of `--changed-parent` or `--changed-changes-since`."
),
help="Calculate changes since this tree-ish/scm ref.",
)
register(
"--diffspec",
type=str,
default=None,
help="Calculate changes contained within a given Git spec (commit range/SHA/ref).",
)
register(
"--dependees",
type=DependeesOption,
default=DependeesOption.NONE,
help="Include direct or transitive dependees of changed targets.",
)
register(
"--include-dependees",
type=DependeesOption,
default=DependeesOption.NONE,
help="Include direct or transitive dependees of changed targets.",
removal_version="2.1.0.dev0",
removal_hint="Use `--changed-dependees` instead of `--changed-include-dependees`.",
)
register(
"--fast",
type=bool,
default=False,
help="Stop searching for owners once a source is mapped to at least one owning target.",
removal_version="2.1.0.dev0",
removal_hint="The option `--changed-fast` no longer does anything.",
)
def rules():
return [
*collect_rules(),
RootRule(ChangedRequest),
*dependees.rules(),
]
```
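A tiny, hedged sketch of constructing `ChangedOptions` by hand. The module path and the git spec are assumptions for illustration; resolving the actual changed files would additionally require an `Scm` instance.

```python
# Sketch only: the module path and git spec below are assumptions.
from pants.scm.subsystems.changed import ChangedOptions, DependeesOption

opts = ChangedOptions(since="HEAD~1", diffspec=None, dependees=DependeesOption.TRANSITIVE)
assert opts.provided  # True because `since` is set
# opts.changed_files(scm=...) would then list the files changed since HEAD~1.
```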
#### File: testutil/option/fakes.py
```python
from collections import defaultdict
from pants.option.parser_hierarchy import enclosing_scope
from pants.option.ranked_value import Rank, RankedValue
from pants.option.scope import GLOBAL_SCOPE
class _FakeOptionValues:
def __init__(self, option_values):
self._option_values = option_values
def __iter__(self):
return iter(self._option_values.keys())
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
if hasattr(self, key):
return getattr(self, key, default)
return default
def __getattr__(self, key):
try:
value = self._option_values[key]
except KeyError:
# Instead of letting KeyError raise here, re-raise an AttributeError to not break getattr().
raise AttributeError(key)
return value.value if isinstance(value, RankedValue) else value
def get_rank(self, key):
value = self._option_values[key]
return value.rank if isinstance(value, RankedValue) else Rank.FLAG
def is_flagged(self, key):
return self.get_rank(key) == Rank.FLAG
def is_default(self, key):
return self.get_rank(key) in (Rank.NONE, Rank.HARDCODED)
@property
def option_values(self):
return self._option_values
def create_options(options, passthru_args=None, fingerprintable_options=None):
"""Create a fake Options object for testing.
Note that the returned object only provides access to the provided options values. There is
no registration mechanism on this object. Code under test shouldn't care about resolving
cmd-line flags vs. config vs. env vars etc. etc.
:param dict options: A dict of scope -> (dict of option name -> value).
:param list passthru_args: A list of passthrough command line argument values.
:param dict fingerprintable_options: A dict of scope -> (dict of option name -> option type).
This registry should contain entries for any of the
`options` that are expected to contribute to fingerprinting.
:returns: A fake `Options` object encapsulating the given scoped options.
"""
fingerprintable = fingerprintable_options or defaultdict(dict)
class FakeOptions:
def for_scope(self, scope):
# TODO(<NAME>): Some users pass in a dict of scope -> _FakeOptionValues instead of a
# dict of scope -> (dict of option name -> value). Clean up these usages and kill this
# accommodation.
options_for_this_scope = options.get(scope) or {}
if isinstance(options_for_this_scope, _FakeOptionValues):
options_for_this_scope = options_for_this_scope.option_values
if passthru_args:
# TODO: This is _very_ partial support for passthrough args: this should be
# inspecting the kwargs of option registrations to decide which arguments to
# extend: this explicit `passthrough_args` argument is only passthrough because
# it is marked as such.
pa = options_for_this_scope.get("passthrough_args", [])
if isinstance(pa, RankedValue):
pa = pa.value
options_for_this_scope["passthrough_args"] = [*pa, *passthru_args]
scoped_options = {}
if scope:
scoped_options.update(self.for_scope(enclosing_scope(scope)).option_values)
scoped_options.update(options_for_this_scope)
return _FakeOptionValues(scoped_options)
def for_global_scope(self):
return self.for_scope(GLOBAL_SCOPE)
def items(self):
return list(options.items())
@property
def scope_to_flags(self):
return {}
def get_fingerprintable_for_scope(self, bottom_scope):
"""Returns a list of fingerprintable (option type, option value) pairs for the given
scope.
Note that this method only collects values for a single scope, NOT from
all enclosing scopes as in the Options class!
:param str bottom_scope: The scope to gather fingerprintable options for.
"""
pairs = []
option_values = self.for_scope(bottom_scope)
for option_name, option_type in fingerprintable[bottom_scope].items():
pairs.append((option_type, option_values[option_name]))
return pairs
def __getitem__(self, scope):
return self.for_scope(scope)
return FakeOptions()
```
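A small usage sketch of `create_options`; the import path follows the file header above, and the scope and option names are invented for the example.

```python
# Illustrative only: the option scopes/names below are invented for the example.
from pants.testutil.option.fakes import create_options

options = create_options(
    {
        "": {"level": "info"},  # global scope
        "pytest": {"timeout": 60, "args": []},
    }
)

assert options.for_global_scope().level == "info"
pytest_opts = options.for_scope("pytest")  # inherits the global scope's values
assert pytest_opts.timeout == 60
assert pytest_opts.get("does_not_exist", default=None) is None
```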
#### File: pants_test/logging/native_engine_logging_integration_test.py
```python
from pants.testutil.pants_integration_test import PantsIntegrationTest, read_pantsd_log
from pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase
class NativeEngineLoggingTest(PantsIntegrationTest):
@classmethod
def use_pantsd_env_var(cls):
"""Some of the tests here expect to read the standard error after an intentional failure.
However, when pantsd is enabled, these errors are logged to logs/exceptions.<pid>.log So
stderr appears empty. (see #7320)
"""
return False
def test_native_logging(self) -> None:
expected_msg = r"\[DEBUG\] Launching \d+ root"
pants_run = self.run_pants(["-linfo", "list", "3rdparty::"])
self.assertNotRegex(pants_run.stderr, expected_msg)
pants_run = self.run_pants(["-ldebug", "list", "3rdparty::"])
self.assertRegex(pants_run.stderr, expected_msg)
class PantsdNativeLoggingTest(PantsDaemonIntegrationTestBase):
def test_pantsd_file_logging(self) -> None:
with self.pantsd_successful_run_context("debug") as ctx:
daemon_run = ctx.runner(["list", "3rdparty::"])
ctx.checker.assert_started()
assert "[DEBUG] connecting to pantsd on port" in daemon_run.stderr_data
pantsd_log = "\n".join(read_pantsd_log(ctx.workdir))
assert "[DEBUG] logging initialized" in pantsd_log
``` |
{
"source": "jperkins12/GamestonkTerminal",
"score": 3
} |
#### File: discordbot/economy/performance.py
```python
import discord
import config_discordbot as cfg
from helpers import pagination
from gamestonk_terminal.economy import finviz_model
async def performance_command(ctx, arg="sector"):
"""Gets the performance data of a entered sector from GST and sends it
Parameters
-----------
arg: str
sector (or any other input in the economy_group), -h or help
Returns
-------
discord message
Sends a message containing an embed of the performance data of the given arg
with pagination to the user
"""
economy_group = {
"sector": "Sector",
"industry": "Industry",
"basic_materials": "Industry (Basic Materials)",
"communication services": "Industry (Communication Services)",
"consumer_cyclical": "Industry (Consumer Cyclical)",
"consumer_defensive": "Industry (Consumer Defensive)",
"energy": "Industry (Energy)",
"financial": "Industry (Financial)",
"healthcare": "Industry (Healthcare)",
"industrials": "Industry (Industrials)",
"real_estate": "Industry (Real Estate)",
"technology": "Industry (Technology)",
"utilities": "Industry (Utilities)",
"country": "Country (U.S. listed stocks only)",
"capitalization": "Capitalization",
}
try:
# Debug
if cfg.DEBUG:
print(f"!stocks.economy.performance {arg}")
# Help
if arg == "-h" or arg == "help":
help_txt = "Group performance [Source: Finviz]\n"
possible_args = ""
for k, v in economy_group.items():
possible_args += f"\n{k}: {v}"
help_txt += "\nPossible arguments:\n"
help_txt += "<GROUP> Groups to get data from. Default: sector\n"
help_txt += f"The choices are:{possible_args}"
embed = discord.Embed(
title="Economy: [Finviz] Performance HELP",
description=help_txt,
colour=cfg.COLOR,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
else:
# Parse argument
try:
group = economy_group[arg]
except KeyError:
title = "ERROR Economy: [Finviz] Performance"
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
embed.set_description(
f"Entered group argument: {arg}"
"\nEnter a valid group argument, example: sector"
)
await ctx.send(embed=embed)
if cfg.DEBUG:
print("ERROR: Bad group argument entered")
return
df_group = finviz_model.get_valuation_performance_data(group, "performance")
future_column_name = df_group["Name"]
df_group = df_group.transpose()
df_group.columns = future_column_name
df_group.drop("Name")
columns = []
initial_str = "Page 0: Overview"
i = 1
for col_name in df_group.columns.values:
initial_str += f"\nPage {i}: {col_name}"
i += 1
columns.append(
discord.Embed(
title=f"Economy: [Finviz] Performance {group}",
description=initial_str,
colour=cfg.COLOR,
).set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
)
for column in df_group.columns.values:
columns.append(
discord.Embed(
description="```"
+ df_group[column].fillna("").to_string()
+ "```",
colour=cfg.COLOR,
).set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
)
await pagination(columns, ctx)
except Exception as e:
title = "INTERNAL ERROR"
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
embed.set_description(
"Try updating the bot, make sure DEBUG is True in the config "
"and restart it.\nIf the error still occurs open a issue at: "
"https://github.com/GamestonkTerminal/GamestonkTerminal/issues"
f"\n{e}"
)
await ctx.send(embed=embed)
if cfg.DEBUG:
print(e)
```
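A hedged sketch of wiring this coroutine into a discord.py bot. The command prefix, command name, and token attribute are assumptions (not taken from this repository's config), and newer discord.py releases also require an `intents` argument to `commands.Bot`.

```python
# Sketch only: prefix, command name and token attribute are assumptions.
from discord.ext import commands

import config_discordbot as cfg
from economy.performance import performance_command

bot = commands.Bot(command_prefix="!")  # discord.py >= 2.0 also needs intents=...


@bot.command(name="stocks.economy.performance")
async def stocks_economy_performance(ctx, arg: str = "sector"):
    """Forward the invocation to the shared command coroutine."""
    await performance_command(ctx, arg)


# bot.run(cfg.DISCORD_BOT_TOKEN)  # the token attribute name is an assumption
```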
#### File: stocks/dark_pool_shorts/sidtc.py
```python
import discord
import config_discordbot as cfg
from helpers import pagination
from gamestonk_terminal.stocks.dark_pool_shorts import stockgrid_model
async def sidtc_command(ctx, arg="float", arg2="10"):
"""Gets short interest and days to cover data from GST and sends it
Parameters
-----------
arg: str
Sort field for dark pool shorts data (float, dtc or si), -h or help
arg2: str
Number of top tickers to show
Returns
-------
discord message
Sends a message containing an embed with short interest and days to cover
data to the user
"""
try:
# Debug
if cfg.DEBUG:
print(f"!stocks.dps.sidtc {arg} {arg2}")
# Help
if arg == "-h" or arg == "help":
dark_pool_sort = {
"float": "Float Short %",
"dtc": "Days to Cover",
"si": "Short Interest",
}
help_txt = "Get short interest and days to cover. [Source: Stockgrid]\n"
possible_args = ""
for k, v in dark_pool_sort.items():
possible_args += f"\n{k}: {v}"
help_txt += "\nPossible arguments:\n"
help_txt += "<SORT> Field for which to sort by. Default: float\n"
help_txt += f"The choices are:{possible_args}\n"
help_txt += "<NUM> Number of top tickers to show. Default: 10"
embed = discord.Embed(
title="Stocks: [Stockgrid] Short Interest and Days to Cover HELP",
description=help_txt,
colour=cfg.COLOR,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
else:
# Parse argument
if arg == "float" or arg == "dtc" or arg == "si":
sort = arg
else:
title = "ERROR Stocks: [Stockgrid] Short Interest and Days to Cover"
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
embed.set_description(
f"Entered sort argument: {arg}"
"\nEnter a valid sort argument, example: float"
)
await ctx.send(embed=embed)
if cfg.DEBUG:
print("ERROR: Bad sort argument entered")
return
try:
num = int(arg2)
if num < 0:
raise ValueError("Number has to be above 0")
except ValueError:
title = "ERROR Stocks: [Stockgrid] Short Interest and Days to Cover"
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
embed.set_description(
"No number (int) entered in the second argument."
"\nEnter a valid (positive) number, example: 10"
)
await ctx.send(embed=embed)
if cfg.DEBUG:
print("ERROR: No (positive) int for second argument entered")
return
df = stockgrid_model.get_short_interest_days_to_cover(sort)
df = df.iloc[:num]
dp_date = df["Date"].values[0]
df = df.drop(columns=["Date"])
df["Short Interest"] = df["Short Interest"] / 1_000_000
df.head()
df.columns = [
"Ticker",
"Float Short %",
"Days to Cover",
"Short Interest (1M)",
]
future_column_name = df["Ticker"]
df = df.transpose()
df.columns = future_column_name
df.drop("Ticker")
columns = []
initial_str = "Page 0: Overview"
i = 1
for column in df.columns.values:
initial_str = initial_str + "\nPage " + str(i) + ": " + column
i += 1
columns.append(
discord.Embed(
title="Dark Pool Shorts", description=initial_str, colour=cfg.COLOR
).set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
)
for column in df.columns.values:
columns.append(
discord.Embed(
title="Stocks: [Stockgrid] Short Interest and Days to Cover",
description="```The following data corresponds to the date: "
+ dp_date
+ "\n\n"
+ df[column].fillna("").to_string()
+ "```",
colour=cfg.COLOR,
).set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
)
await pagination(columns, ctx)
except Exception as e:
title = "INTERNAL ERROR"
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
embed.set_description(
"Try updating the bot, make sure DEBUG is True in the config "
"and restart it.\nIf the error still occurs open a issue at: "
"https://github.com/GamestonkTerminal/GamestonkTerminal/issues"
f"\n{e}"
)
await ctx.send(embed=embed)
if cfg.DEBUG:
print(e)
```
#### File: common/prediction_techniques/regression_view.py
```python
__docformat__ = "numpy"
from typing import Union
import os
import datetime
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from gamestonk_terminal.helper_funcs import (
patch_pandas_text_adjustment,
get_next_stock_market_days,
plot_autoscale,
export_data,
)
from gamestonk_terminal.common.prediction_techniques.pred_helper import (
print_pretty_prediction,
price_prediction_backtesting_color,
print_prediction_kpis,
)
from gamestonk_terminal.common.prediction_techniques import regression_model
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal import feature_flags as gtff
register_matplotlib_converters()
def display_regression(
dataset: str,
values: Union[pd.Series, pd.DataFrame],
poly_order: int,
n_input: int,
n_predict: int,
n_jumps: int,
s_end_date: str = "",
export: str = "",
):
"""Display predications for regression models
Parameters
----------
dataset : str
Title for data
values : Union[pd.Series, pd.DataFrame]
Data to fit
poly_order : int
Order of polynomial to fit
n_input : int
Length of input sequence
n_predict : int
Length of prediction sequence
n_jumps : int
Number of jumps in data
s_end_date : str, optional
Start date for backtesting
export : str, optional
Format for exporting figures
"""
# BACKTESTING
if s_end_date:
future_index = get_next_stock_market_days(
last_stock_day=s_end_date, n_next_days=n_predict
)
df_future = values[future_index[0] : future_index[-1]]
values = values[:s_end_date] # type: ignore
l_predictions, _ = regression_model.get_regression_model(
values, poly_order, n_input, n_predict, n_jumps
)
# Prediction data
l_pred_days = get_next_stock_market_days(
last_stock_day=values.index[-1], n_next_days=n_predict
)
df_pred = pd.Series(l_predictions, index=l_pred_days, name="Price")
# Plotting
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
ax.plot(values.index, values, lw=2)
# BACKTESTING
if s_end_date:
ax.set_title(
f"BACKTESTING: Regression (polynomial {poly_order}) on {dataset} - {n_predict} days prediction"
)
else:
ax.set_title(
f"Regression (polynomial {poly_order}) on {dataset} - {n_predict} days prediction"
)
ax.set_xlim(values.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1])
ax.set_xlabel("Time")
ax.set_ylabel("Value")
ax.grid(b=True, which="major", color="#666666", linestyle="-")
ax.minorticks_on()
ax.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
ax.plot(
[values.index[-1], df_pred.index[0]],
[values.values[-1], df_pred.values[0]],
lw=1,
c="tab:green",
linestyle="--",
)
ax.plot(df_pred.index, df_pred, lw=2, c="tab:green")
ax.axvspan(values.index[-1], df_pred.index[-1], facecolor="tab:orange", alpha=0.2)
_, _, ymin, ymax = plt.axis()
ax.vlines(values.index[-1], ymin, ymax, linewidth=1, linestyle="--", color="k")
# BACKTESTING
if s_end_date:
ax.plot(
df_future.index,
df_future,
lw=2,
c="tab:blue",
ls="--",
)
ax.plot(
[values.index[-1], df_future.index[0]],
[
values.values[-1],
df_future.values[0],
],
lw=1,
c="tab:blue",
linestyle="--",
)
fig.tight_layout()
if gtff.USE_ION:
plt.ion()
plt.show()
export_data(export, os.path.dirname(os.path.abspath(__file__)), "regression")
print("")
# BACKTESTING
if s_end_date:
fig, ax = plt.subplots(1, 2, figsize=plot_autoscale(), dpi=PLOT_DPI)
ax0 = ax[0]
ax0.plot(
df_future.index,
df_future,
lw=2,
c="tab:blue",
ls="--",
)
ax0.plot(df_pred.index, df_pred, lw=2, c="green")
ax0.scatter(df_future.index, df_future, c="tab:blue", lw=3)
ax0.plot(
[values.index[-1], df_future.index[0]],
[
values.values[-1],
df_future.values[0],
],
lw=2,
c="tab:blue",
ls="--",
)
ax0.scatter(df_pred.index, df_pred, c="green", lw=3)
ax0.plot(
[values.index[-1], df_pred.index[0]],
[values.values[-1], df_pred.values[0]],
lw=2,
c="green",
ls="--",
)
ax0.set_title("BACKTESTING: Real data vs Prediction")
ax0.set_xlim(values.index[-1], df_pred.index[-1] + datetime.timedelta(days=1))
ax0.set_xticks(
[values.index[-1], df_pred.index[-1] + datetime.timedelta(days=1)]
)
ax0.set_ylabel("Value")
ax0.grid(b=True, which="major", color="#666666", linestyle="-")
ax0.minorticks_on()
ax0.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
ax0.legend(["Real data", "Prediction data"])
ax0.set_xticks([])
ax1 = ax[1]
ax1.axhline(y=0, color="k", linestyle="--", linewidth=2)
ax1.plot(
df_future.index,
100 * (df_pred.values - df_future.values) / df_future.values,
lw=2,
c="red",
)
ax1.scatter(
df_future.index,
100 * (df_pred.values - df_future.values) / df_future.values,
c="red",
lw=5,
)
ax1.set_title("BACKTESTING: Error between Real data and Prediction [%]")
ax1.plot(
[values.index[-1], df_future.index[0]],
[
0,
100 * (df_pred.values[0] - df_future.values[0]) / df_future.values[0],
],
lw=2,
ls="--",
c="red",
)
ax1.set_xlim(values.index[-1], df_pred.index[-1] + datetime.timedelta(days=1))
ax1.set_xticks(
[values.index[-1], df_pred.index[-1] + datetime.timedelta(days=1)]
)
ax1.set_xlabel("Time")
ax1.set_ylabel("Prediction Error (%)")
ax1.grid(b=True, which="major", color="#666666", linestyle="-")
ax1.minorticks_on()
ax1.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)
ax1.legend(["Real data", "Prediction data"])
fig.tight_layout()
if gtff.USE_ION:
plt.ion()
plt.show()
# Refactor prediction dataframe for backtesting print
df_pred.name = "Prediction"
df_pred = df_pred.to_frame()
df_pred["Real"] = df_future
if gtff.USE_COLOR:
patch_pandas_text_adjustment()
print("Time Real [$] x Prediction [$]")
print(df_pred.apply(price_prediction_backtesting_color, axis=1).to_string())
else:
print(df_pred[["Real", "Prediction"]].round(2).to_string())
print("")
print_prediction_kpis(df_pred["Real"].values, df_pred["Prediction"].values)
else:
# Print prediction data
print_pretty_prediction(df_pred, values.values[-1])
print("")
```
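A minimal, hedged usage sketch with synthetic data; it assumes the gamestonk_terminal package is importable and simply feeds an invented price series into `display_regression`.

```python
# Synthetic example data; all values here are invented for illustration.
import numpy as np
import pandas as pd

from gamestonk_terminal.common.prediction_techniques import regression_view

index = pd.bdate_range(end="2021-06-30", periods=200)
prices = pd.Series(100 + np.cumsum(np.random.normal(0, 1, len(index))), index=index)

regression_view.display_regression(
    dataset="SYNTHETIC",
    values=prices,
    poly_order=2,
    n_input=40,
    n_predict=5,
    n_jumps=5,
)
```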
#### File: cryptocurrency/onchain/glassnode_view.py
```python
import os
from matplotlib import pyplot as plt
import pandas as pd
from gamestonk_terminal.cryptocurrency.onchain.glassnode_model import (
get_active_addresses,
)
from gamestonk_terminal.helper_funcs import export_data, plot_autoscale
from gamestonk_terminal import config_plot as cfgPlot
def display_active_addresses(
asset: str, since: int, until: int, interval: str, export: str = ""
) -> None:
"""Display active addresses of a certain asset over time
[Source: https://glassnode.org]
Parameters
----------
asset : str
Asset to search active addresses (e.g., BTC)
since : int
Initial date timestamp (e.g., 1_614_556_800)
until : int
End date timestamp (e.g., 1_614_556_800)
interval : str
Interval frequency (e.g., 24h)
export : str
Export dataframe data to csv,json,xlsx file
"""
df_addresses = get_active_addresses(asset, interval, since, until)
if df_addresses.empty:
print("Error in glassnode request")
else:
plot_data(df_addresses, asset)
print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"active",
df_addresses,
)
def plot_data(df, asset):
df = df.set_index("t")
df.index = pd.to_datetime(df.index, unit="s")
df = df.loc[df.index > "2010-1-1"]
df.reset_index(inplace=True)
_, main_ax = plt.subplots(figsize=plot_autoscale(), dpi=cfgPlot.PLOT_DPI)
main_ax.plot(df["t"], df["v"], linewidth=0.5)
main_ax.set_yscale("log")
main_ax.grid(True)
main_ax.set_title(f"Active {asset} addresses over time")
main_ax.set_ylabel("Addresses")
main_ax.set_xlabel("Date")
plt.show()
```
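A short, hedged example of calling `display_active_addresses`; it assumes a valid Glassnode API key is configured for the terminal, and the dates below are arbitrary.

```python
# Requires a configured Glassnode API key; the dates below are arbitrary examples.
from datetime import datetime

from gamestonk_terminal.cryptocurrency.onchain import glassnode_view

since = int(datetime(2021, 1, 1).timestamp())
until = int(datetime(2021, 6, 1).timestamp())

glassnode_view.display_active_addresses(
    asset="BTC", since=since, until=until, interval="24h"
)
```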
#### File: cryptocurrency/overview/coinpaprika_model.py
```python
__docformat__ = "numpy"
from datetime import datetime
import textwrap
import pandas as pd
from dateutil import parser
from gamestonk_terminal.cryptocurrency.coinpaprika_helpers import PaprikaSession
def get_global_market() -> pd.DataFrame:
"""Return data frame with most important global crypto statistics like:
market_cap_usd, volume_24h_usd, bitcoin_dominance_percentage, cryptocurrencies_number,
market_cap_ath_value, market_cap_ath_date, volume_24h_ath_value, volume_24h_ath_date,
market_cap_change_24h, volume_24h_change_24h, last_updated. [Source: CoinPaprika]
Returns
-------
pandas.DataFrame
Most important global crypto statistics
Metric, Value
"""
session = PaprikaSession()
global_markets = session.make_request(session.ENDPOINTS["global"])
global_markets["last_updated"] = datetime.fromtimestamp(
global_markets["last_updated"]
)
for key, date in global_markets.items():
if "date" in key:
try:
global_markets[key] = parser.parse(date).strftime("%Y-%m-%d %H:%M:%S")
except (KeyError, ValueError, TypeError) as e:
print(e)
df = pd.Series(global_markets).to_frame().reset_index()
df.columns = ["Metric", "Value"]
return df
def get_list_of_coins() -> pd.DataFrame:
"""Get list of all available coins on CoinPaprika [Source: CoinPaprika]
Returns
-------
pandas.DataFrame
Available coins on CoinPaprika
rank, id, name, symbol, type
"""
session = PaprikaSession()
coins = session.make_request(session.ENDPOINTS["coins"])
df = pd.DataFrame(coins)
df = df[df["is_active"]]
return df[["rank", "id", "name", "symbol", "type"]]
def _get_coins_info_helper(quotes: str = "USD") -> pd.DataFrame:
"""Helper method that call /tickers endpoint which returns for all coins quoted in provided currency/crypto
{
"id": "btc-bitcoin",
"name": "Bitcoin",
"symbol": "BTC",
"rank": 1,
"circulating_supply": 17007062,
"total_supply": 17007062,
"max_supply": 21000000,
"beta_value": 0.735327,
"first_data_at": "2010-11-14T07:20:41Z",
"last_updated": "2018-11-14T07:20:41Z",
"quotes" : {
"USD": {
"price": 5162.15941296,
"volume_24h": 7304207651.1585,
"volume_24h_change_24h": -2.5,
"market_cap": 91094433242,
"market_cap_change_24h": 1.6,
"percent_change_15m": 0,
"percent_change_30m": 0,
"percent_change_1h": 0,
"percent_change_6h": 0,
"percent_change_12h": -0.09,
"percent_change_24h": 1.59,
"percent_change_7d": 0.28,
"percent_change_30d": 27.39,
"percent_change_1y": -37.99,
"ath_price": 20089,
"ath_date": "2017-12-17T12:19:00Z",
"percent_from_price_ath": -74.3
}
}
}
[Source: CoinPaprika]
Parameters
----------
quotes: Comma separated quotes to return e.g quotes=USD,BTC
Returns
-------
pandas.DataFrame
id, name, symbol, rank, circulating_supply, total_supply, max_supply, beta_value, first_data_at,
last_updated, price, volume_24h, volume_24h_change_24h, market_cap, market_cap_change_24h,
percent_change_15m, percent_change_30m, percent_change_1h, percent_change_6h, percent_change_12h,
percent_change_24h, percent_change_7d, percent_change_30d, percent_change_1y,
ath_price, ath_date, percent_from_price_ath
"""
session = PaprikaSession()
tickers = session.make_request(session.ENDPOINTS["tickers"], quotes=quotes)
data = pd.json_normalize(tickers)
try:
# data.columns = [col.replace(f"quotes.{quotes}.", f"{quotes.lower()}_") for col in data.columns.tolist()]
data.columns = [
col.replace(f"quotes.{quotes}.", "") for col in data.columns.tolist()
]
data.columns = [col.replace("percent", "pct") for col in list(data.columns)]
except KeyError as e:
print(e)
data.rename(
columns={
"market_cap_change_24h": "mcap_change_24h",
"pct_from_price_ath": "pct_from_ath",
},
inplace=True,
)
return data
def get_coins_info(quotes: str = "USD") -> pd.DataFrame: # > format big numbers fix
"""Returns basic coin information for all coins from CoinPaprika API [Source: CoinPaprika]
Parameters
----------
quotes: str
Comma separated quotes to return e.g quotes=USD,BTC
Returns
-------
pandas.DataFrame
rank, name, symbol, price, volume_24h, circulating_supply, total_supply,
max_supply, market_cap, beta_value, ath_price,
"""
cols = [
"rank",
"name",
"symbol",
"price",
"volume_24h",
"circulating_supply",
"total_supply",
"max_supply",
"market_cap",
"beta_value",
"ath_price",
]
return _get_coins_info_helper(quotes)[cols].sort_values(by="rank")
def get_coins_market_info(quotes: str = "USD") -> pd.DataFrame:
"""Returns basic coin information for all coins from CoinPaprika API [Source: CoinPaprika]
Parameters
----------
quotes: str
Comma separated quotes to return e.g quotes=USD,BTC
Returns
-------
pandas.DataFrame
rank, name, symbol, price, volume_24h, mcap_change_24h,
pct_change_1h, pct_change_24h, ath_price, pct_from_ath,
"""
cols = [
"rank",
"name",
"symbol",
"price",
"volume_24h",
"mcap_change_24h",
"pct_change_1h",
"pct_change_24h",
"ath_price",
"pct_from_ath",
]
return _get_coins_info_helper(quotes=quotes)[cols].sort_values(by="rank")
def get_list_of_exchanges(quotes: str = "USD") -> pd.DataFrame:
"""
List exchanges from CoinPaprika API [Source: CoinPaprika]
Parameters
----------
quotes: str
Comma separated quotes to return e.g quotes=USD,BTC
Returns
-------
pandas.DataFrame
rank, name, currencies, markets, fiats, confidence_score, reported_volume_24h,
reported_volume_7d ,reported_volume_30d, sessions_per_month,
"""
session = PaprikaSession()
exchanges = session.make_request(session.ENDPOINTS["exchanges"], quotes=quotes)
df = pd.json_normalize(exchanges)
try:
df.columns = [
col.replace(f"quotes.{quotes}.", "") for col in df.columns.tolist()
]
except KeyError as e:
print(e)
df = df[df["active"]]
cols = [
"adjusted_rank",
"id",
"name",
"currencies",
"markets",
"fiats",
"confidence_score",
"reported_volume_24h",
"reported_volume_7d",
"reported_volume_30d",
"sessions_per_month",
]
df.loc[:, "fiats"] = df["fiats"].apply(lambda x: len([i["symbol"] for i in x if x]))
df = df[cols]
df = df.applymap(
lambda x: "\n".join(textwrap.wrap(x, width=28)) if isinstance(x, str) else x
)
df.rename(
columns={"adjusted_rank": "rank", "confidence_score": "confidence"},
inplace=True,
)
df.columns = [x.replace("reported_", "") for x in df.columns]
return df.sort_values(by="rank")
def get_exchanges_market(
exchange_id: str = "binance", quotes: str = "USD"
) -> pd.DataFrame:
"""List markets by exchange ID [Source: CoinPaprika]
Parameters
----------
exchange_id: str
identifier of exchange e.g for Binance Exchange -> binance
quotes: str
Comma separated quotes to return e.g quotes=USD,BTC
Returns
-------
pandas.DataFrame
pair, base_currency_name, quote_currency_name, market_url,
category, reported_volume_24h_share, trust_score,
"""
session = PaprikaSession()
data = session.make_request(
session.ENDPOINTS["exchange_markets"].format(exchange_id), quotes=quotes
)
if "error" in data:
print(data)
return pd.DataFrame()
cols = [
"exchange_id",
"pair",
"base_currency_name",
"quote_currency_name",
"category",
"reported_volume_24h_share",
"trust_score",
"market_url",
]
df = pd.DataFrame(data)
df["exchange_id"] = exchange_id
return df[cols]
def get_all_contract_platforms() -> pd.DataFrame:
"""List all smart contract platforms like ethereum, solana, cosmos, polkadot, kusama ... [Source: CoinPaprika]
Returns
-------
pandas.DataFrame
index, platform_id
"""
session = PaprikaSession()
contract_platforms = session.make_request(session.ENDPOINTS["contract_platforms"])
df = pd.DataFrame(contract_platforms).reset_index()
df.columns = ["index", "platform_id"]
df["index"] = df["index"] + 1
return df
def get_contract_platform(platform_id: str = "eth-ethereum") -> pd.DataFrame:
"""Gets all contract addresses for given platform [Source: CoinPaprika]
Parameters
----------
platform_id: str
Blockchain platform like eth-ethereum
Returns
-------
pandas.DataFrame
id, type, active, balance
"""
session = PaprikaSession()
contract_platforms = session.make_request(
session.ENDPOINTS["contract_platform_addresses"].format(platform_id)
)
return pd.DataFrame(contract_platforms)[["id", "type", "active", "balance"]]
```
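A brief usage sketch of these helpers. They hit the public CoinPaprika REST API, so network access is required and the exact numbers returned will vary over time.

```python
# Network access required; output values will differ over time.
from gamestonk_terminal.cryptocurrency.overview import coinpaprika_model

# Global market snapshot as a Metric/Value table.
print(coinpaprika_model.get_global_market().to_string(index=False))

# Top 10 coins by rank with a few market columns.
top = coinpaprika_model.get_coins_market_info(quotes="USD").head(10)
print(top[["rank", "name", "symbol", "price", "pct_change_24h"]].to_string(index=False))
```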
#### File: gamestonk_terminal/options/op_helpers.py
```python
__docformat__ = "numpy"
import numpy as np
import pandas as pd
def get_loss_at_strike(strike: float, chain: pd.DataFrame) -> float:
"""Function to get the loss at the given expiry
Parameters
----------
strike: Union[int,float]
Value to calculate total loss at
chain: pd.DataFrame
Dataframe containing at least strike and openInterest
Returns
-------
loss: Union[float,int]
Total loss
"""
itm_calls = chain[chain.index < strike][["OI_call"]]
itm_calls["loss"] = (strike - itm_calls.index) * itm_calls["OI_call"]
call_loss = itm_calls["loss"].sum()
itm_puts = chain[chain.index > strike][["OI_put"]]
itm_puts["loss"] = (itm_puts.index - strike) * itm_puts["OI_put"]
put_loss = itm_puts.loss.sum()
loss = call_loss + put_loss
return loss
def calculate_max_pain(chain: pd.DataFrame) -> int:
"""Returns the max pain for a given call/put dataframe
Parameters
----------
chain: DataFrame
Dataframe to calculate value from
Returns
-------
max_pain : int
Max pain value
"""
strikes = np.array(chain.index)
if ("OI_call" not in chain.columns) or ("OI_put" not in chain.columns):
print("Incorrect columns. Unable to parse max pain")
return np.nan
loss = []
for price_at_exp in strikes:
loss.append(get_loss_at_strike(price_at_exp, chain))
chain["loss"] = loss
max_pain = chain["loss"].idxmin()
return max_pain
def convert(orig: str, to: str) -> float:
"""Convert a string to a specific type of number
Parameters
----------
    orig: str
        String to convert
    to: str
        Target format: "%" to parse a percentage, "," to strip thousands separators
Returns
-------
number : float
Decimal value of string
"""
if to == "%":
clean = orig.replace("%", "").replace("+", "")
return float(clean) / 100
if to == ",":
clean = orig.replace(",", "")
return float(clean)
raise ValueError("Invalid to format, please use '%' or ','.")
opt_chain_cols = {
"lastTradeDate": {"format": "date", "label": "Last Trade Date"},
"strike": {"format": "${x:.2f}", "label": "Strike"},
"lastPrice": {"format": "${x:.2f}", "label": "Last Price"},
"bid": {"format": "${x:.2f}", "label": "Bid"},
"ask": {"format": "${x:.2f}", "label": "Ask"},
"change": {"format": "${x:.2f}", "label": "Change"},
"percentChange": {"format": "{x:.2f}%", "label": "Percent Change"},
"volume": {"format": "{x:.2f}", "label": "Volume"},
"openInterest": {"format": "", "label": "Open Interest"},
"impliedVolatility": {"format": "{x:.2f}", "label": "Implied Volatility"},
}
```
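A small self-contained check of `calculate_max_pain` (the strikes and open-interest numbers below are made up; the import path follows the file header above):

```python
import pandas as pd

from gamestonk_terminal.options.op_helpers import calculate_max_pain

# Tiny option chain indexed by strike, with the OI_call / OI_put columns the helper expects.
chain = pd.DataFrame(
    {"OI_call": [100, 50, 10], "OI_put": [10, 60, 120]},
    index=[90, 100, 110],
)
print(calculate_max_pain(chain))  # -> 100, the strike with the smallest total loss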
#### File: stocks/quantitative_analysis/factors_model.py
```python
__docformat__ = "numpy"
from urllib.request import urlopen
from zipfile import ZipFile
from io import BytesIO
import statsmodels.api as sm
import yfinance as yf
import pandas as pd
def get_fama_raw():
"""Gets base Fama French data to calculate risk"""
with urlopen(
"http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Research_Data_Factors_CSV.zip"
) as url:
# Download Zipfile and create pandas DataFrame
with ZipFile(BytesIO(url.read())) as zipfile:
with zipfile.open("F-F_Research_Data_Factors.CSV") as zip_open:
df = pd.read_csv(
zip_open,
header=0,
names=["Date", "MKT-RF", "SMB", "HML", "RF"],
skiprows=3,
)
df = df[df["Date"].apply(lambda x: len(str(x).strip()) == 6)]
df["Date"] = df["Date"].astype(str) + "01"
df["Date"] = pd.to_datetime(df["Date"], format="%Y%m%d")
df["MKT-RF"] = pd.to_numeric(df["MKT-RF"], downcast="float")
df["SMB"] = pd.to_numeric(df["SMB"], downcast="float")
df["HML"] = pd.to_numeric(df["HML"], downcast="float")
df["RF"] = pd.to_numeric(df["RF"], downcast="float")
df["MKT-RF"] = df["MKT-RF"] / 100
df["SMB"] = df["SMB"] / 100
df["HML"] = df["HML"] / 100
df["RF"] = df["RF"] / 100
df = df.set_index("Date")
return df
def get_historical_5(ticker: str):
"""Get 5 year monthly historical performance for a ticker with dividends filtered"""
tick = yf.Ticker(ticker)
df = tick.history(period="5y", interval="1mo")
df = df[df.index.to_series().apply(lambda x: x.day == 1)]
df = df.drop(["Dividends", "Stock Splits"], axis=1)
df = df.dropna()
return df
def capm_information(ticker):
"""Provides information that relates to the CAPM model"""
df_f = get_fama_raw()
df_h = get_historical_5(ticker)
df = df_h.join(df_f)
df = df.dropna()
df["Monthly Return"] = df["Close"].pct_change()
df["Excess Monthly Return"] = df["Monthly Return"] - df["RF"]
df["Excess MKT-RF"] = df["MKT-RF"] - df["RF"]
df = df.dropna()
y = df[["Excess Monthly Return"]]
x = df["Excess MKT-RF"]
x = sm.add_constant(x)
model = sm.OLS(y, x).fit()
beta = model.params["Excess MKT-RF"]
sy = model.rsquared
return beta, sy
``` |
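Illustrative usage of the CAPM helper above. Note this is a sketch rather than a test: both downloads require network access to the Fama-French server and Yahoo Finance, and the ticker is arbitrary.

```python
beta, r_squared = capm_information("AAPL")
print("beta: {:.2f}, R^2: {:.2f}".format(beta, r_squared))
```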
{
"source": "jperla/webify",
"score": 2
} |
#### File: apps/hello/__init__.py
```python
import time
import webify
app = webify.defaults.app()
# Controllers
@app.subapp(path='/')
@webify.urlable()
def index(req, p):
p(u'Hello, world!')
@app.subapp()
@webify.urlable()
def hello(req, p):
p(u'<form method="POST">')
name = req.params.get('name', None)
if name is None:
p(u'Hello, world! <br />')
else:
p(u'Hello, %(name)s! <br />' % {'name': name})
p(u'Your name: <input type="text" name="name">')
p(u'<input type="submit">')
p(u'</form>')
@app.subapp()
@webify.urlable()
def hello_old(req, p):
webify.http.status.redirect(hello.url())
# Middleware
from webify.middleware import EvalException
wrapped_app = webify.wsgify(app, EvalException)
# Server
from webify.http import server
if __name__ == '__main__':
server.serve(wrapped_app, host='127.0.0.1', port=8080)
```
#### File: apps/layouts/__init__.py
```python
from __future__ import with_statement
import webify
from webify.templates.helpers import html
# Layout template
@webify.template()
def page_layout(p, title, inside):
with p(html.html()):
with p(html.head()):
p(html.title(title))
with p(html.body()):
p.sub(inside)
app = webify.defaults.app()
# Controllers
@app.subapp()
@webify.urlable()
def hello(req, p):
name = req.params.get(u'name', u'world')
p(page_layout(u'Hello App', hello_template(name)))
# Templates
# This would normally be in a different file in a different module
@webify.template()
def hello_template(p, name):
with p(html.form(action=u'', method='GET')):
p(u'Hello, %s! <br />' % name)
p(u'Your name: %s' % html.input_text('name'))
p(html.input_submit('name'))
# Middleware
from webify.middleware import EvalException
wrapped_app = webify.wsgify(app, EvalException)
# Server
if __name__ == '__main__':
webify.http.server.serve(wrapped_app, host='127.0.0.1', port='8080')
```
#### File: controllers/webargs/__init__.py
```python
from __future__ import absolute_import
#import chardet
from ... import App
NoArgument = object()
class UrlableApp(App):
def __call__(self, req, p):
raise NotImplementedError
def url(self, *args, **kwargs):
raise NotImplementedError
def remaining_url(req):
remaining = req.path_info[1:]
# #TODO: jperla: add this later
#chardet.decode
remaining = remaining.decode(u'utf-8')
return remaining
class RemainingUrlableApp(UrlableApp):
def __init__(self, subapp):
self.subapp = subapp
def __call__(self, req, p):
remaining = remaining_url(req)
self.subapp(req, p, remaining)
def url(self, remaining):
url = u'/%s' % remaining
if self.parent is None:
return url
else:
return self.parent.wrap_url(url)
class UrlableAppWrapper(object):
def __init__(self, args_func=lambda req:[], url_func=lambda:u'/'):
# Takes request object, returns tuple for args (or dict for kwargs??)
self.args_func = args_func
##TODO: jperla: Takes arbitrary, returns str or Url ?
self.url_func = url_func
def __call__(self, controller):
url_func, args_func = self.url_func, self.args_func
class UrlableAppDecorator(UrlableApp):
def __init__(self, func):
self.func = func
def __call__(self, req, p):
args = args_func(req)
if isinstance(args, dict):
kwargs = args
args = []
else:
kwargs = {}
return self.func(req, p, *args, **kwargs)
def url(self, *args, **kwargs):
url = url_func(*args, **kwargs)
if self.parent is None:
return url
else:
return self.parent.wrap_url(self, url)
return UrlableAppDecorator(controller)
class RemainingUrlableAppWrapper(UrlableAppWrapper):
def __init__(self,
args_func=lambda req: (remaining_url(req),),
url_func=lambda remaining: u'/%s' % remaining):
UrlableAppWrapper.__init__(self, args_func, url_func)
```
#### File: webify/middleware/__init__.py
```python
from paste.evalexception import EvalException
def install_middleware(app, middleware):
for m in middleware:
app = m(app)
return app
class SettingsMiddleware(object):
'''
    Takes a dictionary of arbitrary settings for the app.
Places the dictionary at the top of a stack
in the environ (key "settings")
'''
def __init__(self, settings):
self.settings = settings
def __call__(self, app):
def wrapper(environ, start_response):
if u'settings' in environ:
environ[u'settings'].insert(self.settings, 0)
else:
environ['settings'] = [self.settings]
return app(environ, start_response)
return wrapper
```
#### File: templates/helpers/xml.py
```python
__no_content = object()
def node(element_name, content=__no_content, attributes={}):
    # Dispatch to the block or inline form depending on whether content was given.
    if content is __no_content:
        return node_block(element_name, attributes)
    else:
        return node_inline(element_name, content, attributes)
def _attrs_string(attributes):
attrs = u' '.join(['%s="%s"' % (k,v) for k,v in attributes.iteritems()])
attrs_string = (u' ' + attrs) if len(attrs) > 0 else u''
return attrs_string
def node_inline(element_name, content, attributes={}):
attrs_string = _attrs_string(attributes)
if content == u'':
return u'<%s%s />' % (element_name, attrs_string)
else:
return u'<%s%s>%s</%s>\n' % (element_name, attrs_string, content, element_name)
def node_block(element_name, attributes={}):
attrs_string = _attrs_string(attributes)
return u'<%s%s>\n' % (element_name, attrs_string), u'</%s>\n' % element_name
def cdata(content):
return u'<![CDATA[>%s\n]]>' % content
def cdata_block():
return u'<![CDATA[>', u'\n]]>'
```
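A quick illustration of the node helpers above (standalone; note the module targets Python 2, as the use of `iteritems` implies):

```python
print(node_inline(u'title', u'Hello'))    # -> <title>Hello</title>
print(node_inline(u'br', u''))            # -> <br />
open_tag, close_tag = node_block(u'div', {u'class': u'page'})
print(open_tag + u'content\n' + close_tag)
```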
#### File: urls/dispatchers/__init__.py
```python
from __future__ import absolute_import
import wsgiref
import wsgiref.util
from ... import http
class SimpleDispatcher(object):
def __init__(self, default=None):
self.default = default
self.apps, self.urls = {}, {}
def register(self, subapp, path=None):
name = (u'/%s' % subapp.func.func_name) if path is None else path
if name in self.apps:
raise Exception(u'Already dispatching to path: %s' % path)
self.apps[name] = subapp
self.urls[subapp] = name
def url(self, subapp, controller_url):
assert(subapp in self.urls)
#TODO: jperla: fix index urls
return (self.urls[subapp] + controller_url).replace(u'//', u'/')
def __call__(self, req):
path_info = req.environ[u'PATH_INFO'] #for debugging
#TODO: jperla: deep copy request here?
name = u'/%s' % (wsgiref.util.shift_path_info(req.environ) or u'')
apps = self.apps
app = apps.get(name)
if app is not None:
return app, req
else:
if self.default is not None:
return self.default, req
else:
raise http.status.not_found()
class SingleDispatcher(object):
def __init__(self):
self.subapp = None
def register(self, subapp):
assert(subapp is not None)
if self.subapp is not None:
raise Exception(u'Single dispatcher only dispatches'
u' to one controller')
else:
self.subapp = subapp
def url(self, subapp, controller_url):
assert(subapp == self.subapp)
return u'%s' % controller_url
def __call__(self, req):
app = self.subapp
if app is not None:
return app, req
else:
raise http.status.not_found()
``` |
{
"source": "jperl/xor",
"score": 3
} |
#### File: jperl/xor/xor_dataset_test.py
```python
import numpy as np
from numpy.testing import assert_equal
from xor_dataset import get_random_bits_parity
def test_get_random_bits_parity():
bit_sequences, parity = get_random_bits_parity(num_sequences=5, num_bits=5)
assert_equal(
bit_sequences,
[
#even, odd, even, even, odd
[0, 1, 1, 0, 1],
#odd, even, odd, even, odd
[1, 1, 1, 1, 1],
# odd, odd, odd, even, even
[1, 0, 0, 1, 0],
# even, even, even, even, odd
[0, 0, 0, 0, 1],
# even, odd, even, even, even
[0, 1, 1, 0, 0]
])
assert_equal(parity, [
[0, 1, 0, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 1],
[0, 1, 0, 0, 0],
])
``` |
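The `xor_dataset` implementation is not shown here; as a sketch, the parity labels asserted above are simply the running XOR of the bits seen so far, which for 0/1 arrays is a cumulative sum taken mod 2:

```python
import numpy as np

def running_parity(bit_sequences):
    # cumulative XOR == cumulative sum mod 2 for 0/1 inputs
    return np.cumsum(bit_sequences, axis=1) % 2

print(running_parity(np.array([[0, 1, 1, 0, 1]])))  # -> [[0 1 0 0 1]]
```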
{
"source": "jperod/RNN_Scandinavian_Text_Classifier",
"score": 3
} |
#### File: jperod/RNN_Scandinavian_Text_Classifier/predict_rnn.py
```python
from RNN import RNN
from Utils import Utils
import pickle
import torch
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--string', type=str, help='enter string to predict')
parser.add_argument('--example', action='store_true', help='example of predictions made by the model')
parser.add_argument('--save_dir', type=str, help='directory fo saved model', default='saves/save_hn_256_lr_0.005/')
args = parser.parse_args()
"""
Load saved model
"""
vocab_dir = args.save_dir + 'vocab.txt'
saved_model_dir = args.save_dir + 'saved_model.pth'
with open(vocab_dir, 'rb') as f:
Word2Index = pickle.load(f)
# Point unknown tokens to Word2Index with index 0 corresponding to the rarely used token '0':
Word2Index_w_unk = Word2Index.copy()
Word2Index_w_unk['<UNK>'] = 0
n_words = len(Word2Index)
rnn = RNN(len(Word2Index), 256, 3)
rnn.load_state_dict(torch.load(saved_model_dir))
all_categories = ['da', 'no', 'sv']
U = Utils(n_words, all_categories, Word2Index_w_unk)
"""
Predict a given input sentence
"""
def predict(input_line):
print('\n> %s' % input_line)
with torch.no_grad():
output, _ = U.evaluate(U.SentToTensor(input_line), rnn)
output = np.squeeze(output.numpy())
pred_ix = np.argmax(output)
prediction = all_categories[pred_ix]
print("The following sentence is: [" + prediction + "]")
"""
Predict multiple example sentences
"""
if args.example:
print('\nTesting on dataset sentences:')
predict('Hold nu op, hun har det skidt') #DA
predict('Jeg har akkurat bakt en sukkerkake') #NO
predict('Man känner igen den, den är bekväm.') #SV
print('\nTesting on random sentences from the internet:')
predict('Hej, jeg hedder Pedro og jeg elsker at drikke øl!') #DA
predict('Mit luftpudefartøj er fyldt med ål') #DA
predict('Der er i øjeblikket ingen tekst på denne side. Du kan søge efter sidenavnet på andre sider, søge i relaterede logger eller oprette siden. ') #DA
predict('Jeg forstår det veldig godt.') #NO
predict('Floreanaspottefugl er ein sterkt truga art av spottefuglar. Han er naturleg endemisk til øya Floreana, ei av Galápagosøyane.') # NO
predict('När katten är borta dansar råttorna på bordet') #SV
predict('Rosshavet (engelska: Ross Sea) är ett randhav av Antarktiska oceanen och ligger mellan Victoria Land och Marie Byrd Land') #SV
else:
predict(args.string)
``` |
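Example invocations of the script above (the flags come from its argparse definition; the default `--save_dir` must already contain `vocab.txt` and `saved_model.pth`):

```python
# python predict_rnn.py --example
# python predict_rnn.py --string "Jeg har akkurat bakt en sukkerkake"
# python predict_rnn.py --string "Hej med dig" --save_dir saves/save_hn_256_lr_0.005/
```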
{
"source": "JPeroutek/mycoin",
"score": 3
} |
#### File: JPeroutek/mycoin/custom.py
```python
import tools
#import hashlib
database_name = 'DB.db'
listen_port = 8900
gui_port = 8700
version = "VERSION"
block_reward = 10 ** 5
premine = 5 * 10 ** 6
fee = 10 ** 3
# Lower limits on what the "time" tag in a block can say.
mmm = 100
# Take the median of this many of the blocks.
# How far back in history do we look when we use statistics to guess at
# the current blocktime and difficulty.
history_length = 400
# This constant is selected such that the 50 most recent blocks count for 1/2 the
# total weight.
inflection = 0.985
download_many = 500 # Max number of blocks to request from a peer at the same time.
max_download = 50000
brainwallet = 'brain wallet'
privkey = tools.det_hash(brainwallet)
pubkey = tools.privtopub(privkey)
peers = [['localhost', 8901],
['localhost', 8902],
['localhost', 8903],
['localhost', 8904],
['localhost', 8905]]
hashes_per_check = 10 ** 5
def blocktime(length):
if length * block_reward < premine:
return 30 # seconds
else:
return 60
```
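A quick check of the block-time schedule implied by the constants above (note that importing `custom` runs the brain-wallet key derivation via `tools`):

```python
import custom

# premine / block_reward == 50, so the first 50 blocks target a 30 second
# blocktime and every block after that targets 60 seconds.
assert custom.blocktime(49) == 30
assert custom.blocktime(50) == 60
```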
#### File: JPeroutek/mycoin/networking.py
```python
import socket
import subprocess
import re
import tools
import custom
MAX_MESSAGE_SIZE = 60000
def kill_processes_using_ports(ports):
popen = subprocess.Popen(['netstat', '-lpn'],
shell=False,
stdout=subprocess.PIPE)
(data, err) = popen.communicate()
pattern = "^tcp.*((?:{0})).* (?P<pid>[0-9]*)/.*$"
pattern = pattern.format(')|(?:'.join(ports))
prog = re.compile(pattern)
for line in data.split('\n'):
match = re.match(prog, line)
if match:
pid = match.group('pid')
subprocess.Popen(['kill', '-9', pid])
def serve_forever(message_handler_func, PORT, queue):
server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('127.0.0.1', PORT))
server.listen(100)
while True:
client, addr = server.accept()
(ip, port) = addr
data = client.recv(MAX_MESSAGE_SIZE)
#we could insert security checks here
data = tools.unpackage(data)
client.sendall(tools.package(message_handler_func(data, queue)))
def connect(msg, host, port):
msg['version'] = custom.version
msg = tools.package(msg)
if len(msg) < 1 or len(msg) > MAX_MESSAGE_SIZE:
print('wrong sized message')
return
s = socket.socket()
try:
s.settimeout(2)
s.connect((str(host), int(port)))
s.sendall(msg)
response = s.recv(MAX_MESSAGE_SIZE)
#print(response)
return tools.unpackage(response)
except Exception as e:
#print('THE ERROR WAS: ' +str(e))
#print('disconnect')
return {'error': e}
def send_command(peer, msg):
return connect(msg, peer[0], peer[1])
``` |
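A hypothetical usage sketch for `send_command`. The `{'type': 'ping'}` payload is illustrative only; the real message schema is whatever the handler passed to `serve_forever` expects, and a peer from `custom.peers` must be listening:

```python
import custom
import networking

response = networking.send_command(custom.peers[0], {'type': 'ping'})
print(response)  # the peer's reply, or {'error': ...} if the connection failed
```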
{
"source": "jperras/Flask-ApiExceptions",
"score": 3
} |
#### File: jperras/Flask-ApiExceptions/flask_apiexceptions.py
```python
from __future__ import absolute_import
from __future__ import print_function
__version_info__ = ('1', '1', '2')
__version__ = '.'.join(__version_info__)
__author__ = '<NAME>'
__copyright__ = '(c) 2018 Fictive Kin, LLC'
__all__ = ['JSONExceptionHandler', 'ApiException', 'ApiError',
'api_exception_handler']
import logging
from flask import jsonify, request
from werkzeug.exceptions import default_exceptions, HTTPException
logger = logging.getLogger('apiexceptions')
class JSONExceptionHandler(object):
"""
A Flask extension that converts default Flask exceptions to their
application/json content type equivalent.
```
from application.libs import JSONExceptionHandler
exception_handler = JSONExceptionHandler()
exception_handler.init_app(app)
```
"""
# If we don't know what HTTP code to assign an exception, by default
# we assign it a `500`. This also handles uncaught exceptions; e.g.
# if our application raises any kind of Exception subclass that we don't
# explicitly have a handler for, then we've probably got an application
# error somewhere for that particular code path.
default_status_code = 500
default_message = 'An error occurred!'
def __init__(self, app=None):
"""
Initialize the extension.
Any default configurations that do not require the application
instance should be put here.
"""
if app:
self.init_app(app)
def default_handler(self, error=None):
"""Default error handler to register with the application."""
if error is None:
message = self.default_message
else:
# If the error object contains a `message` attribute, then let's
# use that as the message for our exception.
if hasattr(error, 'message'):
message = error.message
# Werkzeug default exception types use `description` instead
# of `message`.
elif hasattr(error, 'description'):
message = error.description
else:
message = self.default_message
response = jsonify(message=message)
# If our error object contains a specific error code, then let's use
# that. If not, we will use our `default_status_code` that has been
# defined for this class. This ensures that random exceptions that
# are thrown by Python or by external libraries that we miss are
# an application error.
response.status_code = self.default_status_code
if hasattr(error, 'status_code'):
response.status_code = int(error.status_code)
elif isinstance(error, HTTPException):
response.status_code = error.code
if response.status_code >= 500:
logger.exception(error)
else:
logger.debug(error)
return response
def init_app(self, app):
"""
Initialize the extension with any application-level configuration
requirements.
This is where we register the Werkzeug `HTTPException` class along
with all the other default exception codes.
"""
self.app = app
# Register the default HTTP codes to be handled by our exception
# handler.
for code, _ in default_exceptions.items():
self.register(code)
if not hasattr(self.app, 'extensions'):
self.app.extensions = {}
self.app.extensions['apiexceptions'] = self
def register(self, code_or_exception, handler=None):
"""
Register an exception class *or* numeric code with the default
        exception handler provided by this extension, *or* with the function
        passed via the `handler` argument.
"""
f = handler or self.default_handler
self.app.register_error_handler(code_or_exception, f=f)
@staticmethod
def handle_404(error=None): #pylint: disable=locally-disabled,unused-argument
"""The default Werkzeug 404 handler does not include a
message or description, which causes some consistency issues with our
frontends when receiving a 404."""
message = 'The resource at {} could not be found.'.format(request.path)
response = jsonify(message=message)
response.status_code = 404
return response
class ApiError(object): #pylint: disable=locally-disabled,too-few-public-methods
"""
Contains information related to an API usage error.
- code: a semantically readable error slug, e.g. `invalid-password`
- info: information regarding the source of the error. E.g., if the
error was caused by invalid submitted data, the info can contain
a list of fields that contained the bad data. E.g.,
['username', 'other_field']
- message: a human-readable description of what went wrong.
All of the above data will be serialized into JSON to be returned to the
client.
"""
def __init__(self, code=None, info=None, message=None):
self.code = code
self.info = info
self.message = message
def serialize(self):
"""
Construct response dictionary.
"""
return {'code': self.code,
'info': self.info,
'message': self.message}
class ApiException(Exception):
"""
An exception that may be raised by various API view endpoints.
Can contain one or more ApiError objects and must include a HTTP status
code.
"""
# Default status code if none is set.
status_code = 500
message = None
code = None
info = None
#pylint: disable=locally-disabled,too-many-arguments
def __init__(self, status_code=None, error=None, message=None, info=None,
code=None):
"""
Initialize the ApiException container object.
If an `error` instance is provided, it will be added as an error
contained within this wrapper. If any of `message`, `info`, or `code`
are set, a new error object is created added.
"""
super(ApiException, self).__init__()
self._errors = []
if error is not None:
self._errors.append(error)
self.status_code = status_code or self.status_code
message = message or self.message
code = code or self.code
info = info or self.info
if message or info or code:
self.add_error(ApiError(message=message, code=code, info=info))
def add_error(self, error):
"""
Append an error to the list of errors contained with this
ApiException instance.
"""
self._errors.append(error)
@property
def errors(self):
"""Getter for errors currently stored on this instance."""
return self._errors
def serialize(self):
"""
Serialize the errors contained within this ApiException object to
Python types that are easily convertible to JSON (or similar).
"""
return {'errors': [e.serialize() for e in self.errors]}
def api_exception_handler(api_exception):
"""
Jsonify and serialize ApiException-compatible objects and assign
the correct response code.
"""
response = jsonify(api_exception.serialize())
response.status_code = api_exception.status_code
return response
``` |
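A minimal wiring sketch using only names defined above; the route and error values are illustrative, not part of the library:

```python
from flask import Flask

from flask_apiexceptions import (ApiError, ApiException, JSONExceptionHandler,
                                 api_exception_handler)

app = Flask(__name__)
ext = JSONExceptionHandler(app)
ext.register(code_or_exception=ApiException, handler=api_exception_handler)

@app.route('/fail')
def fail():
    raise ApiException(status_code=400,
                       error=ApiError(code='invalid-password',
                                      message='The password supplied is invalid.'))

# GET /fail now returns HTTP 400 with a JSON body of the form:
# {"errors": [{"code": "invalid-password", "info": null, "message": "..."}]}
```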
{
"source": "jperuggia/ComputerVision",
"score": 4
} |
#### File: assignments/ps01/ps1.py
```python
import math
import numpy as np
import cv2
import sys
# # Implement the functions below.
def extract_red(image):
""" Returns the red channel of the input image. It is highly recommended to make a copy of the
input image in order to avoid modifying the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input RGB (BGR in OpenCV) image.
Returns:
numpy.array: Output 2D array containing the red channel.
"""
# Since Red is last index, we want all rows, columns, and the last channel.
return np.copy(image[:, :, 2])
def extract_green(image):
""" Returns the green channel of the input image. It is highly recommended to make a copy of the
input image in order to avoid modifying the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input RGB (BGR in OpenCV) image.
Returns:
numpy.array: Output 2D array containing the green channel.
"""
# Return green channel, all rows, columns
return np.copy(image[:, :, 1])
def extract_blue(image):
""" Returns the blue channel of the input image. It is highly recommended to make a copy of the
input image in order to avoid modifying the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input RGB (BGR in OpenCV) image.
Returns:
numpy.array: Output 2D array containing the blue channel.
"""
# Since blue is the first index, get first channel.
return np.copy(image[:, :, 0])
def swap_green_blue(image):
""" Returns an image with the green and blue channels of the input image swapped. It is highly
recommended to make a copy of the input image in order to avoid modifying the original array.
You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input RGB (BGR in OpenCV) image.
Returns:
numpy.array: Output 3D array with the green and blue channels swapped.
"""
temp_image = np.copy(image)
temp_image[:, :, 0] = extract_green(image)
temp_image[:, :, 1] = extract_blue(image)
return temp_image
def copy_paste_middle(src, dst, shape):
""" Copies the middle region of size shape from src to the middle of dst. It is
highly recommended to make a copy of the input image in order to avoid modifying the
original array. You can do this by calling:
temp_image = np.copy(image)
Note: Assumes that src and dst are monochrome images, i.e. 2d arrays.
Note: Where 'middle' is ambiguous because of any difference in the oddness
or evenness of the size of the copied region and the image size, the function
rounds downwards. E.g. in copying a shape = (1,1) from a src image of size (2,2)
into an dst image of size (3,3), the function copies the range [0:1,0:1] of
the src into the range [1:2,1:2] of the dst.
Args:
src (numpy.array): 2D array where the rectangular shape will be copied from.
dst (numpy.array): 2D array where the rectangular shape will be copied to.
shape (tuple): Tuple containing the height (int) and width (int) of the section to be
copied.
Returns:
numpy.array: Output monochrome image (2D array)
"""
src = np.copy(src)
dst = np.copy(dst)
# height is rows, width is columns
src_rows, src_cols = src.shape
dst_rows, dst_cols = dst.shape
# shape size mid points.
shape_mid_rows = int(np.floor(shape[0] / 2))
shape_mid_cols = int(np.floor(shape[1] / 2))
# mid point of the "copy" image
copy_mid_row = int(np.floor(src_rows / 2))
copy_mid_col = int(np.floor(src_cols / 2))
# mid points of the paste image.
paste_mid_row = int(np.floor(dst_rows / 2))
paste_mid_col = int(np.floor(dst_cols / 2))
# calculate the shifts to make sure copy is correct.
r1_dst, r2_dst, c1_dst, c2_dst, r1_src, r2_src, c1_src, c2_src = [
paste_mid_row - shape_mid_rows,
paste_mid_row + shape_mid_rows,
paste_mid_col - shape_mid_cols,
paste_mid_col + shape_mid_cols,
copy_mid_row - shape_mid_rows,
copy_mid_row + shape_mid_rows,
copy_mid_col - shape_mid_cols,
copy_mid_col + shape_mid_cols
]
dst[r1_dst: r2_dst, c1_dst: c2_dst] = src[r1_src: r2_src, c1_src: c2_src]
return dst
def image_stats(image):
""" Returns the tuple (min,max,mean,stddev) of statistics for the input monochrome image.
In order to become more familiar with Numpy, you should look for pre-defined functions
that do these operations i.e. numpy.min.
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input 2D image.
Returns:
tuple: Four-element tuple containing:
min (float): Input array minimum value.
max (float): Input array maximum value.
mean (float): Input array mean / average value.
stddev (float): Input array standard deviation.
"""
return 1.*np.min(image), 1.*np.max(image), 1.*np.mean(image), 1.*np.std(image)
def center_and_normalize(image, scale):
""" Returns an image with the same mean as the original but with values scaled about the
mean so as to have a standard deviation of "scale".
Note: This function makes no defense against the creation
of out-of-range pixel values. Consider converting the input image to
a float64 type before passing in an image.
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input 2D image.
scale (int or float): scale factor.
Returns:
numpy.array: Output 2D image.
"""
i_min, i_max, i_mean, i_std = image_stats(image)
# take the mean from the image, then divide by the std deviation. We then scale by the
# scale factor and then add the mean back into the image.
normal_image = (((image-i_mean) / i_std) * scale) + i_mean
return normal_image
def shift_image_left(image, shift):
""" Outputs the input monochrome image shifted shift pixels to the left.
The returned image has the same shape as the original with
the BORDER_REPLICATE rule to fill-in missing values. See
http://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/copyMakeBorder/copyMakeBorder.html?highlight=copy
for further explanation.
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): Input 2D image.
shift (int): Displacement value representing the number of pixels to shift the input image.
This parameter may be 0 representing zero displacement.
Returns:
numpy.array: Output shifted 2D image.
"""
temp_image = np.copy(image)
# take the temp image, all rows, from column defined in shift to end, move shift using border replicate.
return cv2.copyMakeBorder(temp_image[:, shift:], 0, 0, 0, shift, cv2.BORDER_REPLICATE)
def difference_image(img1, img2):
""" Returns the difference between the two input images (img1 - img2). The resulting array must be normalized
and scaled to fit [0, 255].
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
img1 (numpy.array): Input 2D image.
img2 (numpy.array): Input 2D image.
Returns:
numpy.array: Output 2D image containing the result of subtracting img2 from img1.
"""
difference = img1.astype(np.float) - img2.astype(np.float)
output_image = np.zeros(difference.shape)
cv2.normalize(difference, output_image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
# print("Max Value is ", max(output_image.flatten()))
# print("Min Value is ", min(output_image.flatten()))
return output_image
def add_noise(image, channel, sigma):
""" Returns a copy of the input color image with Gaussian noise added to
channel (0-2). The Gaussian noise mean must be zero. The parameter sigma
controls the standard deviation of the noise.
The returned array values must not be clipped or normalized and scaled. This means that
there could be values that are not in [0, 255].
Note: This function makes no defense against the creation
of out-of-range pixel values. Consider converting the input image to
a float64 type before passing in an image.
It is highly recommended to make a copy of the input image in order to avoid modifying
the original array. You can do this by calling:
temp_image = np.copy(image)
Args:
image (numpy.array): input RGB (BGR in OpenCV) image.
channel (int): Channel index value.
sigma (float): Gaussian noise standard deviation.
Returns:
numpy.array: Output 3D array containing the result of adding Gaussian noise to the
specified channel.
"""
# generate random noise using the image.shape tuple as the dimensions.
gaussian_noise = np.random.randn(*image.shape) * sigma
temp_image = np.copy(image)
temp_image = (temp_image * 1.0) # make it a float
temp_image[:, :, channel] += gaussian_noise[:, :, channel]
return temp_image
```
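A quick sanity check of `center_and_normalize` on synthetic data (illustrative only; `ps1` is assumed importable, as the later test files do for `ps4`):

```python
import numpy as np

from ps1 import center_and_normalize

img = (np.random.rand(8, 8) * 255).astype(np.float64)
out = center_and_normalize(img, scale=10)
# The mean is preserved and the standard deviation becomes the requested scale.
print(np.isclose(out.mean(), img.mean()), np.isclose(out.std(), 10.0))  # True True
```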
#### File: assignments/ps04/ps4.py
```python
import numpy as np
import cv2
# Utility function
def normalize_and_scale(image_in, scale_range=(0, 255)):
"""Normalizes and scales an image to a given range [0, 255].
Utility function. There is no need to modify it.
Args:
image_in (numpy.array): input image.
scale_range (tuple): range values (min, max). Default set to
[0, 255].
Returns:
numpy.array: output image.
"""
image_out = np.zeros(image_in.shape)
cv2.normalize(image_in, image_out, alpha=scale_range[0],
beta=scale_range[1], norm_type=cv2.NORM_MINMAX)
return image_out
# Assignment code
def gradient_x(image):
"""Computes image gradient in X direction.
Use cv2.Sobel to help you with this function. Additionally you
should set cv2.Sobel's 'scale' parameter to one eighth and ksize
to 3.
Args:
image (numpy.array): grayscale floating-point image with
values in [0.0, 1.0].
Returns:
numpy.array: image gradient in the X direction. Output
from cv2.Sobel.
"""
image_cpy = image.copy()
# sobel scale = 1/8, ksize = 3 default border type
sobel_x = cv2.Sobel(image_cpy, cv2.CV_64F, 1, 0, scale=0.125, ksize=3)
return sobel_x
def gradient_y(image):
"""Computes image gradient in Y direction.
Use cv2.Sobel to help you with this function. Additionally you
should set cv2.Sobel's 'scale' parameter to one eighth and ksize
to 3.
Args:
image (numpy.array): grayscale floating-point image with
values in [0.0, 1.0].
Returns:
numpy.array: image gradient in the Y direction.
Output from cv2.Sobel.
"""
image_cpy = image.copy()
# sobel scale = 1/8, ksize = 3 default border type
sobel_y = cv2.Sobel(image_cpy, cv2.CV_64F, 0, 1, scale=0.125, ksize=3)
return sobel_y
def optic_flow_lk(img_a, img_b, k_size, k_type, sigma=1):
"""Computes optic flow using the Lucas-Kanade method.
For efficiency, you should apply a convolution-based method.
Note: Implement this method using the instructions in the lectures
and the documentation.
You are not allowed to use any OpenCV functions that are related
to Optic Flow.
Args:
img_a (numpy.array): grayscale floating-point image with
values in [0.0, 1.0].
img_b (numpy.array): grayscale floating-point image with
values in [0.0, 1.0].
k_size (int): size of averaging kernel to use for weighted
averages. Here we assume the kernel window is a
square so you will use the same value for both
width and height.
k_type (str): type of kernel to use for weighted averaging,
'uniform' or 'gaussian'. By uniform we mean a
kernel with the only ones divided by k_size**2.
To implement a Gaussian kernel use
cv2.getGaussianKernel. The autograder will use
'uniform'.
sigma (float): sigma value if gaussian is chosen. Default
value set to 1 because the autograder does not
use this parameter.
Returns:
tuple: 2-element tuple containing:
U (numpy.array): raw displacement (in pixels) along
X-axis, same size as the input images,
floating-point type.
V (numpy.array): raw displacement (in pixels) along
Y-axis, same size and type as U.
"""
# fix to make sure the kernel size is an odd number.
if k_size % 2 == 0 and k_size > 2:
k_size = k_size - 1
# copy images for use.
image_a = img_a.copy()
image_b = img_b.copy()
k = np.ones((k_size, k_size)) / (k_size ** 2)
# base gradients.
gx = gradient_x(image_a)
gy = gradient_y(image_a)
gt = image_b - image_a
# calculate the weighted gradients using blurs to prevent having to loop. Makes this much easier to manage.
if k_type == 'uniform':
gx_x = cv2.filter2D(gx * gx, ddepth=-1, kernel=k)
gy_y = cv2.filter2D(gy * gy, ddepth=-1, kernel=k)
gx_y = cv2.filter2D(gx * gy, ddepth=-1, kernel=k)
gy_x = cv2.filter2D(gy * gx, ddepth=-1, kernel=k)
gx_t = cv2.filter2D(gx * gt, ddepth=-1, kernel=k)
gy_t = cv2.filter2D(gy * gt, ddepth=-1, kernel=k)
else:
blur_kernel = (k_size, k_size)
gx_x = cv2.GaussianBlur(gx * gx, blur_kernel, sigma)
gy_y = cv2.GaussianBlur(gy * gy, blur_kernel, sigma)
gx_y = cv2.GaussianBlur(gx * gy, blur_kernel, sigma)
gy_x = cv2.GaussianBlur(gy * gx, blur_kernel, sigma)
gx_t = cv2.GaussianBlur(gx * gt, blur_kernel, sigma)
gy_t = cv2.GaussianBlur(gy * gt, blur_kernel, sigma)
    # Build a stack of per-pixel 2x2 structure-tensor matrices from the weighted
    # gradients. Flattening and transposing avoids an explicit loop: np.matmul
    # treats an N-D array (N > 2) as a stack of matrices held in the last two
    # axes, so the 2x2 system is set up and solved for every pixel at once.
image_a_transform = np.array([
gx_x.flatten(), gx_y.flatten(),
gy_x.flatten(), gy_y.flatten()
]).T.reshape(-1, 2, 2) # transpose and take from last a 2 x 2 matrix in the reshape.
# 2 x 1 matrix for matmul
time_transform = np.array([
-gx_t.flatten(),
-gy_t.flatten()
]).T.reshape(-1, 2, 1)
try:
image_a_transform_inv = np.linalg.inv(image_a_transform)
except np.linalg.linalg.LinAlgError:
# if this is a singular matrix (happens when the shift is the same as the original)
return np.zeros(image_a.shape), np.zeros(image_a.shape)
transform = np.matmul(image_a_transform_inv, time_transform)
    U = transform[:, 0, 0].reshape(image_a.shape)  # get the U (x-displacement) field
    V = transform[:, 1, 0].reshape(image_a.shape)  # get the V (y-displacement) field
return U, V
def reduce_image(image):
"""Reduces an image to half its shape.
The autograder will pass images with even width and height. It is
up to you to determine values with odd dimensions. For example the
output image can be the result of rounding up the division by 2:
(13, 19) -> (7, 10)
For simplicity and efficiency, implement a convolution-based
method using the 5-tap separable filter.
Follow the process shown in the lecture 6B-L3. Also refer to:
- <NAME>., and <NAME>. (1983). The Laplacian Pyramid
as a Compact Image Code
You can find the link in the problem set instructions.
Args:
image (numpy.array): grayscale floating-point image, values in
[0.0, 1.0].
Returns:
numpy.array: output image with half the shape, same type as the
input image.
"""
    image = image.copy()
    # Blur with the 5-tap binomial kernel, then keep every other row and column.
    # 1/16 4/16 6/16 4/16 1/16
# k = np.array([1, 4, 6, 4, 1]) / 16.0
k = np.array([np.array([1, 4, 6, 4, 1]) / 16.0])
r_k = np.dot(k.T, k)
filter_img = cv2.filter2D(image, -1, r_k)
return filter_img[::2, ::2]
def gaussian_pyramid(image, levels):
"""Creates a Gaussian pyramid of a given image.
This method uses reduce_image() at each level. Each image is
stored in a list of length equal the number of levels.
The first element in the list ([0]) should contain the input
image. All other levels contain a reduced version of the previous
level.
All images in the pyramid should floating-point with values in
Args:
image (numpy.array): grayscale floating-point image, values
in [0.0, 1.0].
levels (int): number of levels in the resulting pyramid.
Returns:
list: Gaussian pyramid, list of numpy.arrays.
"""
image = image.copy()
images = [image] # the list for the pyramid. has base image first.
for i in range(levels-1):
image = reduce_image(image) # reduce it then append. n times.
images.append(image)
return images
def create_combined_img(img_list):
"""Stacks images from the input pyramid list side-by-side.
Ordering should be large to small from left to right.
See the problem set instructions for a reference on how the output
should look like.
Make sure you call normalize_and_scale() for each image in the
pyramid when populating img_out.
Args:
img_list (list): list with pyramid images.
Returns:
numpy.array: output image with the pyramid images stacked
from left to right.
"""
# height is constant. image will never be larger than the first.
h = img_list[0].shape[0]
# width will be sum of all images size in the x direction
w = sum([i.shape[1] for i in img_list])
output = np.zeros((h, w)) # empty image.
curr_x = 0
for image in img_list:
ih, iw = image.shape # use this to determine where to place.
output[: ih, curr_x: curr_x + iw] = normalize_and_scale(image)
curr_x += iw
return output
def expand_image(image):
"""Expands an image doubling its width and height.
For simplicity and efficiency, implement a convolution-based
method using the 5-tap separable filter.
Follow the process shown in the lecture 6B-L3. Also refer to:
- <NAME>., and <NAME>. (1983). The Laplacian Pyramid
as a Compact Image Code
You can find the link in the problem set instructions.
Args:
image (numpy.array): grayscale floating-point image, values
in [0.0, 1.0].
Returns:
numpy.array: same type as 'image' with the doubled height and
width.
"""
image = image.copy()
dr = 2 * image.shape[0] # double rows.
dc = 2 * image.shape[1] # double columns
output = np.zeros((dr, dc))
    # place the source pixels on every other row and column
    output[::2, ::2] = image
# 2/16, 8/16, 12/16, 8/16, 2/16
k = np.array([np.array([2, 8, 12, 8, 2]) / 16.0])
r_k = np.dot(k.T, k)
output = cv2.filter2D(output, -1, r_k)
return output
def laplacian_pyramid(g_pyr):
"""Creates a Laplacian pyramid from a given Gaussian pyramid.
This method uses expand_image() at each level.
Args:
g_pyr (list): Gaussian pyramid, returned by gaussian_pyramid().
Returns:
list: Laplacian pyramid, with l_pyr[-1] = g_pyr[-1].
"""
images = []
for idx in range(len(g_pyr)):
if idx == len(g_pyr) - 1:
image = g_pyr[idx]
else:
h = g_pyr[idx].shape[0]
w = g_pyr[idx].shape[1]
# expand the image that is found to the next images size.
image = g_pyr[idx] - expand_image(g_pyr[idx + 1])[:h, :w]
images.append(image)
return images
def warp(image, U, V, interpolation, border_mode):
"""Warps image using X and Y displacements (U and V).
This function uses cv2.remap. The autograder will use cubic
interpolation and the BORDER_REFLECT101 border mode. You may
change this to work with the problem set images.
See the cv2.remap documentation to read more about border and
interpolation methods.
Args:
image (numpy.array): grayscale floating-point image, values
in [0.0, 1.0].
U (numpy.array): displacement (in pixels) along X-axis.
V (numpy.array): displacement (in pixels) along Y-axis.
interpolation (Inter): interpolation method used in cv2.remap.
border_mode (BorderType): pixel extrapolation method used in
cv2.remap.
Returns:
numpy.array: warped image, such that
warped[y, x] = image[y + V[y, x], x + U[y, x]]
"""
image = image.copy()
# displacements in x-axis and y-axis as U and V for image as float32
U = U.astype(np.float32)
V = V.astype(np.float32)
new_x, new_y = np.meshgrid(range(image.shape[1]), range(image.shape[0]))
new_x = new_x.astype(np.float32) + U
new_y = new_y.astype(np.float32) + V
output = cv2.remap(src=image, map1=new_x, map2=new_y, interpolation=interpolation, borderMode=border_mode)
return output
def hierarchical_lk(img_a, img_b, levels, k_size, k_type, sigma, interpolation,
border_mode):
"""Computes the optic flow using Hierarchical Lucas-Kanade.
This method should use reduce_image(), expand_image(), warp(),
and optic_flow_lk().
Args:
img_a (numpy.array): grayscale floating-point image, values in
[0.0, 1.0].
img_b (numpy.array): grayscale floating-point image, values in
[0.0, 1.0].
levels (int): Number of levels.
k_size (int): parameter to be passed to optic_flow_lk.
k_type (str): parameter to be passed to optic_flow_lk.
sigma (float): parameter to be passed to optic_flow_lk.
interpolation (Inter): parameter to be passed to warp.
border_mode (BorderType): parameter to be passed to warp.
Returns:
tuple: 2-element tuple containing:
U (numpy.array): raw displacement (in pixels) along X-axis,
same size as the input images,
floating-point type.
V (numpy.array): raw displacement (in pixels) along Y-axis,
same size and type as U.
"""
image_a = img_a.copy()
image_b = img_b.copy()
gaussian_pyramid_a = gaussian_pyramid(image_a, levels)
gaussian_pyramid_b = gaussian_pyramid(image_b, levels)
# init U and V
U = np.zeros(gaussian_pyramid_a[-1].shape)
V = np.zeros(gaussian_pyramid_a[-1].shape)
for aa, bb in reversed(zip(gaussian_pyramid_a, gaussian_pyramid_b)):
h, w = aa.shape
        # expand the flow field and double it to match the next pyramid level.
U = (expand_image(U) * 2)[:h, :w]
V = (expand_image(V) * 2)[:h, :w]
# warp it
c = warp(bb, U, V, interpolation=interpolation, border_mode=border_mode)
# perform Lucas Kanade
nx, ny = optic_flow_lk(aa, c, k_size, k_type, sigma)
# add to the orignal flow.
U = U + nx
V = V + ny
return U, V
```
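A quick shape check of the pyramid helpers above on a synthetic image (illustrative only):

```python
import numpy as np

import ps4

img = np.random.rand(64, 64)
g_pyr = ps4.gaussian_pyramid(img, levels=4)
print([p.shape for p in g_pyr])            # [(64, 64), (32, 32), (16, 16), (8, 8)]
l_pyr = ps4.laplacian_pyramid(g_pyr)
print(np.allclose(l_pyr[-1], g_pyr[-1]))   # True: the coarsest level is passed through
```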
#### File: assignments/ps04/ps4_test.py
```python
import numpy as np
import cv2
import unittest
import ps4
INPUT_DIR = "input_images/test_images/"
class Part1(unittest.TestCase):
@classmethod
def setUpClass(self):
self.input_imgs_1 = ['test_lk1.png', 'test_lk3.png', 'test_lk5.png']
self.input_imgs_2 = ['test_lk2.png', 'test_lk4.png', 'test_lk6.png']
self.delta_c = [0, 0, -1]
self.delta_r = [0, -1, -1]
self.r_val = [14, 12, 14]
self.c_val = [15, 16, 15]
self.cb = [(28, 30), (24, 32), (28, 30)]
self.k_size = 15
self.k_type = 'uniform'
def test_optic_flow_LK(self):
for i in range(3):
f1 = self.input_imgs_1[i]
f2 = self.input_imgs_2[i]
img1 = cv2.imread(INPUT_DIR + f1, 0) / 255.
img2 = cv2.imread(INPUT_DIR + f2, 0) / 255.
u, v = ps4.optic_flow_lk(img1.copy(), img2.copy(),
self.k_size, self.k_type, 1.)
r = self.r_val[i]
c = self.c_val[i]
d_c = self.delta_c[i]
d_r = self.delta_r[i]
center_box = self.cb[i]
u_mean = np.mean(u[r:r + center_box[0],
c:c + center_box[1]])
check_u = abs(u_mean - d_c) <= 0.5
error_msg = "Average of U values in the area where there is " \
"movement is greater than the allowed amount."
self.assertTrue(check_u, error_msg)
v_mean = np.mean(v[r:r + center_box[0],
c:c + center_box[1]])
check_v = abs(v_mean - d_r) <= 0.5
error_msg = "Average of V values in the area where there is " \
"movement is greater than the allowed amount."
self.assertTrue(check_v, error_msg)
class Part2(unittest.TestCase):
def test_reduce(self):
input_imgs = ['test_reduce1_img.npy', 'test_reduce2_img.npy',
'test_reduce3_img.npy']
ref_imgs = ['test_reduce1_ref.npy', 'test_reduce2_ref.npy',
'test_reduce3_ref.npy']
for i in range(3):
f1 = input_imgs[i]
f2 = ref_imgs[i]
test_array = np.load(INPUT_DIR + f1)
reduced = ps4.reduce_image(test_array.copy())
ref_reduced = np.load(INPUT_DIR + f2)
correct = np.allclose(reduced, ref_reduced, atol=0.05)
self.assertTrue(correct, "Output does not match the reference "
"solution.")
def test_expand(self):
input_imgs = ['test_expand1_img.npy', 'test_expand2_img.npy',
'test_expand3_img.npy']
ref_imgs = ['test_expand1_ref.npy', 'test_expand2_ref.npy',
'test_expand3_ref.npy']
for i in range(3):
f1 = input_imgs[i]
f2 = ref_imgs[i]
test_array = np.load(INPUT_DIR + f1)
expanded = ps4.expand_image(test_array.copy())
ref_expanded = np.load(INPUT_DIR + f2)
correct = np.allclose(expanded, ref_expanded, atol=0.05)
self.assertTrue(correct, "Output does not match the reference "
"solution.")
def test_gaussian_pyramid(self):
input_imgs = ['test_gauss1_pyr.npy', 'test_gauss2_pyr.npy',
'test_gauss3_pyr.npy']
ref_imgs = ['test_gauss1_pyr_ref.npy', 'test_gauss2_pyr_ref.npy',
'test_gauss3_pyr_ref.npy']
levels = [4, 2, 4]
for i in range(3):
f1 = input_imgs[i]
f2 = ref_imgs[i]
l = levels[i]
test_array = np.load(INPUT_DIR + f1)
g_pyr = ps4.gaussian_pyramid(test_array.copy(), levels=l)
g_pyr_ref = np.load(INPUT_DIR + f2)
for l in range(len(g_pyr)):
correct = np.allclose(g_pyr[l], g_pyr_ref[l], atol=0.1)
error_msg = "Value at level {} does not match the answer." \
"".format(l)
self.assertTrue(correct, error_msg)
def test_laplacian_pyramid(self):
input_imgs = ['test_lapl1_pyr.npy', 'test_lapl2_pyr.npy',
'test_lapl3_pyr.npy']
ref_imgs = ['test_lapl1_pyr_ref.npy', 'test_lapl2_pyr_ref.npy',
'test_lapl3_pyr_ref.npy']
levels = [5, 5, 4]
for i in range(3):
f1 = input_imgs[i]
f2 = ref_imgs[i]
test_array = np.load(INPUT_DIR + f1)
l_pyr = ps4.laplacian_pyramid(test_array)
l_pyr_ref = np.load(INPUT_DIR + f2)
for l in range(levels[i]):
correct = np.allclose(l_pyr[l], l_pyr_ref[l], atol=0.1)
error_msg = "Value at level {} does not match the answer. " \
"Make sure your expand() function is passing " \
"the autograder.\n".format(l)
self.assertTrue(correct, error_msg)
class Part3(unittest.TestCase):
@classmethod
def setUpClass(self):
self.input_imgs_1 = ['test_warp1.npy', 'test_warp3.npy',
'test_warp5.npy']
self.input_imgs_2 = ['test_warp2.npy', 'test_warp4.npy',
'test_warp6.npy']
self.input_flows = ['u_v1.npy', 'u_v2.npy', 'u_v3.npy']
self.r_val = [6, 5, 8]
self.c_val = [9, 8, 7]
self.bv = [168, 139, 242]
def test_warp(self):
for i in range(2):
f1 = self.input_imgs_1[i] # Not used
f2 = self.input_imgs_2[i]
f3 = self.input_flows[i]
img1 = np.load(INPUT_DIR + f1) # Not used
img2 = np.load(INPUT_DIR + f2)
u_v = np.load(INPUT_DIR + f3)
u = u_v[:, :, 0]
v = u_v[:, :, 1]
warped = ps4.warp(img2.copy(), u.copy(), v.copy(),
cv2.INTER_CUBIC, cv2.BORDER_REFLECT101)
r = self.r_val[i]
c = self.c_val[i]
box_value = self.bv[i]
center_box_average = np.mean(warped[r:3 * r, c:3 * c])
correct_center_box = abs(center_box_average - box_value) <= 0.51
error_msg = "Center box average pixel value is greater than the " \
"value used in the input image."
self.assertTrue(correct_center_box, error_msg)
warped_without_center = np.copy(warped)
warped_without_center[r:3 * r, c:3 * c] = 0.
average_warped_img = np.mean(warped_without_center)
center_box_average = box_value * 0.15
correct_warped_img = center_box_average >= average_warped_img
error_msg = "Average of values outside the center box area are " \
"greater than the allowed amount."
self.assertTrue(correct_warped_img, error_msg)
class Part4(unittest.TestCase):
@classmethod
def setUpClass(self):
self.input_imgs_1 = ['test_hlk1.png', 'test_hlk3.png', 'test_hlk5.png']
self.input_imgs_2 = ['test_hlk2.png', 'test_hlk4.png', 'test_hlk6.png']
self.delta_c = [-7, -1, 1]
self.delta_r = [2, 6, 5]
self.r_val = [17, 17, 16]
self.c_val = [13, 17, 18]
self.cb = [(34, 26), (34, 34), (32, 36)]
self.k_size = 15
self.k_type = 'uniform'
def test_optic_flow_HLK(self):
for i in range(3):
f1 = self.input_imgs_1[i]
f2 = self.input_imgs_2[i]
img1 = cv2.imread(INPUT_DIR + f1, 0) / 255.
img2 = cv2.imread(INPUT_DIR + f2, 0) / 255.
u, v = ps4.hierarchical_lk(img1.copy(), img2.copy(), 3,
self.k_size, self.k_type, 1.,
cv2.INTER_CUBIC, cv2.BORDER_REFLECT101)
r = self.r_val[i]
c = self.c_val[i]
d_c = self.delta_c[i]
d_r = self.delta_r[i]
center_box = self.cb[i]
u_mean = np.mean(u[r:r + center_box[0],
c:c + center_box[1]])
max_diff = abs(d_c) * .1 + .2
check_u = abs(u_mean - d_c) < max_diff
error_msg = "Average of U values in the area where there is " \
"movement is greater than the allowed amount."
self.assertTrue(check_u, error_msg)
v_mean = np.mean(v[r:r + center_box[0],
c:c + center_box[1]])
max_diff = abs(d_r) * .1 + .2
check_v = abs(v_mean - d_r) < max_diff
error_msg = "Average of V values in the area where there is " \
"movement is greater than the allowed amount."
self.assertTrue(check_v, error_msg)
if __name__ == "__main__":
unittest.main()
```
#### File: assignments/ps05/experiment.py
```python
import cv2
import ps5
import os
import numpy as np
# I/O directories
input_dir = "input_images"
output_dir = "output"
NOISE_1 = {'x': 2.5, 'y': 2.5}
NOISE_2 = {'x': 7.5, 'y': 7.5}
# Helper code
def run_particle_filter(filter_class, imgs_dir, template_rect,
save_frames={}, **kwargs):
"""Runs a particle filter on a given video and template.
Create an object of type pf_class, passing in initial video frame,
template (extracted from first frame using template_rect), and any
keyword arguments.
Do not modify this function except for the debugging flag.
Args:
filter_class (object): particle filter class to instantiate
(e.g. ParticleFilter).
imgs_dir (str): path to input images.
template_rect (dict): template bounds (x, y, w, h), as float
or int.
save_frames (dict): frames to save
{<frame number>|'template': #}.
**kwargs: arbitrary keyword arguments passed on to particle
filter class.
Returns:
None.
"""
imgs_list = [f for f in os.listdir(imgs_dir)
if f[0] != '.' and f.endswith('.jpg')]
imgs_list.sort()
# Initialize objects
template = None
pf = None
frame_num = 0
# Loop over video (till last frame or Ctrl+C is presssed)
for img in imgs_list:
frame = cv2.imread(os.path.join(imgs_dir, img))
# Extract template and initialize (one-time only)
if template is None:
template = frame[int(template_rect['y']):
int(template_rect['y'] + template_rect['h']),
int(template_rect['x']):
int(template_rect['x'] + template_rect['w'])]
if 'template' in save_frames:
cv2.imwrite(save_frames['template'], template)
pf = filter_class(frame, template, **kwargs)
# Process frame
pf.process(frame)
if True: # For debugging, it displays every frame
out_frame = frame.copy()
pf.render(out_frame)
cv2.imshow('Tracking', out_frame)
cv2.waitKey(1)
# Render and save output, if indicated
if frame_num in save_frames:
frame_out = frame.copy()
pf.render(frame_out)
cv2.imwrite(save_frames[frame_num], frame_out)
# Update frame number
frame_num += 1
if frame_num % 20 == 0:
print 'Working on frame %d' % frame_num
def run_kalman_filter(kf, imgs_dir, noise, sensor, save_frames={},
template_loc=None):
imgs_list = [f for f in os.listdir(imgs_dir)
if f[0] != '.' and f.endswith('.jpg')]
imgs_list.sort()
frame_num = 0
if sensor == "hog":
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
elif sensor == "matching":
frame = cv2.imread(os.path.join(imgs_dir, imgs_list[0]))
template = frame[template_loc['y']:
template_loc['y'] + template_loc['h'],
template_loc['x']:
template_loc['x'] + template_loc['w']]
else:
raise ValueError("Unknown sensor name. Choose between 'hog' or "
"'matching'")
for img in imgs_list:
frame = cv2.imread(os.path.join(imgs_dir, img))
# Sensor
if sensor == "hog":
(rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4),
padding=(8, 8), scale=1.05)
if len(weights) > 0:
max_w_id = np.argmax(weights)
z_x, z_y, z_w, z_h = rects[max_w_id]
z_x += z_w // 2
z_y += z_h // 2
z_x += np.random.normal(0, noise['x'])
z_y += np.random.normal(0, noise['y'])
elif sensor == "matching":
corr_map = cv2.matchTemplate(frame, template, cv2.cv.CV_TM_SQDIFF)
z_y, z_x = np.unravel_index(np.argmin(corr_map), corr_map.shape)
z_w = template_loc['w']
z_h = template_loc['h']
z_x += z_w // 2 + np.random.normal(0, noise['x'])
z_y += z_h // 2 + np.random.normal(0, noise['y'])
x, y = kf.process(z_x, z_y)
if False: # For debugging, it displays every frame
out_frame = frame.copy()
cv2.circle(out_frame, (int(z_x), int(z_y)), 20, (0, 0, 255), 2)
cv2.circle(out_frame, (int(x), int(y)), 10, (255, 0, 0), 2)
cv2.rectangle(out_frame, (int(z_x) - z_w // 2, int(z_y) - z_h // 2),
(int(z_x) + z_w // 2, int(z_y) + z_h // 2),
(0, 0, 255), 2)
cv2.imshow('Tracking', out_frame)
cv2.waitKey(1)
# Render and save output, if indicated
if frame_num in save_frames:
frame_out = frame.copy()
cv2.circle(frame_out, (int(x), int(y)), 10, (255, 0, 0), 2)
cv2.imwrite(save_frames[frame_num], frame_out)
# Update frame number
frame_num += 1
if frame_num % 20 == 0:
print 'Working on frame %d' % frame_num
def part_1b():
print "Part 1b"
template_loc = {'y': 72, 'x': 140, 'w': 50, 'h': 50}
# Define process and measurement arrays if you want to use other than the
# default. Pass them to KalmanFilter.
Q = None # Process noise array
R = None # Measurement noise array
kf = ps5.KalmanFilter(template_loc['x'], template_loc['y'])
save_frames = {10: os.path.join(output_dir, 'ps5-1-b-1.png'),
30: os.path.join(output_dir, 'ps5-1-b-2.png'),
59: os.path.join(output_dir, 'ps5-1-b-3.png'),
99: os.path.join(output_dir, 'ps5-1-b-4.png')}
run_kalman_filter(kf,
os.path.join(input_dir, "circle"),
NOISE_2,
"matching",
save_frames,
template_loc)
def part_1c():
print "Part 1c"
init_pos = {'x': 311, 'y': 217}
# Define process and measurement arrays if you want to use other than the
# default. Pass them to KalmanFilter.
Q = None # Process noise array
R = None # Measurement noise array
kf = ps5.KalmanFilter(init_pos['x'], init_pos['y'])
save_frames = {10: os.path.join(output_dir, 'ps5-1-c-1.png'),
33: os.path.join(output_dir, 'ps5-1-c-2.png'),
84: os.path.join(output_dir, 'ps5-1-c-3.png'),
159: os.path.join(output_dir, 'ps5-1-c-4.png')}
run_kalman_filter(kf,
os.path.join(input_dir, "walking"),
NOISE_1,
"hog",
save_frames)
def part_2a():
template_loc = {'y': 72, 'x': 140, 'w': 50, 'h': 50}
save_frames = {10: os.path.join(output_dir, 'ps5-2-a-1.png'),
30: os.path.join(output_dir, 'ps5-2-a-2.png'),
59: os.path.join(output_dir, 'ps5-2-a-3.png'),
99: os.path.join(output_dir, 'ps5-2-a-4.png')}
num_particles = 200 # Define the number of particles
sigma_mse = 10 # Define the value of sigma for the measurement exponential equation
sigma_dyn = 10 # Define the value of sigma for the particles movement (dynamics)
run_particle_filter(ps5.ParticleFilter, # particle filter model class
os.path.join(input_dir, "circle"),
template_loc,
save_frames,
num_particles=num_particles, sigma_exp=sigma_mse,
sigma_dyn=sigma_dyn,
template_coords=template_loc) # Add more if you need to
def part_2b():
template_loc = {'x': 360, 'y': 141, 'w': 127, 'h': 179}
save_frames = {10: os.path.join(output_dir, 'ps5-2-b-1.png'),
33: os.path.join(output_dir, 'ps5-2-b-2.png'),
84: os.path.join(output_dir, 'ps5-2-b-3.png'),
99: os.path.join(output_dir, 'ps5-2-b-4.png')}
num_particles = 200 # Define the number of particles
sigma_mse = 10 # Define the value of sigma for the measurement exponential equation
sigma_dyn = 10 # Define the value of sigma for the particles movement (dynamics)
run_particle_filter(ps5.ParticleFilter, # particle filter model class
os.path.join(input_dir, "pres_debate_noisy"),
template_loc,
save_frames,
num_particles=num_particles, sigma_exp=sigma_mse,
sigma_dyn=sigma_dyn,
template_coords=template_loc) # Add more if you need to
def part_3():
template_rect = {'x': 538, 'y': 377, 'w': 73, 'h': 117}
save_frames = {22: os.path.join(output_dir, 'ps5-3-a-1.png'),
50: os.path.join(output_dir, 'ps5-3-a-2.png'),
160: os.path.join(output_dir, 'ps5-3-a-3.png')}
num_particles = 500 # Define the number of particles
sigma_mse = 6 # Define the value of sigma for the measurement exponential equation
sigma_dyn = 30 # Define the value of sigma for the particles movement (dynamics)
alpha = 0.95 # Set a value for alpha
run_particle_filter(ps5.AppearanceModelPF, # particle filter model class
os.path.join(input_dir, "pres_debate"),
# input video
template_rect,
save_frames,
num_particles=num_particles, sigma_exp=sigma_mse,
sigma_dyn=sigma_dyn, alpha=alpha,
template_coords=template_rect) # Add more if you need to
def part_4():
template_rect = {'x': 210, 'y': 37, 'w': 103, 'h': 285}
save_frames = {40: os.path.join(output_dir, 'ps5-4-a-1.png'),
100: os.path.join(output_dir, 'ps5-4-a-2.png'),
240: os.path.join(output_dir, 'ps5-4-a-3.png'),
300: os.path.join(output_dir, 'ps5-4-a-4.png')}
num_particles = 250 # Define the number of particles
sigma_md = 10 # Define the value of sigma for the measurement exponential equation
sigma_dyn = 4 # Define the value of sigma for the particles movement (dynamics)
alpha = 0.1
run_particle_filter(ps5.MDParticleFilter,
os.path.join(input_dir, "pedestrians"),
template_rect,
save_frames,
num_particles=num_particles, sigma_exp=sigma_md,
sigma_dyn=sigma_dyn,
template_coords=template_rect,
alpha=alpha,
scale = 0.995) # Add more if you need to
def part_5():
"""Tracking multiple Targets.
Use either a Kalman or particle filter to track multiple targets
as they move through the given video. Use the sequence of images
in the TUD-Campus directory.
Follow the instructions in the problem set instructions.
Place all your work in this file and this section.
"""
save_frames = {29: os.path.join(output_dir, 'ps5-5-a-1.png'),
56: os.path.join(output_dir, 'ps5-5-a-2.png'),
71: os.path.join(output_dir, 'ps5-5-a-3.png')}
t1 = {
'x': 60,
'y': 200,
'w': 100,
'h': 100
}
t2 = {
'x': 414,
'y': 220,
'w': 100,
'h': 100
}
t3 = {
'x': 20,
'y': 172,
'w': 60,
'h': 150
}
kwargs1 = {
'num_particles': 400,
'sigma_exp': 5,
'sigma_dyn': 15,
'alpha': 0.05,
}
kwargs2 = {
'num_particles': 250,
'sigma_exp': 5,
'sigma_dyn': 10,
'alpha': 0.,
}
kwargs3 = {
'num_particles': 150,
'sigma_exp': 5,
'sigma_dyn': 15,
'alpha': 0.05,
}
imgs_dir = os.path.join(input_dir, "TUD-Campus")
imgs_list = [f for f in os.listdir(imgs_dir)
if f[0] != '.' and f.endswith('.jpg')]
imgs_list = sorted(imgs_list)
# Initialize objects
templates = []
pf1 = None
pf2 = None
pf3 = None
frame_num = 1
for img in imgs_list:
frame = cv2.imread(os.path.join(os.path.join(input_dir, "TUD-Campus"), img))
# Extract template and initialize (one-time only)
if len(templates) < 1:
template1 = frame[int(t1['y']): int(t1['y'] + t1['h']), int(t1['x']): int(t1['x'] + t1['w'])]
template2 = frame[int(t2['y']):int(t2['y'] + t2['h']), int(t2['x']): int(t2['x'] + t2['w'])]
templates.append(template1)
templates.append(template2)
pf1 = ps5.AppearanceModelPF(frame, template=template1, template_coords=t1, **kwargs1)
pf2 = ps5.AppearanceModelPF(frame, template=template2, template_coords=t2, **kwargs2)
if frame_num == 32:
template3 = frame[int(t3['y']):int(t3['y'] + t3['h']), int(t3['x']): int(t3['x'] + t3['w'])]
templates.append(template3)
pf3 = ps5.AppearanceModelPF(frame, template=template3, template_coords=t3, **kwargs3)
# Process frame
pf1.process(frame)
if frame_num <= 29:
pf2.process(frame)
if frame_num >= 32:
pf3.process(frame)
if True: # For debugging, it displays every frame
out_frame = frame.copy()
pf1.render(out_frame)
if frame_num <= 29:
pf2.render(out_frame)
if frame_num >= 32:
pf3.render(out_frame)
cv2.imshow('Tracking', out_frame)
cv2.waitKey(1)
# Render and save output, if indicated
if frame_num in save_frames:
frame_out = frame.copy()
pf1.render(frame_out)
if frame_num <= 29:
pf2.render(frame_out)
if frame_num >= 32:
pf3.render(frame_out)
cv2.imwrite(save_frames[frame_num], frame_out)
# if frame_num == 71:
# frame_out = frame.copy()
# pf1.render(frame_out)
# pf2.render(frame_out)
# pf3.render(frame_out)
# cv2.imwrite(save_frames[frame_num], frame_out)
# Update frame number
frame_num += 1
def part_6():
"""Tracking pedestrians from a moving camera.
Follow the instructions in the problem set instructions.
Place all your work in this file and this section.
"""
raise NotImplementedError
if __name__ == '__main__':
# part_1b()
# part_1c()
# part_2a()
# part_2b()
part_3()
# part_4()
# part_5()
# part_6()
```
#### File: 3A-L2/answers/project_a_point.py
```python
import cv2
import numpy as np
# Project a point from 3D to 2D using a matrix operation
# Given: Point p in 3-space [x y z], and focal length f
# Return: Location of projected point on 2D image plane [u v]
def project_point(p, f):
# TODO: Define and apply projection matrix
H = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1./f, 0]])
p2 = np.hstack((p, np.array([[1]])))
res = np.dot(H, p2.T)
return res[0, 0] / res[2, 0], res[1, 0] / res[2, 0]
# Test: Given point and focal length (units: mm)
p = np.array([[200, 100, 120]])
f = 50
print project_point(p, f)
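# Added note (not part of the original quiz answer): with the projection above,
# u = f*x/z and v = f*y/z, so for p = [200, 100, 120] and f = 50 this should
# print approximately (83.33, 41.67).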
```
#### File: 3B-L3/answers/find_best_match.py
```python
import cv2
import numpy as np
# Find best match
def find_best_match(patch, strip):
# TODO: Find patch in strip and return column index (x value) of topleft corner
# We will use SSD to find out the best match
best_id = 0
min_diff = np.infty
for i in range(int(strip.shape[1] - patch.shape[1])):
temp = strip[:, i: i + patch.shape[1]]
ssd = np.sum((temp - patch) ** 2)
if ssd < min_diff:
min_diff = ssd
best_id = i
return best_id
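# Added note (not part of the original answer): the score computed above is the
# sum of squared differences, SSD(i) = sum over (r, c) of
# (strip[r, i + c] - patch[r, c]) ** 2, and the window position with the
# smallest SSD is returned as the best match.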
# Test code:
# Load images
left = cv2.imread('../images/flowers-left.png')
right = cv2.imread('../images/flowers-right.png')
cv2.imshow('Left', left)
cv2.imshow('Right', right)
# Convert to grayscale, double, [0, 1] range for easier computation
left_gray = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY) / 255.
right_gray = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY) / 255.
# Define image patch location (topleft [row col]) and size
patch_loc = [94, 119] # Adapted index values to approximate the difference with the original images shapes
patch_size = [100, 100]
# Extract patch (from left image)
patch_left = left_gray[patch_loc[0]:patch_loc[0] + patch_size[0],
patch_loc[1]:patch_loc[1] + patch_size[1]]
cv2.imshow('Patch', patch_left)
# Extract strip (from right image)
strip_right = right_gray[patch_loc[0]: patch_loc[0] + patch_size[0], :]
cv2.imshow('Strip', strip_right)
# Now look for the patch in the strip and report the best position (column index of topleft corner)
best_x = find_best_match(patch_left, strip_right)
print best_x
patch_right = right_gray[patch_loc[0]: patch_loc[0] + patch_size[0],
best_x: best_x + patch_size[1]]
# Because we had to adjust the index numbers for this quiz, we will
# plot a rectangle where the best match is in the right image. This
# will help us verify if what we did was correct.
cv2.rectangle(right, (best_x, patch_loc[0]),
(best_x + patch_size[0], patch_loc[0] + patch_size[0]),
(0, 0, 255),
2)
cv2.imshow("Match", right)
cv2.waitKey(0)
``` |
{
"source": "jpes707/quiz-api",
"score": 3
} |
#### File: jpes707/quiz-api/api_updater.py
```python
import os
from mongo_config import questions_collection, client
def get_relative_path(*args):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)
questions_collection.delete_many({}) # clears all questions in MongoDB
question_objects = []
lines = [line[:-1] for line in open(get_relative_path('trivia.txt'), 'r').readlines()] + ['']
for idx in range(0, len(lines), 6):
question = lines[idx]
correct_answer = lines[idx + 1]
wrong_answers = lines[idx + 2 : idx + 5]
choices = [correct_answer] + wrong_answers # not shuffled yet
question_object = {'question': question, 'correct_answer': correct_answer, 'choices': choices}
question_objects.append(question_object)
questions_collection.insert_many(question_objects) # puts all questions from txt file into MongoDB
client.close()
print('Questions updated!')
``` |
{
"source": "jpesperidiao/MOPA",
"score": 2
} |
#### File: DatabaseTools/DatabaseManager/abstractDatabase.py
```python
import sqlite3, os
from PyQt5.QtCore import QObject
from Core.enums import Enums
from Core.DatabaseTools.SqlGenerator.sqlGeneratorFactory import SqlGeneratorFactory
class AbstractDatabase(QObject):
def __init__(self, parameters=None):
"""
Connects to a database and manages its contents.
:parameters: (dict) connection parameters.
"""
super(AbstractDatabase, self).__init__()
self.connect(parameters)
self.gen = self.sqlGenerator()
def driver(self):
"""
Gets current connection's driver.
:return: (int) driver code/enum.
"""
return Enums.NoDatabase
def driverName(self):
"""
Gets current connection's driver name.
"""
return "NoDriver"
def name(self):
"""
Gets current database's connection name.
:return: (str) database name.
"""
return ''
def query(self, sql, commit=False):
"""
Executes a query on loaded database.
:param sql: (str) SQL statement to be executed on the database.
:param commit: (bool) if any changes should be commited to database.
:return: (cursor?) cursor to query results.
"""
# to be reimplemented
pass
def validateConnectionParameters(self, parameters):
"""
Validates connection parameters before trying to connect it.
:parameters: (dict) connection parameters.
:return: (bool) parameters' validity status.
"""
# to be reimplemented
return False
def connect(self, parameters):
"""
Connects to a database and sets it to db attribute.
:parameters: (dict) connection parameters.
:return: () database object.
"""
# to be reimplemented
self.db = None
def isConnected(self):
"""
Checks if current database is connected to a valid source.
:return: (bool) validity status.
"""
# to be reimplemented
return self.db is not None
def createDatabase(self, parameters):
"""
Creates a database.
:return: (bool) creation status.
"""
# to be reimplemented
return False
def disconnect(self):
"""
Disconnects from a database, if connected to any.
"""
if self.db is not None:
self.db.close()
del self.db
self.db = None
def sqlGenerator(self):
"""
Gets a SQL generator object.
:return: (AbstractSqlGenerator) SQL generator.
"""
return SqlGeneratorFactory.getGenerator(self.driver())
def createObservations(self):
"""
Creates observations table.
:return: (bool) execution status.
"""
self.query(self.gen.createObservations(), commit=True)
return 'observation' in self.allTables()
def createSensors(self):
"""
Creates sensors table.
:return: (bool) execution status.
"""
self.query(self.gen.createSensors(), commit=True)
return 'sensors' in self.allTables()
def allTables(self):
"""
Gets a list of all available tables.
:return: (list-of-str) list of names from available tables.
"""
if self.isConnected():
return [t[0] for t in self.query(self.gen.allTables())]
return []
def allObservations(self):
"""
A list of all observations' information present in the database.
:return: (list-of-tuple) observations' information.
"""
if self.isConnected():
return self.query(self.gen.allObservations())
return []
def allSensors(self):
"""
A list of all sensors' information present in the database.
:return: (list-of-tuple) sensors' information.
"""
if self.isConnected():
return self.query(self.gen.allSensors())
return []
def getSensor(self, sensorId):
"""
Gets a sensor using its ID.
:param sensorId: (int) sensor's ID.
:return: (tuple) sensor's informations, if it exists.
"""
if self.isConnected():
sensorL = self.query(self.gen.getSensor(sensorId))
return sensorL[0] if len(sensorL) > 0 else tuple()
return tuple()
def addObservation(self, azimuth, zenith, sensorId, commit=True):
"""
Adds an observation to the database.
:param azimuth: observation's azimuth angle.
:param zenith: observation's zenith angle.
:param sensorId: (str) station's ID.
:param commit: (bool) commit addition to database.
"""
if self.isConnected():
return self.query(
self.gen.addObservation(azimuth, zenith, sensorId), commit
)
return
def addSensor(self, coordinates, epsg, name=None, status=True, commit=True):
"""
Adds a sensor to the database.
:param coordinates: (tuple-of-float) sensor's coordinates.
:param epsg: (int) sensor's CRS auth id.
:param name: (str) station's friendly name.
:param status: (bool) working status.
:param commit: (bool) commit addition to database.
"""
if self.isConnected():
self.query(
self.gen.addSensor(coordinates, epsg, name, status), commit
)
return
def updateObservation(self, table, obs, commit=True):
"""
Updates an observation's information. The observation should already
exist in the database.
:param table: (str) observations' table name.
:param sensor: (Observation) observation object.
:param commit: (bool) commit addition to database.
"""
if self.isConnected():
self.query(
self.gen.updateObservation(
table=table, obsId=obs['id'], azimuth=obs['azimuth'],
zenith=obs['zenith'], sensorId=obs['sensorId'],
date=obs['date']
), commit
)
def updateSensor(self, table, sensor, commit=True):
"""
Updates a sensor's information. The sensor should already exist in
the database.
:param table: (str) sensors' table name.
:param sensor: (Sensor) sensor object.
:param commit: (bool) commit addition to database.
"""
if self.isConnected():
coord = ",".join(map(str, sensor['coordinates']))
self.query(
self.gen.updateSensor(
table=table, epsg=sensor['epsg'], sensorId=sensor['id'],
coord=coord, onDate=sensor['activation_date'],
status=sensor['status'], name=sensor['name'],
offDate=sensor['deactivation_date']
), commit
)
def createShootersTable(self, tablename, commit=True):
"""
Creates the shooters' table. Method should be invoked from settings module.
:para tablename: (str) shooters' table name (default from settings).
:param commit: (bool) commit table creation to the database.
"""
if self.isConnected() and not self.tableExists(tablename):
self.query(self.gen.createShootersTable(tablename), commit)
return self.tableExists(tablename)
return False
def dropShootersTable(self, tablename, commit=True):
"""
Drops shooters' table. Method should be invoked from settings module.
:para tablename: (str) shooters' table name (default from settings).
:param commit: (bool) commit table creation to the database.
"""
if self.isConnected() and self.tableExists(tablename):
self.query(self.gen.dropTable(tablename), commit)
return not self.tableExists(tablename)
return False
def tableExists(self, tablename):
"""
Verifies if table exists into database.
:param tablename: (str) table's name.
:return: (bool) whether table exists.
"""
return tablename in self.allTables()
```
#### File: CustomWidgets/PlottingWidgets/mayaViWidget.py
```python
from traits.api import HasTraits, Instance, on_trait_change
from traitsui.api import View, Item
from mayavi.core.ui.api import MayaviScene, MlabSceneModel, \
SceneEditor
from PyQt5.QtWidgets import QWidget
class Visualization(HasTraits):
"""
Class 'as is' from MayaVi API example.
Source: http://docs.enthought.com/mayavi/mayavi/auto/example_qt_embedding.html
"""
scene = Instance(MlabSceneModel, ())
@on_trait_change('scene.activated')
def update_plot(self):
# This function is called when the view is opened. We don't
# populate the scene when the view is not yet open, as some
# VTK features require a GLContext.
# We can do normal mlab calls on the embedded scene.
self.scene.mlab.test_points3d()
# the layout of the dialog created
view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=250, width=300, show_label=False),
resizable=True # We need this to resize with the parent widget
)
class MayaViWidget(QWidget):
"""
A widget to embed MayaVi data visualization to GUI.
"""
def __init__(self, parent=None):
"""
Class constructor.
:param parent: (QtWidgets) any QtWidgets object parent to new instance of MayaViWidget.
"""
super(MayaViWidget, self).__init__(parent)
self.parent = parent
```
#### File: Gui/CustomWidgets/summaryDialog.py
```python
import os
from PyQt5 import uic
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QDialog, QFileDialog
FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'summaryDialog.ui'))
class SummaryDialog(QDialog, FORM_CLASS):
def __init__(self, html=None, parent=None):
"""
Class constructor.
:param parent: (QWidget) any widget from Qt5 parent to this dialog.
"""
super(SummaryDialog, self).__init__(parent)
self.setupUi(self)
self.parent = parent
self.setHtml(html)
def setHtml(self, html):
"""
Sets HTML text to GUI.
:param html: (str) html text to be added.
"""
html = "" if html is None else html
self.summaryTextBrowser.setHtml(html)
def addToHtml(self, textToAdd):
"""
Appends text to existing one.
:param textToAdd: (str) text to be added to GUI.
"""
html = self.summaryTextBrowser.toHtml()
self.summaryTextBrowser.setHtml(html + textToAdd)
def clearHtml(self):
"""
Clears text from GUI.
"""
self.summaryTextBrowser.setHtml("")
def template(self):
"""
Gets summary template contents.
:return: (str) template's contents.
"""
with open(os.path.join(os.path.dirname(__file__), 'summaryTemplate.html'), 'r') as f:
return f.read()
def setSummary(self, method, dem, sensor, obs, shooters, elapsedTime):
"""
Sets summary to interface.
:param method: (str) name of the method used to find the shooter.
:param dem: (RasterLayer) DEM used.
:param sensor: (Sensor) sensor used for shooter detection.
:param obs: (Observation) observation detected by sensor.
:param shooters: (set) shooters found.
:param elapsedTime: (float) elapsed time in seconds.
"""
template = self.template()
xMin, xMax, yMin, yMax = dem.extents()
sensorY, sensorX, sensorZ = sensor['coordinates']
template = template.replace('SENSOR_Y', "{0}".format(sensorY))\
.replace('SENSOR_X', "{0}".format(sensorX))\
.replace('SENSOR_Z', "{0:.2f}".format(sensorZ))\
.replace('SENSOR_EPSG', "{0}".format(sensor['epsg']))\
.replace('AZIMUTH', "{0:.3f}".format(obs['azimuth']))\
.replace('ZENITH', "{0:.3f}".format(obs['zenith']))\
.replace('METHOD_NAME', method)\
.replace('EXEC_TIME', "{0:.3f} s".format(elapsedTime))\
.replace('RASTER_FILEPATH', dem.directory)\
.replace('RASTER_UNITS', self.tr('degrees') if dem.isGeographic() else self.tr('meters'))\
.replace('RASTER_CRS', dem.projection())\
.replace('SPATIAL_RESOLUTION', "{0:.2f} m".format(dem.spatialResolution()))\
.replace('ROW_COUNT', str(dem.width()))\
.replace('COL_COUNT', str(dem.height()))\
.replace('Y_AXIS', self.tr('Latitude') if dem.isGeographic() else self.tr('Northing'))\
.replace('X_AXIS', self.tr('Longitude') if dem.isGeographic() else self.tr('Easting'))\
.replace('Z_AXIS', self.tr('Altitude'))\
.replace('MIN_Y', str(yMin))\
.replace('MIN_X', str(xMin))\
.replace('MIN_Z', "{0:.2f}".format(dem.min()))\
.replace('MAX_Y', str(yMax))\
.replace('MAX_X', str(xMax))\
.replace('MAX_Z', "{0:.2f}".format(dem.max()))
shootersString = ""
for idx, shooter in enumerate(shooters):
row, col, h = shooter
y, x = dem.pixelToCoordinates(col, row)
shootersString += """
<tr>
<td style="text-align: center; width: 110%;"><strong>SH_NUMBER</strong></td>
<td style="text-align: center; width: 110%;">SH_COL</td>
<td style="text-align: center; width: 110%;">SH_ROW</td>
<td style="text-align: center; width: 110%;">SH_Y</td>
<td style="text-align: center; width: 110%;">SH_X</td>
<td style="text-align: center; width: 110%;">SH_HEIGHT</td>
</tr>
""".replace('SH_NUMBER', str(idx + 1))\
.replace('SH_COL', str(col))\
.replace('SH_ROW', str(row))\
.replace('SH_Y', str(y))\
.replace('SH_X', str(x))\
.replace('SH_HEIGHT', "{0:.2f}".format(h))
self.summaryTextBrowser.setHtml(template.replace('SHOOTERS_TABLE', shootersString))
@pyqtSlot(bool, name='on_savePushButton_clicked')
def saveHtml(self):
"""
Exports text.
:return: (str) output path.
"""
html = self.summaryTextBrowser.toHtml()
fd = QFileDialog()
filename = fd.getSaveFileName(caption=self.tr('Select a Path to Log'),filter=self.tr('HTML Files (*.html)'))
filename = filename[0] if isinstance(filename, tuple) else filename
if filename:
with open(filename, 'w', encoding='utf-8') as f:
f.write(html)
return filename
@pyqtSlot(bool, name='on_closePushButton_clicked')
def exit(self):
"""
Closes dialog.
"""
self.close()
```
#### File: jpesperidiao/MOPA/tests.py
```python
from Core.Sensor.sensorsManager import SensorsManager
from Core.Observation.observationsManager import ObservationsManager
from Settings.settings import Settings
from Core.ProcessingTools.rasterLayer import RasterLayer
from Core.ProcessingTools.shooterFinder import ShooterFinder
import numpy as np
from Core.enums import Enums
def generateObsAtPixel(dem, sensor, col, lin):
"""
Generates an observations at a given column / line of a raster.
:param col: (int) shooter's column.
:param lin: (int) shooter's line.
:return: (Observation) observation at col, pix.
"""
# handle outside points - put them back to the borders
if col > dem.width():
col = dem.width() - 1
elif col < 0:
col = 0
if lin > dem.height():
lin = dem.height() - 1
elif lin < 0:
lin = 0
y, x = dem.pixelToCoordinates(col, lin)
z = dem.bands()[col][lin]
sensorY, sensorX, sensorZ = sensor['coordinates']
dx, dy, dz = x - sensorX, y - sensorY, z - sensorZ
if dem.isGeographic():
# the more appropriate conversion would be a geodesic line - curve geometry
dx = np.deg2rad(dx) * Enums.EARTH_RADIUS
dy = np.deg2rad(dy) * np.pi * Enums.EARTH_RADIUS / 180
azimuth = np.rad2deg(np.arctan2(np.sqrt(dy ** 2 + dz ** 2), dx))
zenith = np.rad2deg(np.arctan2(dx, dz))
om = ObservationsManager()
om.addObservation(azimuth, zenith, sensor['id'])
return zenith, azimuth
s = Settings()
sm = SensorsManager(s)
sensor = sm.getSensorFromId(3)
om = ObservationsManager(s)
demPath = ".dev/testing_data/sf-23-z-b/rj_srtm_90m_sirgas2000-23s.tif"
dem = RasterLayer(demPath)
generateObsAtPixel(dem, sensor, 102, 101)
obs = om.getObservationsFromSensor(3)[17]
sf = ShooterFinder(s)
sf.findShooter(1, sensor, obs, dem)
print(om.getObservationsFromSensor(3))
``` |
{
"source": "jpetazzo/aiotone",
"score": 2
} |
#### File: aiotone/aiotone/profiling.py
```python
from __future__ import annotations
from typing import *
from contextlib import contextmanager, nullcontext
import cProfile
import pstats
SortKey = pstats.SortKey # type: ignore
@contextmanager
def profile() -> Iterator:
try:
with cProfile.Profile() as pr:
yield
finally:
st = pstats.Stats(pr).sort_stats(SortKey.CALLS)
st.print_stats()
st.sort_stats(SortKey.CUMULATIVE)
st.print_stats()
def maybe(toggle: bool) -> ContextManager:
if toggle:
return profile()
return nullcontext()
``` |
{
"source": "jpeter82/poker-player-c2h5oh",
"score": 3
} |
#### File: jpeter82/poker-player-c2h5oh/player.py
```python
class Player:
VERSION = "c2h5oh ver2"
# first bet round without community cards
def first_round(self, game_state):
if game_state['bet_index'] == 0:
bet = game_state['small_blind']
elif game_state['bet_index'] == 1:
bet = game_state['small_blind'] * 2
else:
if self.is_pair_in_hand(game_state):
bet = game_state['current_buy_in']
else:
bet = 0
# player_idx = game_state['in_action']
# bet = game_state['current_buy_in'] - game_state['players'][player_idx]['bet']
return bet
# 3 community cards
def second_round(self, game_state):
# bet = game_state['current_buy_in'] - game_state['players'][player_idx]['bet']
bet = game_state['current_buy_in']
return bet
# 4 community cards
def third_round(self, game_state):
# bet = game_state['current_buy_in'] - game_state['players'][player_idx]['bet']
bet = game_state['current_buy_in']
return bet
# 5 community cards
def fourth_round(self, game_state):
# bet = game_state['current_buy_in'] - game_state['players'][player_idx]['bet']
bet = game_state['current_buy_in']
return bet
def betRequest(self, game_state):
bet = 0
if len(game_state['community_cards']) == 0:
bet = self.first_round(game_state)
elif len(game_state['community_cards']) == 3:
bet = self.second_round(game_state)
elif len(game_state['community_cards']) == 4:
bet = self.third_round(game_state)
elif len(game_state['community_cards']) == 5:
bet = self.fourth_round(game_state)
# self.transform_hand(game_state)
return bet
def is_pair_in_hand(self, game_state):
player_idx = game_state['in_action']
return game_state['players'][player_idx]['hole_cards'][0]['rank'] == game_state['players'][player_idx]['hole_cards'][1]['rank']
def transform_hand(self, game_state):
modified_hand = []
player_idx = game_state['in_action']
for card in game_state['players'][player_idx]['hole_cards']:
if card['suit'] == 'clubs':
modified_suit = 'C'
elif card['suit'] == 'spades':
modified_suit = 'S'
elif card['suit'] == 'hearts':
modified_suit = 'H'
elif card['suit'] == 'diamonds':
modified_suit = 'D'
if card['rank'] == '10':
modified_rank = 'T'
else:
modified_rank = card['rank']
modified_hand.append(modified_suit + modified_rank)
return modified_hand
def showdown(self, game_state):
pass
``` |
{
"source": "jpeterik12/GM4_Resources",
"score": 2
} |
#### File: GM4_Resources/scripts/cit.py
```python
import pandas
import errno
import os
import cit_preprocess
NAMESPACE = "gm4"
CMD_PREFIXES = (0, 3420000) # legacy and registered prefixes
VANILLA_MODEL_PATH = "gm4_resources/assets/minecraft/optifine/cit"
DOC_ID = "1myt9FkMYkvyzpr9Uu7wXyzXNNreLQreWfWfP4CAGMQM"
DOC_SHEET = "ArmorCIT"
DOC_URL = f"https://docs.google.com/spreadsheets/d/{DOC_ID}/gviz/tq?tqx=out:csv&sheet={DOC_SHEET}"
def write_cit(path, content):
if not os.path.exists(os.path.dirname(f"{path}.properties")):
try:
os.makedirs(os.path.dirname(f"{path}.properties"))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(f"{path}.properties", "w+") as file:
file.write(content)
data = pandas.read_csv(DOC_URL)
data = cit_preprocess.preprocess_data(data)
data.dropna(subset = ["Item"], inplace=True)
for i, rows in data.groupby(["Item","Module","Name"]):
CMDList = rows["Index"].tolist()
CMDList = CMDList + [x + 3420000 for x in CMDList]
for row in rows.iterrows():
row = row[1]
texturebase = row['Item'].replace("_helmet","").replace("_chestplate","").replace("_leggings","").replace("_boots","").replace("golden","gold")
namebase = row['Name'].split('/')[-1].replace("_helmet","").replace("_chestplate","").replace("_leggings","").replace("_boots","").replace("golden","gold")
res = "type=armor\n"
res += f"matchItems={row['Item']}\n"
if (row["Layer 1"]): res += f"texture.{texturebase}_layer_1={namebase}_layer_1\n"
if (row["Overlay 1"] and "leather" in texturebase): res += f"texture.{texturebase}_layer_1_overlay={namebase}_layer_1_overlay\n"
if (row["Layer 2"]): res += f"texture.{texturebase}_layer_2={namebase}_layer_2\n"
if (row["Overlay 2"] and "leather" in texturebase): res += f"texture.{texturebase}_layer_2_overlay={namebase}_layer_2_overlay\n"
if len(CMDList) == 1: res += f"nbt.CustomModelData={CMDList[0]}\n"
else: res += f'nbt.CustomModelData=regex:({"|".join(str(x) for x in CMDList)})\n'
write_cit(f"{VANILLA_MODEL_PATH}/{row['Module']}/{row['Name']}", res)
OTHER_SHEET = "OtherCIT"
OTHER_URL = f"https://docs.google.com/spreadsheets/d/{DOC_ID}/gviz/tq?tqx=out:csv&sheet={OTHER_SHEET}"
data2 = pandas.read_csv(OTHER_URL)
data2.dropna(subset = ["Item"], inplace=True)
for i, rows in data2.groupby(["Type","Item","Name"]):
if i[0] == "elytra":
for row in rows.iterrows():
row = row[1]
res = f"type=elytra\nmatchItems=elytra\ntexture.elytra={row['Name'].split('/')[-1]}\nnbt.CustomModelData={row['Index'] + CMD_PREFIXES[1]}\n"
res += f"nbt.CustomModelData=regex:({row['Index']}|{row['Index'] + 3420000})\n"
write_cit(f"{VANILLA_MODEL_PATH}/{row['Module']}/{row['Name']}", res)
elif i[0] == "item":
CMDList = rows["Index"].tolist()
CMDList = CMDList + [x + 3420000 for x in CMDList]
for row in rows.iterrows():
row = row[1]
res = f"type=item\nmatchItems={row['Item']}\ntexture={row['Name'].split('/')[-1]}\nnbt.CustomModelData={row['Index'] + CMD_PREFIXES[1]}\n"
if len(CMDList) == 1: res += f"nbt.CustomModelData={CMDList[0]}\n"
else: res += f'nbt.CustomModelData=regex:({"|".join(str(x) for x in CMDList)})\n'
write_cit(f"{VANILLA_MODEL_PATH}/{row['Module']}/{row['Name']}", res)
``` |
{
"source": "j-petit/leipzig_hids",
"score": 2
} |
#### File: leipzig_hids/test/test_data_processing.py
```python
import pytest
import os
import pathpy
import subprocess
from .context import src
@pytest.mark.parametrize(
"test_syscall, parsed_syscall",
[
(
"1340 21:09:45.230382300 6 101 nginx 16840 > epoll_wait maxevents=512",
[
"1340",
"21:09:45.230382300",
"6",
"101",
"nginx",
"16840",
">",
"epoll_wait",
["maxevents=512"],
],
),
(
"446 21:09:43.368862035 6 101 nginx 16840 > recvfrom fd=13(<4t>172.17.0.1:44548->172.17.0.3:8080) size=1024",
[
"446",
"21:09:43.368862035",
"6",
"101",
"nginx",
"16840",
">",
"recvfrom",
["fd=13(<4t>172.17.0.1:44548->172.17.0.3:8080)", "size=1024"],
],
),
],
)
def test_parse_syscall(test_syscall, parsed_syscall):
result = src.data_processing.parse_syscall(test_syscall)
assert parsed_syscall[0] == result[0]
assert parsed_syscall[1] == result[1]
assert parsed_syscall[2] == result[2]
assert parsed_syscall[3] == result[3]
assert parsed_syscall[4] == result[4]
assert parsed_syscall[5] == result[5]
assert parsed_syscall[6] == result[6]
assert parsed_syscall[7] == result[7]
assert parsed_syscall[8] == result[8]
@pytest.mark.parametrize("run_file, parsed_results", [("test/mock_run.txt", [0, 22467, "futex"])])
def test_parse_run_to_pandas(run_file, parsed_results):
data = src.data_processing.parse_run_to_pandas(run_file)
assert data["time"].iloc[0] == parsed_results[0]
assert data["thread_id"].iloc[0] == parsed_results[1]
assert data["syscall"].iloc[0] == parsed_results[2]
@pytest.mark.parametrize(
"run_file, start_end_time, output_paths",
[
("test/mock_run.txt", None, [("futex",)]),
(
"test/mock_run_2.txt",
None,
[
("futex", "futex"),
("futex", "futex"),
("futex", "futex"),
("select", "sched_yield"),
(
"poll",
"fcntl",
"fcntl",
"accept",
"fcntl",
"getsockname",
"fcntl",
"fcntl",
"setsockopt",
"setsockopt",
"fcntl",
"setsockopt",
"setsockopt",
"mmap",
"mprotect",
),
],
),
(
"test/mock_run_2.txt",
(200000, 500000),
[("futex", "futex"), ("select", "sched_yield"), ("futex", "futex")],
),
],
)
def test_generate_paths_from_threads(run_file, start_end_time, output_paths):
paths = src.data_processing.generate_paths_from_threads(
src.data_processing.parse_run_to_pandas(run_file), start_end_time
)
assert len(output_paths) == paths.observation_count
for test_path in output_paths:
assert test_path in paths.paths[len(test_path) - 1]
@pytest.mark.skip(reason="Full test only when manual trigger. Takes long.")
def test_full_pipeline():
"Testing the full project pipeline with a minimal working example."
subprocess.call(
[
"python",
"run.py",
"-u",
"with",
"stages.pull_data=True",
"stages.make_temp_paths=True",
"stages.create_model=True",
"model.train_examples=10",
"simulate.normal_samples=3",
"simulate.attack_samples=3",
]
)
assert True
``` |
{
"source": "j-petit/temporal_graph",
"score": 2
} |
#### File: j-petit/temporal_graph/run.py
```python
import logging
import pdb
import logging.config
import yaml
import sacred
import pprint
import os
import datetime
import dotenv
import multiprocessing
import pandas as pd
from sacred import Experiment
from sacred.observers import MongoObserver
from src.get_data import get_dataset
from src.data_loader import occurances_entity
from src.run_training import run_training
from src.utils import plot_occurances
ex = sacred.Experiment("temp_graph")
ex.add_config(yaml.load(open("config/config.yaml", "r"), yaml.SafeLoader))
dotenv.load_dotenv(".env")
URI = "mongodb://{}:{}@172.16.17.32/?authSource=hids&authMechanism=SCRAM-SHA-1".format(
os.environ["SACRED_MONGODB_USER"], os.environ["SACRED_MONGODB_PWD"]
)
ex.observers.append(MongoObserver(url=URI, db_name="hids"))
@ex.command(unobserved=True)
def print_config(_config):
""" Replaces print_config which is not working with python 3.8 and current packages sacred"""
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(_config)
@ex.config
def config(c_data, c_results, c_model):
c_data["raw"] = os.path.join(c_data["prefix"], "raw")
c_data["processed"] = os.path.join(c_data["prefix"], "processed")
c_data["interim"] = os.path.join(c_data["prefix"], "interim")
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
cpu_count = multiprocessing.cpu_count()
c_results["output_path"] = os.path.join(c_results["prefix"], timestamp)
c_data["start_date"] = datetime.datetime.strptime(c_data["start_date"], "%Y_%m_%d")
c_data["end_date"] = datetime.datetime.strptime(c_data["end_date"], "%Y_%m_%d")
c_data["database"] = os.path.join(c_data["interim"], c_data["database"])
@ex.config_hook
def hook(config, command_name, logger):
if config["c_data"]["cluster"]:
os.system(f"rm -rf {config['c_data']['prefix']}")
config.update({"hook": True})
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", None)
pd.options.display.width = 0
os.makedirs(config["c_results"]["output_path"], exist_ok=True)
os.makedirs(config["c_data"]["processed"], exist_ok=True)
os.makedirs(config["c_data"]["interim"], exist_ok=True)
os.makedirs(config["c_data"]["raw"], exist_ok=True)
# logging.config.fileConfig("config/logging_local.conf")
log_config = yaml.load(open("config/logging.yaml", "r"), yaml.SafeLoader)
for handler in log_config["handlers"].values():
if "filename" in handler.keys():
handler["filename"] = os.path.join(
config["c_results"]["output_path"], handler["filename"]
)
logging.config.dictConfig(log_config)
return config
@ex.post_run_hook
def clean_up(c_data, _log):
if c_data["cluster"]:
_log.info("Copying database back...")
os.system(f"cp {c_data['database']} ./data/interim/")
_log.info("Removing temp files...")
os.system(f"rm -rf {c_data['prefix']}")
@ex.automain
def run(hook, _config, c_stages, c_results, _run):
logger = logging.getLogger("temp_graph." + os.path.basename(os.path.splitext(__file__)[0]))
logger.info(_config["timestamp"])
if c_stages["get_data"]:
get_dataset(_config)
if c_stages["train"]:
run_training(_config)
ex.add_artifact(os.path.join(c_results["output_path"], "general.log"))
```
#### File: temporal_graph/src/run_training.py
```python
import sys
from torch.utils.data import DataLoader
import torch
import tqdm
from src.data_loader import NewsEntityDataset
# to be able to import tgn content
sys.path.append("./src/tgn")
from model.tgn import TGN
def run_training(config):
c_tgn = config["c_tgn"]
data = NewsEntityDataset(config["c_data"]["database"], config["c_model"]["predict_horizon"])
dataset_loader = DataLoader(data, batch_size=20, shuffle=False, num_workers=6)
torch.multiprocessing.set_sharing_strategy("file_system")
# See https://github.com/twitter-research/tgn/blob/master/train_supervised.py how to continue from here
``` |
{
"source": "jpetrov/pythonexamples",
"score": 4
} |
#### File: jpetrov/pythonexamples/calculator.py
```python
import math
# Calculator functions
# Addition
def calc_add(x, y):
return x + y
# Subtraction
def calc_subtract(x, y):
return x - y
# Multiplication
def calc_multiply(x, y):
return x * y
# Division
def calc_divide(x, y):
return x / y
# Exponentiation
def calc_exponent(x, y):
return x ** y
# Square root
def calc_sqrt(x):
return math.sqrt(x)
print('Welcome to the calculator v.1.0\n')
print('Operators\n')
print('+ = add, - = subtract, * = multiply, / = divide, ^ raise to power, s = square root\n')
choice = 'y'
while choice == 'y' or choice == 'Y':
number1 = float(input("x = "))
number2 = float(input("y = "))
operator = str(input("Operator:"))
if operator == '+':
result = calc_add(number1, number2)
elif operator == '-':
result = calc_subtract(number1, number2)
elif operator == '/':
result = calc_divide(number1, number2)
elif operator == '*':
result = calc_multiply(number1, number2)
elif operator == '^':
result = calc_exponent(number1, number2)
elif operator == "s":
result = calc_sqrt(number1)
else:
print('Unknown operator')
result = 0
print('The result is: ' + str(result))
choice = str(input('Would you like to make another calculation? For yes enter y :'))
``` |
{
"source": "jpetrucciani/pybugsnag",
"score": 3
} |
#### File: pybugsnag/models/error.py
```python
class PyBugsnagException(Exception):
"""base pybugsnag exception class"""
def __init__(self, *args, **kwargs):
extra = ""
if args:
extra = '\n| extra info: "{extra}"'.format(extra=args[0])
print(
"[{exception}]: {doc}{extra}".format(
exception=self.__class__.__name__, doc=self.__doc__, extra=extra
)
)
Exception.__init__(self, *args, **kwargs)
class RateLimited(PyBugsnagException):
"""request received a 429 - you are currently rate limited"""
```
#### File: pybugsnag/test/conftest.py
```python
import pytest
from pybugsnag.test.helpers import dbg
@pytest.fixture(scope="session", autouse=True)
def before_all(request):
"""test setup"""
dbg("[+] begin pybugsnag tests")
request.addfinalizer(after_all)
def after_all():
"""tear down"""
dbg("[+] end pybugsnag tests")
```
#### File: pybugsnag/test/helpers.py
```python
import json
import sys
from colorama import init, Fore, Style
init()
def dbg(text):
"""debug printer for tests"""
if isinstance(text, dict):
text = json.dumps(text, sort_keys=True, indent=2)
caller = sys._getframe(1)
print("")
print(Fore.GREEN + Style.BRIGHT)
print("----- {} line {} ------".format(caller.f_code.co_name, caller.f_lineno))
print(text)
print("-----")
print(Style.RESET_ALL)
print("")
```
#### File: pybugsnag/test/test_pybugsnag.py
```python
from datetime import datetime
from pybugsnag.models import (
Collaborator,
Error,
Event,
EventField,
Organization,
Pivot,
Project,
Release,
)
from pybugsnag.models.client import test_client
CLIENT = test_client()
def is_list_of_type(check_list, check_type):
"""helper function for checking if it's a list and of a specific type"""
assert isinstance(check_list, list)
assert isinstance(check_list[0], check_type)
return True
def test_organizations():
"""testing accessing organizations"""
organizations = CLIENT.organizations
assert organizations
assert isinstance(organizations, list)
organization = organizations[0]
assert organization
assert isinstance(organization, Organization)
found_organization = CLIENT.get_organization(organization.id)
assert isinstance(found_organization, Organization)
assert isinstance(organization.created_at, datetime)
assert isinstance(organization.updated_at, datetime)
assert organization.admins_count == 5
assert is_list_of_type(organization.collaborators, Collaborator)
assert is_list_of_type(organization.projects, Project)
found_collaborator = organization.get_collaborator(organization.collaborators[0].id)
assert isinstance(found_collaborator, Collaborator)
assert "<pybugsnag.Collaborator" in str(found_collaborator)
assert "<pybugsnag.Organization" in str(organization)
def test_projects():
"""testing features around projects"""
organization = CLIENT.organizations[0]
assert organization
assert is_list_of_type(organization.projects, Project)
project = organization.projects[0]
found_project = CLIENT.get_project(project.id)
assert isinstance(found_project, Project)
assert isinstance(project.created_at, datetime)
assert isinstance(project.updated_at, datetime)
errors = project.get_errors()
assert is_list_of_type(errors, Error)
error_found = project.get_error(errors[0].id)
assert isinstance(error_found, Error)
events = project.get_events()
assert is_list_of_type(events, Event)
event_found = project.get_event(events[0].id)
assert isinstance(event_found, Event)
releases = project.get_releases()
assert is_list_of_type(releases, Release)
# TODO reimplement when the apiary is fixed
# release_found = project.get_release(releases[0].id)
# assert isinstance(release_found, Release)
trend_buckets = project.get_trend_buckets()
assert is_list_of_type(trend_buckets, dict)
trend_resolution = project.get_trend_resolution()
assert is_list_of_type(trend_resolution, dict)
pivots = project.get_pivots()
assert is_list_of_type(pivots, Pivot)
event_fields = project.get_event_fields()
assert is_list_of_type(event_fields, EventField)
assert "<pybugsnag.Release" in str(releases[0])
assert "<pybugsnag.EventField" in str(event_fields[0])
def test_errors():
"""testing features around errors"""
organization = CLIENT.organizations[0]
assert organization
assert isinstance(organization.projects, list)
project = organization.projects[0]
assert isinstance(project, Project)
errors = project.get_errors()
assert is_list_of_type(errors, Error)
error = errors[0]
events = error.get_events()
assert is_list_of_type(events, Event)
assert isinstance(events[0].project, Project)
assert isinstance(error.first_seen, datetime)
assert isinstance(error.last_seen, datetime)
assert isinstance(error.first_seen_unfiltered, datetime)
latest_event = error.get_latest_event()
assert isinstance(latest_event, Event)
found_event = error.get_event(latest_event.id)
assert isinstance(found_event, Event)
trend_buckets = error.get_trend_buckets()
assert is_list_of_type(trend_buckets, dict)
trend_resolution = error.get_trend_resolution()
assert is_list_of_type(trend_resolution, dict)
pivots = error.get_pivots()
assert is_list_of_type(pivots, Pivot)
assert "<pybugsnag.Error" in str(error)
assert "<pybugsnag.Pivot" in str(pivots[0])
def test_events():
"""testing features around events"""
organization = CLIENT.organizations[0]
assert organization
assert isinstance(organization.projects, list)
project = organization.projects[0]
assert isinstance(project, Project)
errors = project.get_errors()
assert is_list_of_type(errors, Error)
error = errors[0]
events = error.get_events()
assert is_list_of_type(events, Event)
event = events[0]
assert isinstance(event.received_at, datetime)
assert "<pybugsnag.Event" in str(event)
```
#### File: pybugsnag/utils/text.py
```python
import re
from datetime import datetime
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
DATE_FORMAT_MILLIS = "%Y-%m-%dT%H:%M:%S.%fZ"
FIRST_CAP = re.compile("(.)([A-Z][a-z]+)")
ALL_CAP = re.compile("([a-z0-9])([A-Z])")
LOCALS_FILTER = ["self", "kwargs"]
def snakeify(text):
"""camelCase to snake_case"""
first_string = FIRST_CAP.sub(r"\1_\2", text)
return ALL_CAP.sub(r"\1_\2", first_string).lower()
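# Illustrative examples (added, not part of the original module):
# snakeify("receivedAt") -> "received_at"
# snakeify("HTTPResponseCode") -> "http_response_code"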
def filter_locals(local_variables, extras=None):
"""filters out builtin variables in the local scope and returns locals as a dict"""
var_filter = LOCALS_FILTER.copy()
if extras and isinstance(extras, list):
var_filter += extras
return {
x: local_variables[x]
for x in local_variables
if local_variables[x] is not None and x not in var_filter
}
def datetime_to_iso8601(date_object, milliseconds=False):
"""converts a datetime to the time format bugsnag wants"""
return date_object.strftime(DATE_FORMAT_MILLIS if milliseconds else DATE_FORMAT)
def iso8601_to_datetime(date_string, milliseconds=False):
"""converts a iso8601 string to a python datetime"""
return datetime.strptime(
date_string, DATE_FORMAT_MILLIS if milliseconds else DATE_FORMAT
)
def dict_to_query_params(params):
"""given a dictionary of query params, form the query param string"""
if not params:
return ""
for param in params:
if isinstance(params[param], bool):
params[param] = str(params[param]).lower()
elif isinstance(params[param], datetime):
params[param] = datetime_to_iso8601(params[param])
return "?{}".format("&".join(["{}={}".format(x, params[x]) for x in params]))
``` |
{
"source": "jpetrucciani/python-duckduckgo",
"score": 3
} |
#### File: jpetrucciani/python-duckduckgo/ddg3.py
```python
import urllib.parse
import requests
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from typing import Optional
__version__ = "VERSION"
__useragent__ = "ddg3 {__version__}"
class Results:
"""ddg results object"""
def __init__(self, xml: Element) -> None:
"""constructor"""
self.type = {
"A": "answer",
"D": "disambiguation",
"C": "category",
"N": "name",
"E": "exclusive",
"": "nothing",
}[xml.findtext("Type", "")]
self.api_version = xml.attrib.get("version", None)
self.heading = xml.findtext("Heading", "")
self.answer: Optional[Answer] = None
self.image: Optional[Image] = None
try:
self.results = [Result(elem) for elem in xml.getiterator("Result")] # type: ignore
self.related = [
Result(elem) for elem in xml.getiterator("RelatedTopic") # type: ignore
]
except AttributeError:
self.results = [Result(elem) for elem in xml.iter("Result")]
self.related = [Result(elem) for elem in xml.iter("RelatedTopic")]
self.abstract = Abstract(xml)
answer_xml = xml.find("Answer")
if answer_xml is not None:
self.answer = Answer(answer_xml)
if not self.answer.text:
self.answer = None
else:
self.answer = None
image_xml = xml.find("Image")
if image_xml is not None and image_xml.text:
self.image = Image(image_xml)
else:
self.image = None
class Abstract:
"""ddg abstract object"""
def __init__(self, xml: Element) -> None:
"""constructor"""
self.html = xml.findtext("Abstract", "")
self.text = xml.findtext("AbstractText", "")
self.url = xml.findtext("AbstractURL", "")
self.source = xml.findtext("AbstractSource")
class Result:
"""ddg result object"""
def __init__(self, xml: Element) -> None:
"""constructor"""
self.html = xml.text
self.text = xml.findtext("Text")
self.url = xml.findtext("FirstURL")
self.icon: Optional[Image] = None
icon_xml = xml.find("Icon")
if icon_xml is not None:
self.icon = Image(icon_xml)
else:
self.icon = None
class Image:
"""ddg image object"""
def __init__(self, xml: Element) -> None:
"""constructor"""
self.url = xml.text
self.height = xml.attrib.get("height", None)
self.width = xml.attrib.get("width", None)
class Answer:
"""ddg answer object"""
def __init__(self, xml: Element) -> None:
"""constructor"""
self.text = xml.text
self.type = xml.attrib.get("type", "")
def query(query_text: str, useragent: str = __useragent__) -> Results:
"""
Query Duck Duck Go, returning a Results object.
Here's a query that's unlikely to change:
>>> result = query('1 + 1')
>>> result.type
'nothing'
>>> result.answer.text
'1 + 1 = 2'
>>> result.answer.type
'calc'
"""
params = urllib.parse.urlencode({"q": query_text, "o": "x"})
url = f"http://api.duckduckgo.com/?{params}"
request = requests.get(url, headers={"User-Agent": useragent})
response = request.text
xml = ElementTree.fromstring(response)
return Results(xml)
def main() -> None:
"""main function for when run as a cli tool"""
import sys
from optparse import OptionParser
parser = OptionParser(
usage="usage: %prog [options] query", version=f"ddg3 {__version__}"
)
parser.add_option(
"-o",
"--open",
dest="open",
action="store_true",
help="open results in a browser",
)
parser.add_option(
"-n", dest="n", type="int", default=3, help="number of results to show"
)
parser.add_option(
"-d", dest="d", type="int", default=None, help="disambiguation choice"
)
(options, args) = parser.parse_args()
q = " ".join(args)
if options.open:
import webbrowser
query_url = urllib.parse.urlencode(dict(q=q))
webbrowser.open(f"http://duckduckgo.com/?{query_url}", new=2)
sys.exit(0)
results = query(q)
if options.d and results.type == "disambiguation":
try:
related = results.related[options.d - 1]
except IndexError:
print("Invalid disambiguation number.")
sys.exit(1)
results = query(related.url.split("/")[-1].replace("_", " "))
if results.answer and results.answer.text:
print(f"Answer: {results.answer.text}\n")
elif results.abstract and results.abstract.text:
print(f"{results.abstract.text}\n")
if results.type == "disambiguation":
print(
f"""
'{q}' can mean multiple things. You can re-run your query
and add '-d #' where '#' is the topic number you're
interested in.
""".replace(
"\n", ""
).replace(
"\t", " "
)
)
for i, related in enumerate(results.related[0 : options.n]):
name = related.url.split("/")[-1].replace("_", " ")
summary = related.text[0:70]
if len(summary) < len(related.text):
summary += "..."
print(f"{i+1}. {name}: {summary}\n")
else:
for i, result in enumerate(results.results[0 : options.n]):
if result.text:
summary = result.text[0:70].replace(" ", " ")
if len(summary) < len(result.text):
summary += "..."
print(f"{i + 1}. {summary}")
print(f" <{result.url}>\n")
if __name__ == "__main__":
main()
``` |
{
"source": "j-pettit/pfinance",
"score": 2
} |
#### File: j-pettit/pfinance/noxfile.py
```python
import nox
@nox.session
def lint(session):
targets = ['pfinance', 'tests', 'noxfile.py']
session.install('flake8')
session.run('flake8', *targets)
@nox.session
def tests(session):
session.install('pytest')
session.run('pytest', '-v')
```
#### File: pfinance/pfinance/general.py
```python
import math
def simple_interest(principle: float, interest_rate: float, periods: int) -> float:
'''
Returns the total value of an investment earning simple interest.
Parameters:
principle (float): Present value of the investment
interest_rate (float): Interest rate per period, e.g. year
periods (int): Term of the investment, e.g. years
Returns:
future_value (float): Value of the investment after the term
'''
interest = interest_rate * periods
return principle * (1 + interest)
def compound_interest(principle: float, interest_rate: float, periods: int, compounding_frequency: int = 1) -> float:
'''
Returns the total value of an investment earning compound interest.
Parameters:
principle (float): Present value of the investment
interest_rate (float): Interest rate per period, e.g. year
periods (int): Term of the investment, e.g. years
compounding_frequency (int): Number of compoundings that occur per period, default 1
Returns:
future_value (float): Value of the investment after the term
'''
return principle * (1 + effective_interest(interest_rate, compounding_frequency)) ** periods
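# Illustrative example (added, not part of the original module):
# compound_interest(1000, 0.05, 10, 12) compounds a 5% yearly rate monthly for
# 10 years and returns roughly 1647.01.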
def effective_interest(nominal_rate: float, periods: int) -> float:
'''
Returns the effective annual interest rate.
Parameters:
nominal_rate (float): The nominal interest rate (i.e. APR)
periods (int): The number of compounding periods per year
Returns:
effective_interest: The effective interest rate
'''
return (1 + nominal_rate / periods) ** periods - 1
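# Illustrative example (added): effective_interest(0.12, 12) converts a 12%
# nominal annual rate compounded monthly into an effective annual rate of
# roughly 0.1268 (12.68%).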
def loan_payment(principal: float, interest_rate: float, payment_frequency: int, term: int, down_payment: float = 0) -> float:
'''
Returns the periodic payment required to repay a loan accruing compound interest.
Parameters:
principal (float): Initial value of the loan
interest_rate (float): Interest rate per period, e.g. year
payment_frequency (int): Number of payments and compoundings per period, e.g. year
term (int): Term of the loan in number of payments
down_payment (float): Amount paid towards the loan before interest, default 0
Returns:
periodic_payment (float): Period loan payment
'''
loan_amount = principal - down_payment
if interest_rate == 0:
return loan_amount / term
effective_rate = interest_rate / payment_frequency
return loan_amount * effective_rate * (1 + effective_rate) ** term / ((1 + effective_rate) ** term - 1)
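# Illustrative example (added): loan_payment(100000, 0.06, 12, 360) is the
# classic 30-year mortgage case (100,000 borrowed at 6% per year, paid
# monthly) and returns roughly 599.55 per payment.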
def equivalent_interest_rate(present_value: float, future_value: float, periods: int) -> float:
'''
Returns the equivalent interest rate for the growth of an investment.
Parameters:
present_value (float): The present value of the investment
future_value (float): The future value of the investment
periods (int): The number of periods elapsed between the future and present value
Returns:
equivalent_interest_rate (float): The equivalent interest rate
'''
return (future_value / present_value) ** (1 / periods) - 1
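# Illustrative example (added): equivalent_interest_rate(1000, 2000, 10)
# returns roughly 0.0718, i.e. doubling an investment over 10 periods is
# equivalent to about 7.18% growth per period.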
def loan_payment_schedule(
principal: float,
interest_rate: float,
payment_frequency: int,
term: int,
down_payment: float = 0,
) -> dict[str, list[float]]:
'''
Returns the payment schedule for a loan.
Parameters:
principal (float): Initial value of the loan
interest_rate (float): Interest rate per period, e.g. year
payment_frequency (int): Number of payments and compoundings per period, e.g. year
term (int): Term of the loan in number of payments
down_payment (float): Amount paid towards the loan before interest, default 0
Returns:
loan_payment_schedule (dict):
principal_payment (list[float]): Portion of the loan payment used to pay the principal
interest_payment (list[float]): Portion of the loan payment used to pay the interest
remaining_balance (list[float]): Remaining loan balance after payment
'''
payment = loan_payment(principal, interest_rate, payment_frequency, term, down_payment)
loan_amount = principal - down_payment
principal_payment, interest_payment, remaining_balance = [], [], []
for _ in range(term):
interest_payment.append(loan_amount * interest_rate / payment_frequency)
principal_payment.append(payment - interest_payment[-1])
remaining_balance.append(loan_amount - principal_payment[-1])
loan_amount -= principal_payment[-1]
return {
'principal_payment': principal_payment,
'interest_payment': interest_payment,
'remaining_balance': remaining_balance,
}
def number_periods_loan(principal: float, interest_rate: float, payment: float) -> float:
'''
Returns the number of periods required to payback a loan with fixed payments.
Parameters:
principal (float): Initial value of the loan
interest_rate (float): Interest rate per period, e.g. year
payment (float): Constant payment per period, must exceed the interest accrued per period
Returns:
number_periods (float): Number of periods to payback the loan, -1 if loan can not be paid off
'''
# Check if loan can be paid back
if payment <= (principal * interest_rate):
return -1
return math.log10(payment / (payment - principal * interest_rate)) / math.log10(1 + interest_rate)
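# Illustrative example (added): number_periods_loan(1000, 0.01, 100) returns
# roughly 10.59, i.e. a 1000 loan at 1% interest per period with payments of
# 100 is repaid in about 11 payments.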
def sum_product(*args: list[float]) -> float:
'''
Returns the sum of lists multiplied by eachother. For example, [3, 5, 6, 1] and [4, 2, 7, 8] returns
(3 * 4) + (5 * 2) + (6 * 7) + (1 * 8) = 72.
Parameters:
*lst_args (list[float]): Any number of lists to be multiplied and summed together
Returns:
total_sum (float): Total sum of the lists multiplied together. None if lists are different lengths or no
lists passed in
'''
if len({len(i) for i in args}) != 1: # Use set comprehension to check if list lengths are same
return None
total_sum = 0
multiplied = 1
len_list = len(args[0])
for i in range(len_list):
for lst in args:
multiplied *= lst[i]
total_sum += multiplied
multiplied = 1
return total_sum
def sum_squares(vals: list[float]) -> float:
'''
Returns the sum of the square of each value in a list. For example, [5,2,1,3] gives 5^2 + 2^2 + 1^2 + 3^2 = 39.
Parameters:
vals (list[float]): List of values
Returns:
total_sum (float): Sum of squares of values in list.
'''
total_sum = 0
for val in vals:
total_sum += val * val
return total_sum
def factorial(num: int) -> int:
'''
Returns the factorial of an integer greater than or equal to 0.
Parameters:
num (int): Input number
Returns:
result (int): The factorial result
'''
if num == 0 or num == 1:
return 1
result = 1
for i in range(2, num + 1):
result *= i
return result
def sum_diff_squares(lst1: list[float], lst2: list[float]) -> float:
'''
Returns the sum of the difference of squares of the values in two lists with same size. For example, [5,2,3] and
[3,-1,4] gives (5^2 - 3^2) + (2^2 - (-1)^2) + (3^2 - 4^2) = 12.
Parameters:
lst1 (list[float]): List one of values
lst2 (list[float]): List two of values
Returns:
total_sum (float): Sum of difference of squares of values in lists.
'''
total_sum = 0
for i in range(len(lst1)):
total_sum += lst1[i] * lst1[i] - lst2[i] * lst2[i]
return total_sum
```
#### File: pfinance/pfinance/securities.py
```python
def bond_coupon_rate(face_value: float, payment: float, payment_rate: int = 1) -> float:
'''
Returns the coupon rate of a bond.
Parameters:
face_value (float): Par value of the bond at issue
payment (float): Periodic payment from the bond
payment_rate (int): Payments per interest period, default 1
Returns:
coupon_rate (float): Effective interest rate returned by the bond
'''
return (payment * payment_rate) / face_value
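# Illustrative example (added): bond_coupon_rate(1000, 25, 2) describes a bond
# with a 1000 face value paying 25 twice per interest period, giving a coupon
# rate of 0.05 (5%).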
def norberts_gambit(
quantity: int,
purchase_price: float,
sale_price: float,
rate: float = 1,
purchase_commission: float = 0,
sale_commission: float = 0,
) -> dict[str, float]:
'''
Returns the converted value and capital gain of an execution of Norbert's Gambit.
Parameters:
quantity (int): Number of securities transacted
purchase_price (float): Unit price of the purchased securities
sale_price (float): Unit price of the sold securities
rate (float): Exchange rate between the purchasing currency and the sale currency expressed as a multiplier of the
purchasing currency, default 1
purchase_commission (float): Commission paid in the purchasing currency on the purchase transaction, default 0
sale_commission (float): Commission paid in the sale currency on the sale transaction, default 0
Returns:
gambit_result (dict):
base_value (float): Final value of the conversion expressed in the purchase currency
base_gain (float): Capital gain of the conversion expressed in the purchase currency
converted_value (float): Final value of the conversion expressed in the sale currency
converted_gain (float): Capital gain of the conversion expressed in the sale currency
'''
initial_value = quantity * purchase_price - purchase_commission
final_value = quantity * sale_price - sale_commission
return {
'base_value': final_value / rate,
'base_gain': (final_value / rate) - initial_value,
'converted_value': final_value,
'converted_gain': final_value - (initial_value * rate),
}
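# Illustrative example (added, not part of the original module):
# norberts_gambit(100, 10.0, 7.90, rate=0.79) models buying 100 interlisted
# shares at 10.00 in the purchase currency and selling them at 7.90 in the
# sale currency at an exchange rate of 0.79; with no commissions it returns
# base_value 1000.0, converted_value 790.0 and zero gain in both currencies.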
def alpha(actual_rate_of_return: float, expected_rate_of_return: float) -> float:
'''
Returns the alpha coefficient for a given actual and expected rate of return.
Parameters:
actual_rate_of_return (float): Rate of return achieved
expected_rate_of_return (float): Rate of return expected
Returns:
alpha (float): Highest return possible from a minimum amount of investment risk
'''
return actual_rate_of_return - expected_rate_of_return
def expected_rate_of_return(risk_free_rate: float, beta: float, market_risk_premium: float) -> float:
'''
Returns the expected rate of return.
Parameters:
risk_free_rate (float): Theoretical rate of return for an investment with 0 risk
beta (float): Volatility of an investment compared to the market as a whole
market_risk_premium (float): Premium on return paid for risk on an investment
Returns:
expected_rate_of_return (float): Rate of return expected for an investment
'''
return risk_free_rate + beta * market_risk_premium
class adjusted_cost_base:
'''
Represents an adjusted cost base tracker
Methods:
buy(quantity, unit_price, commission): Records a purchase transaction
sell(quantity, unit_price, commission): Records a sale transaction
get_acb(): Returns the adjusted cost base of the position
'''
def __init__(self):
'''
Constructs the necessary attributes for the adjusted cost base tracker object.
'''
self._book_value = 0
self._acb = 0
self._shares = 0
def buy(self, quantity: int, unit_price: float, commission: float = 0):
'''
Records a purchase transaction.
Parameters:
quantity (int): Number of securities purchased
unit_price (float): Price per security paid
commission (float): Commission paid in the transaction, default 0
'''
self._shares += quantity
self._book_value += quantity * unit_price + commission
self._acb = self._book_value / self._shares
def sell(self, quantity: int, unit_price: float, commission: float = 0) -> float:
'''
Records a sale transaction.
Parameters:
quantity (int): Number of securities sold
unit_price (float): Price per security received
commission (float): Commission paid in the transaction, default 0
Returns:
capital_gain (float): The capital gain on the sale
'''
self._shares -= quantity
capital_gain = (quantity * unit_price - commission) - (quantity * self._acb)
if self._shares == 0:
self._book_value = 0
self._acb = 0
else:
self._book_value -= quantity * self._acb
return capital_gain
def get_acb(self) -> float:
'''
Returns the adjusted cost base of the position
Returns:
acb (float): Adjusted cost base of the position
'''
return self._acb
``` |
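The adjusted_cost_base tracker and norberts_gambit above are easiest to read with concrete numbers; the following is only an illustrative sketch (made-up figures, assumed to run alongside the definitions above).
```python
# Hypothetical walk-through of adjusted_cost_base and norberts_gambit (illustrative numbers).
acb = adjusted_cost_base()
acb.buy(quantity=100, unit_price=10.00, commission=5.00)   # book value 1005.00, ACB 10.05
acb.buy(quantity=50, unit_price=12.00, commission=5.00)    # book value 1610.00, ACB ~10.73
gain = acb.sell(quantity=75, unit_price=13.00, commission=5.00)
print(round(gain, 2))           # capital gain on the 75 units sold
print(round(acb.get_acb(), 2))  # ACB per unit is unchanged by a sale

result = norberts_gambit(quantity=200, purchase_price=10.10, sale_price=13.40,
                         rate=1.33, purchase_commission=9.99, sale_commission=9.99)
print(round(result['base_gain'], 2))  # gain expressed in the purchase currency
```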
{
"source": "JPETTomography/j-pet-roi-detector",
"score": 3
} |
#### File: JPETTomography/j-pet-roi-detector/lib.py
```python
import numpy as np
# import pickle
import os
# import matplotlib.pyplot as plt
# import random
# import itertools
# Various useful helper methods
def pointsInRadius(sPoint, radius):
"""
    Creates a set (a list) of points at a distance of radius from the central point sPoint.
    Physically this will be the set of points of a square region with side 2*radius + 1, with sPoint at its centroid.
"""
points = []
if radius == 0:
points = [sPoint]
else:
        # 2D case
if len(sPoint) == 2:
            # Add the vertical edges
for row in [sPoint[0] - radius, sPoint[0] + radius]:
[points.append([row, col]) for col in range(sPoint[1] - radius, sPoint[1] + radius + 1)]
            # Add the horizontal edges
for col in [sPoint[1] - radius, sPoint[1] + radius]:
[points.append([row, col]) for row in range(sPoint[0] - radius + 1, sPoint[0] + radius)]
        # 3D case
else:
z = sPoint[2]
for row in [sPoint[0] - radius, sPoint[0] + radius]:
[points.append([row, col, z]) for col in range(sPoint[1] - radius, sPoint[1] + radius + 1)]
for col in [sPoint[1] - radius, sPoint[1] + radius]:
[points.append([row, col, z]) for row in range(sPoint[0] - radius + 1, sPoint[0] + radius)]
for z in [sPoint[2] - radius, sPoint[2] + radius]:
for row in range(sPoint[0] - radius, sPoint[0] + radius + 1):
for col in range(sPoint[1] - radius, sPoint[1] + radius + 1):
points.append([row, col, z])
return points
def belongsToArr(p, arr):
"""
    Checks whether a point, or a set of points, belongs to the array.
    If p is a single point (2D or 3D):
        True if the point belongs to the array
        False otherwise
    If p is a set of points (2D or 3D):
        True if AT LEAST ONE point belongs to the array
        False if NO point belongs to the array
"""
p = np.array(p)
if len(p.shape) == 1:
        # a single point
if len(p) == 2: # 2D
            # TODO: add exceptions for when the dimensions do not match
logic = p[0] >= 0 and p[0] < arr.shape[0] and p[1] >= 0 and p[1] < arr.shape[1]
if logic:
return True
else:
return False
else: # 3D
logic = p[0] >= 0 and p[0] < arr.shape[0] and p[1] >= 0 and p[1] < arr.shape[1] and p[2] >= 0 and p[2] < arr.shape[2]
if logic:
return True
else:
return False
else:
        # a set of points. Check whether at least one point/voxel lies within the array
if len(p[0]) == 2: # 2D
for point in p:
logic = point[0] >= 0 and point[0] < arr.shape[0] and point[1] >= 0 and point[1] < arr.shape[1]
if logic:
return True
return False
else: # 3D
for point in p:
logic = point[0] >= 0 and point[0] < arr.shape[0] and point[1] >= 0 and point[1] < arr.shape[1] and point[2] >= 0 and point[2] < arr.shape[2]
if logic:
return True
return False
def points2Img3D(rawData):
"""
    Converts a 3D volume from the form N x {x,y,z,f(x,y,z)} to the form sqrt(Nx) x sqrt(Ny) x Nz
"""
    ind = np.lexsort((rawData[:,2], rawData[:,1], rawData[:,0])) # Sort the data
rawData = rawData[ind]
Nx = len(set(rawData[:,0]))
Ny = len(set(rawData[:,1]))
Nz = len(set(rawData[:,2]))
licz = 0
img3D = np.zeros((Nx,Ny,Nz))
for i in range(Nx):
for j in range(Ny):
for k in range(Nz):
                img3D[Ny-1-j,i,k] = rawData[licz,3] # Copy the data over
licz += 1
return img3D
def getListOfFiles(dirName):
"""
    Creates a list of all files in the given location
"""
allFiles = []
for root, dirs, files in os.walk(dirName):
for name in files:
allFiles.append(os.path.join(root, name))
return allFiles
#############
# CONSTANTS #
#############
KROTKIE_INFO = """
Program SegmentujObrazyJPET
by <NAME> 2019
Poprawne wywołanie segmentacji:
python SegmentujObrazyJPET.py [yen | otsu-region | otsu-iter] [ścieżka_do_pliku_z_rekonstrukcją]
Żeby uruchomić pełną wersję programu wraz z uzyskaniem informacji na temat algorytmów segmentacji wprowadź
python SegmentujObrazyJPET.py
"""
TEKST_POWITALNY = """
Witamy w programie SegmentujObrazyJPET
by <NAME> 2019
OPIS PROGRAMU
Program jest narzędziem służącym do segmentacji obrazów 3D (wolumenów)
pochodzących z rekonstrukcji obrazowania tomografem pozytonowym.
Segmentacja może być wkonana za pomocą następujących algorytmów:
- algorytm progujący Yena: alg='yen-thresh'
- algorytm Region Growing: 'alg=region-growing'
- algorytm Region Growing wykorzystujący próg Yena jako punkt startowy: 'yen-region
- iteracyjny algorytm Otsu: 'otsu-iter'
- wieloprogowy algorytm Otsu: 'otsu-multi'
Szczegółowe informacje można uzyskać wpisując poniżej polecenie:
info [yen | otsu-region | otsu-iter]
INSTRUKCJA UŻYCIA
Aby wykonać segmentację wpisz polecenie
run [yen | otsu-region | otsu-iter] [ścieżka_do_pliku_z_rekonstrukcją]
Plik z rekonstrukcją może być typu tekstowego (.txt) lub jako macierz
zapisana do pliku .pckl. Dane w pliku muszą być ułożone w następujących
konfiguracjach:
- macierz Nx4, gdzie kolumny to x,y,z,f(x,y,z)
- macierz trójwymiarowa KxLxM, gdzie dla każdego punktu w przestrzeni
(x,y,z) przydzielona jest wartość emisyjności.
Aby zakończyć pracę z programem wpisz polecenie
exit
lub ctrl+c.
"""
INFO = """
Możliwe polecenia:
info [yen | otsu-region | otsu-iter]
run [yen | otsu-region | otsu-iter] [ścieżka_do_pliku_z_rekonstrukcją]
exit
"""
INFO_INFO = """ Informacje o algorytmie można uzyskać wpisując
info [yen | otsu-region | otsu-iter]
"""
INFO_RUN = """ Segmentację wykonuje się wpisując
run [yen | otsu-region | otsu-iter] [ścieżka_do_pliku_z_rekonstrukcją]
gdzie ścieżka musi mieć rozszerzenie .txt lub .pckl
"""
INFO_YEN = """
Algorytm segmentacji Yen
"""
INFO_YEN_REGION = """
Algorytm Region Growing w połączeniu z progowaniem Yen'a
"""
INFO_OTSU_ITER = """
Algorytm Otsu iteracyjny"""
INFO_ALG = """lelelel"""
INFO_DATA = """dATA"""
INFO_SAVE_PICKLE = """pickles"""
INFO_SAVE_SLICES = "SLICES"
``` |
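A short, hedged illustration of how the two geometry helpers above fit together; the array shape and the seed point are arbitrary, and the snippet assumes it runs with the module's functions in scope.
```python
# Illustrative use of pointsInRadius / belongsToArr (arbitrary example data).
import numpy as np

arr = np.zeros((10, 10))            # a 10x10 2D "image"
ring = pointsInRadius([5, 5], 2)    # square ring of points 2 pixels away from (5, 5)
print(len(ring))                    # 16 boundary points for radius 2 in 2D
print(belongsToArr([5, 5], arr))    # True - the centre lies inside the array
print(belongsToArr(ring, arr))      # True - at least one ring point lies inside the array
print(belongsToArr([20, 20], arr))  # False - outside the 10x10 array
```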
{
"source": "jpetto/olympia",
"score": 2
} |
#### File: management/commands/fix_let_scope_bustage.py
```python
from django.core.management.base import BaseCommand, CommandError
from olympia.amo.utils import chunked
from olympia.files.tasks import fix_let_scope_bustage_in_addons
class Command(BaseCommand):
args = '<addon_id addon_id ...>'
help = """Fix the "let scope bustage" (bug 1224686) for a list of add-ons.
Only the last version of each add-on will be fixed, and its version bumped."""
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError('Please provide at least one add-on id to fix.')
addon_ids = [int(addon_id) for addon_id in args]
for chunk in chunked(addon_ids, 100):
fix_let_scope_bustage_in_addons.delay(chunk)
```
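For context, the command above is a standard Django management command that takes add-on ids as positional arguments; a hedged sketch of invoking it programmatically (the ids are made up):
```python
# Hypothetical invocation via Django's call_command; the ids are illustrative only.
from django.core.management import call_command

call_command('fix_let_scope_bustage', '3615', '1865')
```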
#### File: olympia/amo/feeds.py
```python
from django.contrib.syndication.views import Feed
from django.db.transaction import non_atomic_requests
from django.utils.decorators import method_decorator
class NonAtomicFeed(Feed):
"""
A feed that does not use transactions.
Feeds are special because they don't inherit from generic Django class
views so you can't decorate dispatch().
"""
@method_decorator(non_atomic_requests)
def __call__(self, *args, **kwargs):
return super(NonAtomicFeed, self).__call__(*args, **kwargs)
```
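A sketch of how a concrete feed might build on NonAtomicFeed; the feed shown here is a placeholder, not an actual olympia feed.
```python
# Hypothetical subclass: behaves like any Django Feed, but requests run outside a transaction.
from olympia.amo.feeds import NonAtomicFeed

class LatestThingsFeed(NonAtomicFeed):
    title = 'Latest things'            # placeholder metadata
    link = '/things/'
    description = 'Most recent things.'

    def items(self):
        # A real feed would return model instances here.
        return []
```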
#### File: olympia/amo/signals.py
```python
import contextlib
from django.db import models
def flush_front_end_cache(sender, instance, **kwargs):
from . import tasks
furls = getattr(instance, 'flush_urls', None)
urls = furls() if hasattr(furls, '__call__') else furls
if urls:
tasks.flush_front_end_cache_urls.apply_async(args=[urls])
def _connect():
models.signals.post_save.connect(flush_front_end_cache)
models.signals.post_delete.connect(flush_front_end_cache)
def _disconnect():
models.signals.post_save.disconnect(flush_front_end_cache)
models.signals.post_delete.disconnect(flush_front_end_cache)
@contextlib.contextmanager
def hera_disabled():
_disconnect()
try:
yield
finally:
_connect()
```
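The hera_disabled context manager above temporarily disconnects the cache-flushing signal handlers; a hedged usage sketch (the model loop is illustrative only):
```python
# Hypothetical use of hera_disabled around a bulk update.
from olympia.amo.signals import hera_disabled

def bulk_touch(queryset):
    # Each save() would normally trigger flush_front_end_cache; wrapping the loop
    # suppresses that, and the handlers are reconnected when the block exits.
    with hera_disabled():
        for obj in queryset:
            obj.save()
```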
#### File: amo/tests/test_install.py
```python
from olympia.amo.install import addons
from olympia.amo.tests import TestCase
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import urlparams
class InstallTests(TestCase):
def test_generic(self):
url = reverse('api.install')
r = self.client.get(urlparams(url,
addon_id='1',
addon_name='Status Watch'))
assert 'prompted to install Status Watch' in r.content
def test_byid(self):
url = reverse('api.install')
r = self.client.get(urlparams(url, addon_id='318202'))
assert 'prompted to install Twitter Address Search Bar' in r.content
def test_byname(self):
url = reverse('api.install')
r = self.client.get(urlparams(url, addon_key='prism'))
assert 'prompted to install Prism for Firefox' in r.content
def test_byidname(self):
url = reverse('api.install')
r = self.client.get(urlparams(url, addon_id='prism'))
assert 'prompted to install Prism for Firefox' in r.content
def test_redirect(self):
url = reverse('api.install')
r = self.client.get(urlparams(url, addon_id=424))
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], addons[424]['link'])
def test_bad_id(self):
url = reverse('api.install')
r = self.client.get(urlparams(url, addon_id='eleventy-one'))
self.assertEqual(r.status_code, 404)
def test_bad_key(self):
url = reverse('api.install')
r = self.client.get(urlparams(url, addon_key='unicorns'))
self.assertEqual(r.status_code, 404)
def test_xss(self):
url = reverse('api.install')
url = '{url}?{path}'.format(
url=url,
path='addon_id=252539%3C/script%3E%3CBODY%20ONLOAD=alert%28%27XSS'
'%27%29%3E&addon_name=F1%20by%20Mozilla%20Labs'
'&src=external-f1home')
r = self.client.get(url)
assert '<BODY' not in r.content
```
#### File: api/tests/test_jwt_auth.py
```python
from datetime import datetime, timedelta
import json
from django.conf import settings
from django.test import RequestFactory
import jwt
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from olympia.amo.tests import TestCase, WithDynamicEndpoints
from olympia.api.jwt_auth import handlers
from olympia.api.jwt_auth.views import JWTKeyAuthentication, JWTProtectedView
from olympia.api.models import APIKey, SYMMETRIC_JWT_TYPE
from olympia.users.models import UserProfile
class ProtectedView(JWTProtectedView):
"""
This is an example of a view that would be protected by JWT token auth.
"""
def get(self, request):
return Response('some get response')
def post(self, request):
return Response({'user_pk': request.user.pk})
class JWTAuthTester(TestCase):
def create_api_key(self, user, key='some-user-key', is_active=True,
secret='some-shared-secret', **kw):
return APIKey.objects.create(type=SYMMETRIC_JWT_TYPE,
user=user, key=key, secret=secret,
is_active=is_active, **kw)
def auth_token_payload(self, user, issuer):
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
return jwt_payload_handler(user, issuer)
def encode_token_payload(self, payload, secret):
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
return jwt_encode_handler(payload, secret)
def create_auth_token(self, user, issuer, secret):
payload = self.auth_token_payload(user, issuer)
return self.encode_token_payload(payload, secret)
class TestJWTProtectedView(WithDynamicEndpoints, JWTAuthTester):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestJWTProtectedView, self).setUp()
self.endpoint(ProtectedView)
self.client.logout() # just to be sure!
self.user = UserProfile.objects.get(email='<EMAIL>')
def request(self, method, *args, **kw):
handler = getattr(self.client, method)
return handler('/en-US/firefox/dynamic-endpoint', *args, **kw)
def jwt_request(self, token, method, *args, **kw):
return self.request(method,
HTTP_AUTHORIZATION='JWT {}'.format(token),
*args, **kw)
def test_get_requires_auth(self):
res = self.request('get')
assert res.status_code == 401, res.content
def test_post_requires_auth(self):
res = self.request('post', {})
assert res.status_code == 401, res.content
def test_can_post_with_jwt_header(self):
api_key = self.create_api_key(self.user)
token = self.create_auth_token(api_key.user, api_key.key,
api_key.secret)
res = self.jwt_request(token, 'post', {})
assert res.status_code == 200, res.content
data = json.loads(res.content)
assert data['user_pk'] == self.user.pk
def test_api_key_must_be_active(self):
api_key = self.create_api_key(self.user, is_active=False)
token = self.create_auth_token(api_key.user, api_key.key,
api_key.secret)
res = self.jwt_request(token, 'post', {})
assert res.status_code == 401, res.content
class TestJWTAuthHandlers(JWTAuthTester):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestJWTAuthHandlers, self).setUp()
self.user = UserProfile.objects.get(email='<EMAIL>')
def test_report_unknown_issuer(self):
token = self.create_auth_token(self.user, 'non-existant-issuer',
'some-secret')
with self.assertRaises(AuthenticationFailed) as ctx:
handlers.jwt_decode_handler(token)
assert ctx.exception.detail == 'Unknown JWT iss (issuer)'
def test_report_token_without_issuer(self):
payload = self.auth_token_payload(self.user, 'some-issuer')
del payload['iss']
token = self.encode_token_payload(payload, 'some-secret')
with self.assertRaises(AuthenticationFailed) as ctx:
handlers.jwt_decode_handler(token)
assert ctx.exception.detail == 'JWT iss (issuer) claim is missing'
def test_decode_garbage_token(self):
with self.assertRaises(jwt.DecodeError) as ctx:
handlers.jwt_decode_handler('}}garbage{{')
assert ctx.exception.message == 'Not enough segments'
def test_decode_invalid_non_ascii_token(self):
with self.assertRaises(jwt.DecodeError) as ctx:
handlers.jwt_decode_handler(u'<NAME>\u0107')
assert ctx.exception.message == 'Not enough segments'
def test_incorrect_signature(self):
api_key = self.create_api_key(self.user)
token = self.create_auth_token(api_key.user, api_key.key,
api_key.secret)
decoy_api_key = self.create_api_key(
self.user, key='another-issuer', secret='another-secret')
with self.assertRaises(jwt.DecodeError) as ctx:
handlers.jwt_decode_handler(
token, get_api_key=lambda **k: decoy_api_key)
assert ctx.exception.message == 'Signature verification failed'
def test_expired_token(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
payload['exp'] = (datetime.utcnow() -
settings.JWT_AUTH['JWT_EXPIRATION_DELTA'] -
timedelta(seconds=10))
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(jwt.ExpiredSignatureError):
handlers.jwt_decode_handler(token)
def test_missing_issued_at_time(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
del payload['iat']
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed) as ctx:
handlers.jwt_decode_handler(token)
assert (ctx.exception.detail ==
'Invalid JWT: Token is missing the "iat" claim')
def test_invalid_issued_at_time(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
# Simulate clock skew:
payload['iat'] = (
datetime.utcnow() +
timedelta(seconds=settings.JWT_AUTH['JWT_LEEWAY'] + 10))
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed) as ctx:
handlers.jwt_decode_handler(token)
assert ctx.exception.detail.startswith(
'JWT iat (issued at time) is invalid')
def test_missing_expiration(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
del payload['exp']
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed) as ctx:
handlers.jwt_decode_handler(token)
assert (ctx.exception.detail ==
'Invalid JWT: Token is missing the "exp" claim')
def test_disallow_long_expirations(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
payload['exp'] = (
datetime.utcnow() +
timedelta(seconds=settings.MAX_JWT_AUTH_TOKEN_LIFETIME) +
timedelta(seconds=1)
)
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed) as ctx:
handlers.jwt_decode_handler(token)
assert ctx.exception.detail == 'JWT exp (expiration) is too long'
class TestJWTKeyAuthentication(JWTAuthTester):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestJWTKeyAuthentication, self).setUp()
self.factory = RequestFactory()
self.auth = JWTKeyAuthentication()
self.user = UserProfile.objects.get(email='<EMAIL>')
def request(self, token):
return self.factory.get('/', HTTP_AUTHORIZATION='JWT {}'.format(token))
def _create_token(self):
api_key = self.create_api_key(self.user)
return self.create_auth_token(api_key.user, api_key.key,
api_key.secret)
def test_get_user(self):
user, _ = self.auth.authenticate(self.request(self._create_token()))
assert user == self.user
def test_unknown_issuer(self):
api_key = self.create_api_key(self.user)
payload = self.auth_token_payload(self.user, api_key.key)
payload['iss'] = 'non-existant-issuer'
token = self.encode_token_payload(payload, api_key.secret)
with self.assertRaises(AuthenticationFailed):
self.auth.authenticate(self.request(token))
def test_deleted_user(self):
self.user.update(deleted=True)
with self.assertRaises(AuthenticationFailed):
self.auth.authenticate(self.request(self._create_token()))
def test_user_has_not_read_agreement(self):
self.user.update(read_dev_agreement=None)
with self.assertRaises(AuthenticationFailed):
self.auth.authenticate(self.request(self._create_token()))
```
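The tests above pin down the token shape the API expects: an iss claim carrying the API key, plus iat and exp, signed with the shared secret and sent as a "JWT <token>" Authorization header. A minimal client-side sketch using PyJWT directly — the claim names mirror what the tests exercise, but treat the exact payload as an assumption:
```python
# Hedged sketch: building an Authorization header like the one the tests send.
from datetime import datetime, timedelta
import jwt  # PyJWT

def make_auth_header(key, secret, lifetime_seconds=60):
    now = datetime.utcnow()
    payload = {
        'iss': key,                                         # the APIKey.key acts as issuer
        'iat': now,                                         # issued-at, required by the handler
        'exp': now + timedelta(seconds=lifetime_seconds),   # must stay under the server maximum
    }
    token = jwt.encode(payload, secret, algorithm='HS256')
    if isinstance(token, bytes):  # PyJWT < 2 returns bytes
        token = token.decode('utf-8')
    return {'HTTP_AUTHORIZATION': 'JWT {}'.format(token)}
```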
#### File: olympia/blocklist/models.py
```python
from django.db import models
from olympia.amo.models import ModelBase
from olympia.amo.urlresolvers import reverse
class BlocklistApp(ModelBase):
blitem = models.ForeignKey('BlocklistItem', related_name='app', blank=True,
null=True)
blplugin = models.ForeignKey('BlocklistPlugin', related_name='app',
blank=True, null=True)
guid = models.CharField(max_length=255, blank=True, db_index=True,
null=True)
min = models.CharField(max_length=255, blank=True, null=True)
max = models.CharField(max_length=255, blank=True, null=True)
class Meta(ModelBase.Meta):
db_table = 'blapps'
def __unicode__(self):
return '%s: %s - %s' % (self.guid, self.min, self.max)
def flush_urls(self):
return ['/blocklist*'] # no lang/app
class BlocklistCA(ModelBase):
data = models.TextField()
class Meta(ModelBase.Meta):
db_table = 'blca'
def flush_urls(self):
return ['/blocklist*'] # no lang/app
class BlocklistDetail(ModelBase):
name = models.CharField(max_length=255)
why = models.TextField()
who = models.TextField()
bug = models.URLField()
class Meta(ModelBase.Meta):
db_table = 'bldetails'
def __unicode__(self):
return self.name
class BlocklistBase(object):
@property
def block_id(self):
return '%s%s' % (self._type, self.details_id)
def get_url_path(self):
return reverse('blocked.detail', args=[self.block_id])
def save(self, *args, **kw):
for field in self._meta.fields:
if isinstance(field, models.fields.CharField) and field.null:
if getattr(self, field.attname, None) == '':
setattr(self, field.attname, None)
return super(BlocklistBase, self).save(*args, **kw)
class BlocklistItem(BlocklistBase, ModelBase):
_type = 'i'
guid = models.CharField(max_length=255, blank=True, null=True)
min = models.CharField(max_length=255, blank=True, null=True)
max = models.CharField(max_length=255, blank=True, null=True)
os = models.CharField(max_length=255, blank=True, null=True)
severity = models.SmallIntegerField(blank=True, null=True)
details = models.OneToOneField(BlocklistDetail, null=True)
name = models.CharField(max_length=255, blank=True, null=True)
creator = models.CharField(max_length=255, blank=True, null=True)
homepage_url = models.URLField(blank=True, null=True)
update_url = models.URLField(blank=True, null=True)
class Meta(ModelBase.Meta):
db_table = 'blitems'
def __unicode__(self):
return '%s: %s - %s' % (self.guid, self.min, self.max)
def flush_urls(self):
return ['/blocklist*'] # no lang/app
class BlocklistPlugin(BlocklistBase, ModelBase):
_type = 'p'
name = models.CharField(max_length=255, blank=True, null=True)
guid = models.CharField(max_length=255, blank=True, db_index=True,
null=True)
min = models.CharField(max_length=255, blank=True, null=True)
max = models.CharField(max_length=255, blank=True, null=True)
os = models.CharField(max_length=255, blank=True, null=True)
xpcomabi = models.CharField(max_length=255, blank=True, null=True)
description = models.CharField(max_length=255, blank=True, null=True)
filename = models.CharField(max_length=255, blank=True, null=True)
severity = models.SmallIntegerField(blank=True, null=True)
vulnerability_status = models.SmallIntegerField(
blank=True, null=True,
choices=((1, 'update available'),
(2, 'update unavailable')))
info_url = models.URLField(blank=True, null=True)
details = models.OneToOneField(BlocklistDetail, null=True)
class Meta(ModelBase.Meta):
db_table = 'blplugins'
def __unicode__(self):
return '%s: %s - %s' % (self.name or self.guid or self.filename,
self.min, self.max)
@property
def get_vulnerability_status(self):
"""Returns vulnerability status per bug 778365
Returns None when criteria aren't met so jinja2 excludes it from when
using the attrs filter.
"""
if self.severity == 0 and self.vulnerability_status in (1, 2):
return self.vulnerability_status
def flush_urls(self):
return ['/blocklist*'] # no lang/app
class BlocklistGfx(BlocklistBase, ModelBase):
_type = 'g'
guid = models.CharField(max_length=255, blank=True, null=True)
os = models.CharField(max_length=255, blank=True, null=True)
vendor = models.CharField(max_length=255, blank=True, null=True)
devices = models.CharField(max_length=255, blank=True, null=True)
feature = models.CharField(max_length=255, blank=True, null=True)
feature_status = models.CharField(max_length=255, blank=True, null=True)
driver_version = models.CharField(max_length=255, blank=True, null=True)
driver_version_max = models.CharField(
max_length=255, blank=True, null=True)
driver_version_comparator = models.CharField(max_length=255, blank=True,
null=True)
hardware = models.CharField(max_length=255, blank=True, null=True)
details = models.OneToOneField(BlocklistDetail, null=True)
class Meta:
db_table = 'blgfxdrivers'
def __unicode__(self):
return '%s: %s : %s : %s' % (self.guid, self.os, self.vendor,
self.devices)
def flush_urls(self):
return ['/blocklist*'] # no lang/app
class BlocklistIssuerCert(BlocklistBase, ModelBase):
_type = 'c'
issuer = models.TextField() # Annoyingly, we can't know the size.
serial = models.CharField(max_length=255)
details = models.OneToOneField(BlocklistDetail)
class Meta:
db_table = 'blissuercert'
def __unicode__(self):
return unicode(self.details.name)
def flush_urls(self):
return ['/blocklist*'] # no lang/app
class BlocklistPref(ModelBase):
"""Preferences which should be reset when a blocked item is detected."""
blitem = models.ForeignKey('BlocklistItem', related_name='prefs')
pref = models.CharField(max_length=255)
class Meta:
db_table = 'blitemprefs'
```
#### File: blocklist/tests/test_views.py
```python
import base64
from datetime import datetime
from xml.dom import minidom
from django.conf import settings
from django.core.cache import cache
from nose.tools import eq_, ok_
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.urlresolvers import reverse
from olympia.blocklist.models import (
BlocklistApp, BlocklistCA, BlocklistDetail, BlocklistGfx, BlocklistItem,
BlocklistIssuerCert, BlocklistPlugin, BlocklistPref)
base_xml = """
<?xml version="1.0"?>
<blocklist xmlns="http://www.mozilla.org/2006/addons-blocklist">
</blocklist>
"""
class XMLAssertsMixin(object):
def assertOptional(self, obj, field, xml_field):
"""Make sure that if the field isn't filled in, it's not in the XML."""
# Save the initial value.
initial = getattr(obj, field)
try:
# If not set, the field isn't in the XML.
obj.update(**{field: ''})
eq_(self.dom(self.fx4_url).getElementsByTagName(xml_field), [])
# If set, it's in the XML.
obj.update(**{field: 'foobar'})
element = self.dom(self.fx4_url).getElementsByTagName(xml_field)[0]
eq_(element.firstChild.nodeValue, 'foobar')
finally:
obj.update(**{field: initial})
def assertAttribute(self, obj, field, tag, attr_name):
# Save the initial value.
initial = getattr(obj, field)
try:
# If set, it's in the XML.
obj.update(**{field: 'foobar'})
element = self.dom(self.fx4_url).getElementsByTagName(tag)[0]
eq_(element.getAttribute(attr_name), 'foobar')
finally:
obj.update(**{field: initial})
def assertEscaped(self, obj, field):
"""Make sure that the field content is XML escaped."""
obj.update(**{field: 'http://example.com/?foo=<bar>&baz=crux'})
r = self.client.get(self.fx4_url)
assert 'http://example.com/?foo=<bar>&baz=crux' in r.content
class BlocklistViewTest(TestCase):
def setUp(self):
super(BlocklistViewTest, self).setUp()
self.fx4_url = reverse('blocklist', args=[3, amo.FIREFOX.guid, '4.0'])
self.fx2_url = reverse('blocklist', args=[2, amo.FIREFOX.guid, '2.0'])
self.tb4_url = reverse('blocklist', args=[3, amo.THUNDERBIRD.guid,
'4.0'])
self.mobile_url = reverse('blocklist', args=[2, amo.MOBILE.guid, '.9'])
cache.clear()
self.details = BlocklistDetail.objects.create()
def create_blplugin(self, app_guid=None, app_min=None, app_max=None,
*args, **kw):
plugin = BlocklistPlugin.objects.create(*args, **kw)
app = BlocklistApp.objects.create(blplugin=plugin, guid=app_guid,
min=app_min, max=app_max)
return plugin, app
def normalize(self, s):
return '\n'.join(x.strip() for x in s.split())
def eq_(self, x, y):
return eq_(self.normalize(x), self.normalize(y))
def dom(self, url):
r = self.client.get(url)
return minidom.parseString(r.content)
class BlocklistItemTest(XMLAssertsMixin, BlocklistViewTest):
def setUp(self):
super(BlocklistItemTest, self).setUp()
self.item = BlocklistItem.objects.create(guid='<EMAIL>',
details=self.details)
self.pref = BlocklistPref.objects.create(blitem=self.item,
pref='foo.bar')
self.app = BlocklistApp.objects.create(blitem=self.item,
guid=amo.FIREFOX.guid)
def stupid_unicode_test(self):
junk = u'\xc2\x80\x15\xc2\x80\xc3'
url = reverse('blocklist', args=[3, amo.FIREFOX.guid, junk])
# Just make sure it doesn't fail.
eq_(self.client.get(url).status_code, 200)
def test_content_type(self):
response = self.client.get(self.fx4_url)
eq_(response['Content-Type'], 'text/xml')
def test_empty_string_goes_null_on_save(self):
b = BlocklistItem(guid='guid', min='', max='', os='')
b.save()
assert b.min is None
assert b.max is None
assert b.os is None
def test_lastupdate(self):
def eq(a, b):
eq_(a, b.replace(microsecond=0))
def find_lastupdate():
bl = self.dom(self.fx4_url).getElementsByTagName('blocklist')[0]
t = int(bl.getAttribute('lastupdate')) / 1000
return datetime.fromtimestamp(t)
eq(find_lastupdate(), self.item.created)
self.item.save()
eq(find_lastupdate(), self.item.modified)
plugin, app = self.create_blplugin(app_guid=amo.FIREFOX.guid)
eq(find_lastupdate(), plugin.created)
plugin.save()
eq(find_lastupdate(), plugin.modified)
gfx = BlocklistGfx.objects.create(guid=amo.FIREFOX.guid)
eq(find_lastupdate(), gfx.created)
gfx.save()
eq(find_lastupdate(), gfx.modified)
assert (self.item.created != self.item.modified != plugin.created
!= plugin.modified != gfx.created != gfx.modified)
def test_no_items(self):
self.item.delete()
dom = self.dom(self.fx4_url)
children = dom.getElementsByTagName('blocklist')[0].childNodes
# There are only text nodes.
assert all(e.nodeType == 3 for e in children)
def test_existing_user_cookie(self):
self.client.cookies[settings.BLOCKLIST_COOKIE] = 'adfadf'
self.client.get(self.fx4_url)
eq_(self.client.cookies[settings.BLOCKLIST_COOKIE].value, 'adfadf')
def test_url_params(self):
eq_(self.client.get(self.fx4_url).status_code, 200)
eq_(self.client.get(self.fx2_url).status_code, 200)
# We ignore trailing url parameters.
eq_(self.client.get(self.fx4_url + 'other/junk/').status_code, 200)
def test_app_guid(self):
# There's one item for Firefox.
r = self.client.get(self.fx4_url)
eq_(r.status_code, 200)
eq_(len(r.context['items']), 1)
# There are no items for mobile.
r = self.client.get(self.mobile_url)
eq_(r.status_code, 200)
eq_(len(r.context['items']), 0)
# Without the app constraint we see the item.
self.app.delete()
r = self.client.get(self.mobile_url)
eq_(r.status_code, 200)
eq_(len(r.context['items']), 1)
def test_item_guid(self):
items = self.dom(self.fx4_url).getElementsByTagName('emItem')
eq_(len(items), 1)
eq_(items[0].getAttribute('id'), '<EMAIL>')
def test_block_id(self):
item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0]
eq_(item.getAttribute('blockID'), 'i' + str(self.details.id))
def test_item_os(self):
item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0]
assert 'os' not in item.attributes.keys()
self.item.update(os='win,mac')
item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0]
eq_(item.getAttribute('os'), 'win,mac')
def test_item_pref(self):
self.item.update(severity=2)
eq_(len(self.vr()), 1)
item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0]
prefs = item.getElementsByTagName('prefs')
pref = prefs[0].getElementsByTagName('pref')
eq_(pref[0].firstChild.nodeValue, self.pref.pref)
def test_item_severity(self):
self.item.update(severity=2)
eq_(len(self.vr()), 1)
item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0]
vrange = item.getElementsByTagName('versionRange')
eq_(vrange[0].getAttribute('severity'), '2')
def test_item_severity_zero(self):
# Don't show severity if severity==0.
self.item.update(severity=0, min='0.1')
eq_(len(self.vr()), 1)
item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0]
vrange = item.getElementsByTagName('versionRange')
eq_(vrange[0].getAttribute('minVersion'), '0.1')
assert not vrange[0].hasAttribute('severity')
def vr(self):
item = self.dom(self.fx4_url).getElementsByTagName('emItem')[0]
return item.getElementsByTagName('versionRange')
def test_item_version_range(self):
self.item.update(min='0.1')
eq_(len(self.vr()), 1)
eq_(self.vr()[0].attributes.keys(), ['minVersion'])
eq_(self.vr()[0].getAttribute('minVersion'), '0.1')
self.item.update(max='0.2')
keys = self.vr()[0].attributes.keys()
eq_(len(keys), 2)
ok_('minVersion' in keys)
ok_('maxVersion' in keys)
eq_(self.vr()[0].getAttribute('minVersion'), '0.1')
eq_(self.vr()[0].getAttribute('maxVersion'), '0.2')
def test_item_multiple_version_range(self):
# There should be two <versionRange>s under one <emItem>.
self.item.update(min='0.1', max='0.2')
BlocklistItem.objects.create(guid=self.item.guid, severity=3)
item = self.dom(self.fx4_url).getElementsByTagName('emItem')
eq_(len(item), 1)
vr = item[0].getElementsByTagName('versionRange')
eq_(len(vr), 2)
eq_(vr[0].getAttribute('minVersion'), '0.1')
eq_(vr[0].getAttribute('maxVersion'), '0.2')
eq_(vr[1].getAttribute('severity'), '3')
def test_item_target_app(self):
app = self.app
self.app.delete()
self.item.update(severity=2)
version_range = self.vr()[0]
eq_(version_range.getElementsByTagName('targetApplication'), [])
app.save()
version_range = self.vr()[0]
target_app = version_range.getElementsByTagName('targetApplication')
eq_(len(target_app), 1)
eq_(target_app[0].getAttribute('id'), amo.FIREFOX.guid)
app.update(min='0.1', max='*')
version_range = self.vr()[0]
target_app = version_range.getElementsByTagName('targetApplication')
eq_(target_app[0].getAttribute('id'), amo.FIREFOX.guid)
tvr = target_app[0].getElementsByTagName('versionRange')
eq_(tvr[0].getAttribute('minVersion'), '0.1')
eq_(tvr[0].getAttribute('maxVersion'), '*')
def test_item_multiple_apps(self):
# Make sure all <targetApplication>s go under the same <versionRange>.
self.app.update(min='0.1', max='0.2')
BlocklistApp.objects.create(guid=amo.FIREFOX.guid, blitem=self.item,
min='3.0', max='3.1')
version_range = self.vr()[0]
apps = version_range.getElementsByTagName('targetApplication')
eq_(len(apps), 2)
eq_(apps[0].getAttribute('id'), amo.FIREFOX.guid)
vr = apps[0].getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('minVersion'), '0.1')
eq_(vr.getAttribute('maxVersion'), '0.2')
eq_(apps[1].getAttribute('id'), amo.FIREFOX.guid)
vr = apps[1].getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('minVersion'), '3.0')
eq_(vr.getAttribute('maxVersion'), '3.1')
def test_item_empty_version_range(self):
# No version_range without an app, min, max, or severity.
self.app.delete()
self.item.update(min=None, max=None, severity=None)
eq_(len(self.vr()), 0)
def test_item_empty_target_app(self):
# No empty <targetApplication>.
self.item.update(severity=1)
self.app.delete()
eq_(self.dom(self.fx4_url).getElementsByTagName('targetApplication'),
[])
def test_item_target_empty_version_range(self):
app = self.dom(self.fx4_url).getElementsByTagName('targetApplication')
eq_(app[0].getElementsByTagName('versionRange'), [])
def test_name(self):
self.assertAttribute(self.item, field='name', tag='emItem',
attr_name='name')
def test_creator(self):
self.assertAttribute(self.item, field='creator', tag='emItem',
attr_name='creator')
def test_homepage_url(self):
self.assertAttribute(self.item, field='homepage_url', tag='emItem',
attr_name='homepageURL')
def test_update_url(self):
self.assertAttribute(self.item, field='update_url', tag='emItem',
attr_name='updateURL')
def test_urls_escaped(self):
self.assertEscaped(self.item, 'homepage_url')
self.assertEscaped(self.item, 'update_url')
class BlocklistPluginTest(XMLAssertsMixin, BlocklistViewTest):
def setUp(self):
super(BlocklistPluginTest, self).setUp()
self.plugin, self.app = self.create_blplugin(app_guid=amo.FIREFOX.guid,
details=self.details)
def test_no_plugins(self):
dom = BlocklistViewTest.dom(self, self.mobile_url)
children = dom.getElementsByTagName('blocklist')[0].childNodes
# There are only text nodes.
assert all(e.nodeType == 3 for e in children)
def dom(self, url=None):
url = url or self.fx4_url
r = self.client.get(url)
d = minidom.parseString(r.content)
return d.getElementsByTagName('pluginItem')[0]
def test_plugin_empty(self):
self.app.delete()
eq_(self.dom().attributes.keys(), ['blockID'])
eq_(self.dom().getElementsByTagName('match'), [])
eq_(self.dom().getElementsByTagName('versionRange'), [])
def test_block_id(self):
item = self.dom(self.fx4_url)
eq_(item.getAttribute('blockID'), 'p' + str(self.details.id))
def test_plugin_os(self):
self.plugin.update(os='win')
eq_(sorted(self.dom().attributes.keys()), ['blockID', 'os'])
eq_(self.dom().getAttribute('os'), 'win')
def test_plugin_xpcomabi(self):
self.plugin.update(xpcomabi='win')
eq_(sorted(self.dom().attributes.keys()), ['blockID', 'xpcomabi'])
eq_(self.dom().getAttribute('xpcomabi'), 'win')
def test_plugin_name(self):
self.plugin.update(name='flash')
match = self.dom().getElementsByTagName('match')
eq_(len(match), 1)
eq_(dict(match[0].attributes.items()),
{'name': 'name', 'exp': 'flash'})
def test_plugin_description(self):
self.plugin.update(description='flash')
match = self.dom().getElementsByTagName('match')
eq_(len(match), 1)
eq_(dict(match[0].attributes.items()),
{'name': 'description', 'exp': 'flash'})
def test_plugin_filename(self):
self.plugin.update(filename='flash')
match = self.dom().getElementsByTagName('match')
eq_(len(match), 1)
eq_(dict(match[0].attributes.items()),
{'name': 'filename', 'exp': 'flash'})
def test_plugin_severity(self):
self.plugin.update(severity=2)
v = self.dom().getElementsByTagName('versionRange')[0]
eq_(v.getAttribute('severity'), '2')
def test_plugin_severity_zero(self):
self.plugin.update(severity=0)
v = self.dom().getElementsByTagName('versionRange')[0]
eq_(v.getAttribute('severity'), '0')
def test_plugin_no_target_app(self):
self.plugin.update(severity=1, min='1', max='2')
self.app.delete()
vr = self.dom().getElementsByTagName('versionRange')[0]
eq_(vr.getElementsByTagName('targetApplication'), [],
'There should not be a <targetApplication> if there was no app')
eq_(vr.getAttribute('severity'), '1')
eq_(vr.getAttribute('minVersion'), '1')
eq_(vr.getAttribute('maxVersion'), '2')
def test_plugin_with_target_app(self):
self.plugin.update(severity=1)
self.app.update(guid=amo.FIREFOX.guid, min='1', max='2')
vr = self.dom().getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('severity'), '1')
assert not vr.getAttribute('vulnerabilitystatus')
app = vr.getElementsByTagName('targetApplication')[0]
eq_(app.getAttribute('id'), amo.FIREFOX.guid)
vr = app.getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('minVersion'), '1')
eq_(vr.getAttribute('maxVersion'), '2')
def test_plugin_with_multiple_target_apps(self):
self.plugin.update(severity=1, min='5', max='6')
self.app.update(guid=amo.FIREFOX.guid, min='1', max='2')
BlocklistApp.objects.create(guid=amo.THUNDERBIRD.guid,
min='3', max='4',
blplugin=self.plugin)
vr = self.dom().getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('severity'), '1')
eq_(vr.getAttribute('minVersion'), '5')
eq_(vr.getAttribute('maxVersion'), '6')
assert not vr.getAttribute('vulnerabilitystatus')
app = vr.getElementsByTagName('targetApplication')[0]
eq_(app.getAttribute('id'), amo.FIREFOX.guid)
vr = app.getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('minVersion'), '1')
eq_(vr.getAttribute('maxVersion'), '2')
vr = self.dom(self.tb4_url).getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('severity'), '1')
eq_(vr.getAttribute('minVersion'), '5')
eq_(vr.getAttribute('maxVersion'), '6')
assert not vr.getAttribute('vulnerabilitystatus')
app = vr.getElementsByTagName('targetApplication')[0]
eq_(app.getAttribute('id'), amo.THUNDERBIRD.guid)
vr = app.getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('minVersion'), '3')
eq_(vr.getAttribute('maxVersion'), '4')
def test_plugin_with_target_app_with_vulnerability(self):
self.plugin.update(severity=0, vulnerability_status=2)
self.app.update(guid=amo.FIREFOX.guid, min='1', max='2')
vr = self.dom().getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('severity'), '0')
eq_(vr.getAttribute('vulnerabilitystatus'), '2')
app = vr.getElementsByTagName('targetApplication')[0]
eq_(app.getAttribute('id'), amo.FIREFOX.guid)
vr = app.getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('minVersion'), '1')
eq_(vr.getAttribute('maxVersion'), '2')
def test_plugin_with_severity_only(self):
self.plugin.update(severity=1)
self.app.delete()
vr = self.dom().getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('severity'), '1')
assert not vr.getAttribute('vulnerabilitystatus')
eq_(vr.getAttribute('minVersion'), '')
eq_(vr.getAttribute('maxVersion'), '')
eq_(vr.getElementsByTagName('targetApplication'), [],
'There should not be a <targetApplication> if there was no app')
def test_plugin_without_severity_and_with_vulnerability(self):
self.plugin.update(severity=0, vulnerability_status=1)
self.app.delete()
vr = self.dom().getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('severity'), '0')
eq_(vr.getAttribute('vulnerabilitystatus'), '1')
eq_(vr.getAttribute('minVersion'), '')
eq_(vr.getAttribute('maxVersion'), '')
def test_plugin_without_severity_and_with_vulnerability_and_minmax(self):
self.plugin.update(severity=0, vulnerability_status=1, min='2.0',
max='3.0')
self.app.delete()
vr = self.dom().getElementsByTagName('versionRange')[0]
eq_(vr.getAttribute('severity'), '0')
eq_(vr.getAttribute('vulnerabilitystatus'), '1')
eq_(vr.getAttribute('minVersion'), '2.0')
eq_(vr.getAttribute('maxVersion'), '3.0')
def test_plugin_apiver_lt_3(self):
self.plugin.update(severity='2')
# No min & max so the app matches.
e = self.dom(self.fx2_url).getElementsByTagName('versionRange')[0]
eq_(e.getAttribute('severity'), '2')
eq_(e.getElementsByTagName('targetApplication'), [])
# The app version is not in range.
self.app.update(min='3.0', max='4.0')
self.assertRaises(IndexError, self.dom, self.fx2_url)
# The app is back in range.
self.app.update(min='1.1')
e = self.dom(self.fx2_url).getElementsByTagName('versionRange')[0]
eq_(e.getAttribute('severity'), '2')
eq_(e.getElementsByTagName('targetApplication'), [])
def test_info_url(self):
self.assertOptional(self.plugin, 'info_url', 'infoURL')
self.assertEscaped(self.plugin, 'info_url')
class BlocklistGfxTest(BlocklistViewTest):
def setUp(self):
super(BlocklistGfxTest, self).setUp()
self.gfx = BlocklistGfx.objects.create(
guid=amo.FIREFOX.guid, os='os', vendor='vendor', devices='x y z',
feature='feature', feature_status='status', details=self.details,
driver_version='version', driver_version_max='version max',
driver_version_comparator='compare', hardware='giant_robot')
def test_no_gfx(self):
dom = self.dom(self.mobile_url)
children = dom.getElementsByTagName('blocklist')[0].childNodes
# There are only text nodes.
assert all(e.nodeType == 3 for e in children)
def test_gfx(self):
r = self.client.get(self.fx4_url)
dom = minidom.parseString(r.content)
gfx = dom.getElementsByTagName('gfxBlacklistEntry')[0]
def find(e):
return gfx.getElementsByTagName(e)[0].childNodes[0].wholeText
assert find('os') == self.gfx.os
assert find('feature') == self.gfx.feature
assert find('vendor') == self.gfx.vendor
assert find('featureStatus') == self.gfx.feature_status
assert find('driverVersion') == self.gfx.driver_version
assert find('driverVersionMax') == self.gfx.driver_version_max
expected_version_comparator = self.gfx.driver_version_comparator
assert find('driverVersionComparator') == expected_version_comparator
assert find('hardware') == self.gfx.hardware
devices = gfx.getElementsByTagName('devices')[0]
for device, val in zip(devices.getElementsByTagName('device'),
self.gfx.devices.split(' ')):
assert device.childNodes[0].wholeText == val
def test_empty_devices(self):
self.gfx.devices = None
self.gfx.save()
r = self.client.get(self.fx4_url)
self.assertNotContains(r, '<devices>')
def test_no_empty_nodes(self):
self.gfx.update(os=None, vendor=None, devices=None,
feature=None, feature_status=None,
driver_version=None, driver_version_max=None,
driver_version_comparator=None, hardware=None)
r = self.client.get(self.fx4_url)
self.assertNotContains(r, '<os>')
self.assertNotContains(r, '<vendor>')
self.assertNotContains(r, '<devices>')
self.assertNotContains(r, '<feature>')
self.assertNotContains(r, '<featureStatus>')
self.assertNotContains(r, '<driverVersion>')
self.assertNotContains(r, '<driverVersionMax>')
self.assertNotContains(r, '<driverVersionComparator>')
self.assertNotContains(r, '<hardware>')
def test_block_id(self):
item = (self.dom(self.fx4_url)
.getElementsByTagName('gfxBlacklistEntry')[0])
eq_(item.getAttribute('blockID'), 'g' + str(self.details.id))
class BlocklistCATest(BlocklistViewTest):
def setUp(self):
super(BlocklistCATest, self).setUp()
self.ca = BlocklistCA.objects.create(data=u'Ètå…, ≥•≤')
def test_ca(self):
r = self.client.get(self.fx4_url)
dom = minidom.parseString(r.content)
ca = dom.getElementsByTagName('caBlocklistEntry')[0]
eq_(base64.b64decode(ca.childNodes[0].toxml()), 'Ètå…, ≥•≤')
class BlocklistIssuerCertTest(BlocklistViewTest):
def setUp(self):
super(BlocklistIssuerCertTest, self).setUp()
self.issuerCertBlock = BlocklistIssuerCert.objects.create(
issuer='testissuer', serial='testserial',
details=BlocklistDetail.objects.create(name='one'))
self.issuerCertBlock2 = BlocklistIssuerCert.objects.create(
issuer='anothertestissuer', serial='anothertestserial',
details=BlocklistDetail.objects.create(name='two'))
def test_extant_nodes(self):
r = self.client.get(self.fx4_url)
dom = minidom.parseString(r.content)
certItem = dom.getElementsByTagName('certItem')[0]
eq_(certItem.getAttribute('issuerName'), self.issuerCertBlock.issuer)
serialNode = dom.getElementsByTagName('serialNumber')[0]
serialNumber = serialNode.childNodes[0].wholeText
eq_(serialNumber, self.issuerCertBlock.serial)
certItem = dom.getElementsByTagName('certItem')[1]
eq_(certItem.getAttribute('issuerName'), self.issuerCertBlock2.issuer)
serialNode = dom.getElementsByTagName('serialNumber')[1]
serialNumber = serialNode.childNodes[0].wholeText
eq_(serialNumber, self.issuerCertBlock2.serial)
```
#### File: olympia/compat/tests.py
```python
import json
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.urlresolvers import reverse
from olympia.addons.models import Addon
from olympia.compat.models import CompatReport, CompatTotals
# This is the structure sent to /compatibility/incoming from the ACR.
incoming_data = {
'appBuild': '20110429030623',
'appGUID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
'appVersion': '6.0a1',
'clientOS': 'Intel Mac OS X 10.6',
'comments': 'what the what',
'guid': 'jid0-VsMuA0YYTKCjBh5F0pxHAudnEps@jetpack',
'otherAddons': [['<EMAIL>', '2.1.0']],
'version': '2.2',
'worksProperly': False,
}
class TestCompatReportModel(TestCase):
def test_none(self):
eq_(CompatReport.get_counts('xxx'), {'success': 0, 'failure': 0})
def test_some(self):
guid = '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}'
CompatReport.objects.create(guid=guid, works_properly=True)
CompatReport.objects.create(guid=guid, works_properly=True)
CompatReport.objects.create(guid=guid, works_properly=False)
CompatReport.objects.create(guid='ballin', works_properly=True)
CompatReport.objects.create(guid='ballin', works_properly=False)
eq_(CompatReport.get_counts(guid), {'success': 2, 'failure': 1})
class TestIndex(TestCase):
# TODO: Test valid version processing here.
def setUp(self):
super(TestIndex, self).setUp()
self.url = reverse('compat.index', args=[amo.COMPAT[0]['main']])
CompatTotals.objects.create(app=1, total=1)
def test_no_version_redirect(self):
res = self.client.get(reverse('compat.index'))
self.assert3xx(res, self.url)
def test_previous_version_link(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
self.assertUrlEqual(doc('h2.c a').attr('href'),
'{url}?page=1&previous=1'.format(url=self.url))
def test_previous_version_link_with_active_pagination(self):
# The current pagination is not kept when we switch to previous
# versions. See 1056022.
r = self.client.get(self.url, {'page': 2, 'type': 'all'})
eq_(r.status_code, 200)
doc = pq(r.content)
self.assertUrlEqual(
doc('h2.c a').attr('href'),
'{url}?type=all&page=1&previous=1'.format(url=self.url))
class TestIncoming(TestCase):
def setUp(self):
super(TestIncoming, self).setUp()
self.url = reverse('compat.incoming')
self.data = dict(incoming_data)
self.json = json.dumps(self.data)
def test_success(self):
count = CompatReport.objects.count()
r = self.client.post(self.url, self.json,
content_type='application/json')
eq_(r.status_code, 204)
eq_(CompatReport.objects.count(), count + 1)
cr = CompatReport.objects.order_by('-id')[0]
eq_(cr.app_build, incoming_data['appBuild'])
eq_(cr.app_guid, incoming_data['appGUID'])
eq_(cr.works_properly, incoming_data['worksProperly'])
eq_(cr.comments, incoming_data['comments'])
eq_(cr.client_ip, '127.0.0.1')
# Check that the other_addons field is stored as json.
vals = CompatReport.objects.filter(id=cr.id).values('other_addons')
eq_(vals[0]['other_addons'],
json.dumps(incoming_data['otherAddons'], separators=(',', ':')))
def test_bad_json(self):
r = self.client.post(self.url, 'wuuu#$',
content_type='application/json')
eq_(r.status_code, 400)
def test_bad_field(self):
self.data['save'] = 1
js = json.dumps(self.data)
r = self.client.post(self.url, js, content_type='application/json')
eq_(r.status_code, 400)
class TestReporter(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestReporter, self).setUp()
self.addon = Addon.objects.get(pk=3615)
self.url = reverse('compat.reporter') + '?guid={0}'
def test_success(self):
r = self.client.get(reverse('compat.reporter'))
eq_(r.status_code, 200)
def test_redirect(self):
CompatReport.objects.create(guid=self.addon.guid,
app_guid=amo.FIREFOX.guid)
expected = reverse('compat.reporter_detail', args=[self.addon.guid])
self.assert3xx(
self.client.get(self.url.format(self.addon.id)), expected)
self.assert3xx(
self.client.get(self.url.format(self.addon.slug)), expected)
self.assert3xx(
self.client.get(self.url.format(self.addon.guid)), expected)
self.assert3xx(
self.client.get(self.url.format(self.addon.guid[:5])), expected)
@mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
lambda r, a: True)
def test_unlisted_addon_redirect_for_authorized(self):
"""Can display the reports for an unlisted addon if authorized."""
self.addon.update(is_listed=False)
self.test_redirect()
@mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
lambda r, a: False)
def test_unlisted_addon_no_redirect_for_unauthorized(self):
"""If the user isn't authorized, don't redirect to unlisted addon."""
self.addon.update(is_listed=False)
CompatReport.objects.create(guid=self.addon.guid,
app_guid=amo.FIREFOX.guid)
assert self.client.get(
self.url.format(self.addon.id)).status_code == 200
assert self.client.get(
self.url.format(self.addon.slug)).status_code == 200
assert self.client.get(
self.url.format(self.addon.guid)).status_code == 200
assert self.client.get(
self.url.format(self.addon.guid[:5])).status_code == 200
def test_unlisted_addons_listed_in_left_sidebar(self):
"""Display unlisted addons in the 'reports for your add-ons' list."""
self.addon.update(is_listed=False)
self.client.login(username='<EMAIL>', password='password')
response = self.client.get(reverse('compat.reporter'))
assert self.addon in response.context['addons']
class TestReporterDetail(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestReporterDetail, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.url = reverse('compat.reporter_detail', args=[self.addon.guid])
self.reports = []
def _generate(self):
apps = [
(amo.FIREFOX.guid, '10.0.1', True), # 0
(amo.FIREFOX.guid, '10.0a1', True), # 1
(amo.FIREFOX.guid, '10.0', False), # 2
(amo.FIREFOX.guid, '6.0.1', False), # 3
(amo.THUNDERBIRD.guid, '10.0', True), # 4
(amo.THUNDERBIRD.guid, '6.6.3', False), # 5
(amo.THUNDERBIRD.guid, '6.0.1', False), # 6
(amo.SEAMONKEY.guid, '2.3.0', False), # 7
(amo.SEAMONKEY.guid, '2.3a1', False), # 8
(amo.SEAMONKEY.guid, '2.3', False), # 9
]
for app_guid, app_version, works_properly in apps:
report = CompatReport.objects.create(guid=self.addon.guid,
app_guid=app_guid,
app_version=app_version,
works_properly=works_properly)
self.reports.append(report.pk)
def check_table(self, data={}, good=0, bad=0, appver=None, report_pks=[]):
r = self.client.get(self.url, data)
eq_(r.status_code, 200)
# Check that we got the correct reports.
eq_(sorted(r.id for r in r.context['reports'].object_list),
sorted(self.reports[pk] for pk in report_pks))
doc = pq(r.content)
eq_(doc('.compat-info tbody tr').length, good + bad)
reports = doc('#reports')
if good == 0 and bad == 0:
eq_(reports.find('.good, .bad').length, 0)
eq_(doc('.no-results').length, 1)
else:
# Check "X success reports" and "X failure reports" buttons.
eq_(reports.find('.good').text().split()[0], str(good))
eq_(reports.find('.bad').text().split()[0], str(bad))
# Check "Filter by Application" field.
eq_(doc('#compat-form select[name=appver] option[selected]').val(),
appver)
return r
def test_appver_all(self):
self._generate()
self.check_table(
good=3, bad=7, appver='',
report_pks=[idx for idx, val in enumerate(self.reports)])
def test_firefox_single(self):
self._generate()
appver = '%s-%s' % (amo.FIREFOX.id, '6.0')
self.check_table(data={'appver': appver}, good=0, bad=1, appver=appver,
report_pks=[3])
def test_firefox_multiple(self):
self._generate()
appver = '%s-%s' % (amo.FIREFOX.id, '10.0')
self.check_table(data={'appver': appver}, good=2, bad=1, appver=appver,
report_pks=[0, 1, 2])
def test_firefox_empty(self):
self._generate()
appver = '%s-%s' % (amo.FIREFOX.id,
amo.COMPAT[0]['main']) # Firefox 11.
self.check_table(data={'appver': appver}, good=0, bad=0, appver=appver,
report_pks=[])
def test_firefox_unknown(self):
self._generate()
# If we have a bad app/version combination, we don't apply any filters.
appver = '%s-%s' % (amo.FIREFOX.id, '0.9999')
self.check_table(
data={'appver': appver}, good=3, bad=7,
report_pks=[idx for idx, val in enumerate(self.reports)])
def test_thunderbird_multiple(self):
self._generate()
appver = '%s-%s' % (amo.THUNDERBIRD.id, '6.0')
self.check_table(data={'appver': appver}, good=0, bad=2, appver=appver,
report_pks=[5, 6])
def test_thunderbird_unknown(self):
self._generate()
appver = '%s-%s' % (amo.THUNDERBIRD.id, '0.9999')
self.check_table(
data={'appver': appver}, good=3, bad=7,
report_pks=[idx for idx, val in enumerate(self.reports)])
def test_seamonkey_multiple(self):
self._generate()
appver = '%s-%s' % (amo.SEAMONKEY.id, '2.3')
self.check_table(data={'appver': appver}, good=0, bad=3, appver=appver,
report_pks=[7, 8, 9])
def test_seamonkey_unknown(self):
self._generate()
appver = '%s-%s' % (amo.SEAMONKEY.id, '0.9999')
self.check_table(
data={'appver': appver}, good=3, bad=7,
report_pks=[idx for idx, val in enumerate(self.reports)])
def test_app_unknown(self):
# Testing for some unknown application such as 'Conkeror'.
app_guid = '{a79fe89b-6662-4ff4-8e88-09950ad4dfde}'
report = CompatReport.objects.create(
guid=self.addon.guid, app_guid=app_guid, app_version='0.9.3',
works_properly=True)
self.reports.append(report.pk)
r = self.check_table(good=1, bad=0, appver='', report_pks=[0])
msg = 'Unknown (%s)' % app_guid
assert msg in r.content, 'Expected %s in body' % msg
@mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
lambda r, a: True)
def test_unlisted_addon_details_for_authorized(self):
"""If the user is authorized, display the reports."""
self.addon.update(is_listed=False)
self._generate()
self.check_table(
good=3, bad=7, appver='',
report_pks=[idx for idx, val in enumerate(self.reports)])
@mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
lambda r, a: False)
def test_unlisted_addon_no_details_for_unauthorized(self):
"""If the user isn't authorized, don't display the reports."""
self.addon.update(is_listed=False)
self._generate()
self.check_table(
good=0, bad=0, appver='',
report_pks=[])
```
#### File: devhub/tests/test_views.py
```python
import json
import os
import socket
from datetime import datetime, timedelta
from decimal import Decimal
from django import http
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.core.files import temp
import mock
import waffle
from jingo.helpers import datetime as datetime_filter
from nose.plugins.attrib import attr
from nose.tools import assert_not_equal, assert_raises, eq_, ok_
from PIL import Image
from pyquery import PyQuery as pq
from tower import strip_whitespace
from olympia import amo, paypal, files
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon, AddonCategory, Category, Charity
from olympia.amo.helpers import absolutify, user_media_path, url as url_reverse
from olympia.amo.tests import addon_factory, formset, initial
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.urlresolvers import reverse
from olympia.api.models import APIKey, SYMMETRIC_JWT_TYPE
from olympia.applications.models import AppVersion
from olympia.devhub.forms import ContribForm
from olympia.devhub.models import ActivityLog, BlogPost, SubmitStep
from olympia.devhub.tasks import validate
from olympia.files.models import File, FileUpload
from olympia.files.tests.test_models import UploadTest as BaseUploadTest
from olympia.reviews.models import Review
from olympia.translations.models import Translation
from olympia.users.models import UserProfile
from olympia.versions.models import ApplicationsVersions, License, Version
def get_addon_count(name):
"""Return the number of addons with the given name."""
return Addon.unfiltered.filter(name__localized_string=name).count()
class HubTest(TestCase):
fixtures = ['browse/nameless-addon', 'base/users']
def setUp(self):
super(HubTest, self).setUp()
self.url = reverse('devhub.index')
assert self.client.login(username='<EMAIL>',
password='password')
eq_(self.client.get(self.url).status_code, 200)
self.user_profile = UserProfile.objects.get(id=999)
def clone_addon(self, num, addon_id=57132):
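        """Create `num` clones of the add-on with id `addon_id`, assign
        them to the test user's profile and return the new add-on ids."""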
ids = []
for i in range(num):
addon = Addon.objects.get(id=addon_id)
data = dict(type=addon.type, status=addon.status,
name='cloned-addon-%s-%s' % (addon_id, i))
new_addon = Addon.objects.create(**data)
new_addon.addonuser_set.create(user=self.user_profile)
ids.append(new_addon.id)
return ids
class TestNav(HubTest):
def test_navbar(self):
r = self.client.get(self.url)
doc = pq(r.content)
eq_(doc('#site-nav').length, 1)
def test_no_addons(self):
"""Check that no add-ons are displayed for this user."""
r = self.client.get(self.url)
doc = pq(r.content)
assert_not_equal(
doc('#navbar ul li.top a').eq(0).text(),
'My Add-ons',
'My Add-ons menu should not be visible if user has no add-ons.')
def test_my_addons(self):
"""Check that the correct items are listed for the My Add-ons menu."""
# Assign this add-on to the current user profile.
addon = Addon.objects.get(id=57132)
addon.name = 'Test'
addon.save()
addon.addonuser_set.create(user=self.user_profile)
r = self.client.get(self.url)
doc = pq(r.content)
# Check the anchor for the 'My Add-ons' menu item.
eq_(doc('#site-nav ul li.top a').eq(0).text(), 'My Add-ons')
# Check the anchor for the single add-on.
eq_(doc('#site-nav ul li.top li a').eq(0).attr('href'),
addon.get_dev_url())
# Create 6 add-ons.
self.clone_addon(6)
r = self.client.get(self.url)
doc = pq(r.content)
# There should be 8 items in this menu.
eq_(doc('#site-nav ul li.top').eq(0).find('ul li').length, 8)
# This should be the 8th anchor, after the 7 addons.
eq_(doc('#site-nav ul li.top').eq(0).find('li a').eq(7).text(),
'Submit a New Add-on')
self.clone_addon(1)
r = self.client.get(self.url)
doc = pq(r.content)
eq_(doc('#site-nav ul li.top').eq(0).find('li a').eq(7).text(),
'more add-ons...')
def test_unlisted_addons_are_displayed(self):
"""Check that unlisted addons are displayed in the nav."""
# Assign this add-on to the current user profile.
addon = Addon.objects.get(id=57132)
addon.name = 'Test'
addon.is_listed = False
addon.save()
addon.addonuser_set.create(user=self.user_profile)
r = self.client.get(self.url)
doc = pq(r.content)
# Check the anchor for the unlisted add-on.
eq_(doc('#site-nav ul li.top li a').eq(0).attr('href'),
addon.get_dev_url())
class TestDashboard(HubTest):
def setUp(self):
super(TestDashboard, self).setUp()
self.url = reverse('devhub.addons')
self.themes_url = reverse('devhub.themes')
eq_(self.client.get(self.url).status_code, 200)
self.addon = Addon.objects.get(pk=57132)
self.addon.name = 'some addon'
self.addon.save()
self.addon.addonuser_set.create(user=self.user_profile)
def test_addons_layout(self):
doc = pq(self.client.get(self.url).content)
eq_(doc('title').text(),
'Manage My Submissions :: Developer Hub :: Add-ons for Firefox')
eq_(doc('#social-footer').length, 1)
eq_(doc('#copyright').length, 1)
eq_(doc('#footer-links .mobile-link').length, 0)
def get_action_links(self, addon_id):
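        """Return the text of the action links shown on the dashboard for
        the add-on with the given id."""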
r = self.client.get(self.url)
doc = pq(r.content)
links = [a.text.strip() for a in
doc('.item[data-addonid=%s] .item-actions li > a' % addon_id)]
return links
def test_no_addons(self):
"""Check that no add-ons are displayed for this user."""
r = self.client.get(self.url)
doc = pq(r.content)
eq_(doc('.item item').length, 0)
def test_addon_pagination(self):
"""Check that the correct info. is displayed for each add-on:
namely, that add-ons are paginated at 10 items per page, and that
when there is more than one page, the 'Sort by' header and pagination
footer appear.
"""
# Create 9 add-ons, there's already one existing from the setUp.
self.clone_addon(9)
r = self.client.get(self.url)
doc = pq(r.content)
eq_(len(doc('.item .item-info')), 10)
eq_(doc('nav.paginator').length, 0)
# Create 5 add-ons.
self.clone_addon(5)
r = self.client.get(self.url, dict(page=2))
doc = pq(r.content)
eq_(len(doc('.item .item-info')), 5)
eq_(doc('nav.paginator').length, 1)
def test_themes(self):
"""Check themes show on dashboard."""
# Create 2 themes.
for x in range(2):
addon = addon_factory(type=amo.ADDON_PERSONA)
addon.addonuser_set.create(user=self.user_profile)
r = self.client.get(self.themes_url)
doc = pq(r.content)
eq_(len(doc('.item .item-info')), 2)
def test_show_hide_statistics(self):
# when Active and Public show statistics
self.addon.update(disabled_by_user=False, status=amo.STATUS_PUBLIC)
links = self.get_action_links(self.addon.pk)
assert 'Statistics' in links, ('Unexpected: %r' % links)
# when Active and Incomplete hide statistics
self.addon.update(disabled_by_user=False, status=amo.STATUS_NULL)
SubmitStep.objects.create(addon=self.addon, step=6)
links = self.get_action_links(self.addon.pk)
assert 'Statistics' not in links, ('Unexpected: %r' % links)
def test_public_addon(self):
eq_(self.addon.status, amo.STATUS_PUBLIC)
doc = pq(self.client.get(self.url).content)
item = doc('.item[data-addonid=%s]' % self.addon.id)
eq_(item.find('h3 a').attr('href'), self.addon.get_dev_url())
assert item.find('p.downloads'), 'Expected weekly downloads'
assert item.find('p.users'), 'Expected ADU'
assert item.find('.item-details'), 'Expected item details'
assert not item.find('p.incomplete'), (
'Unexpected message about incomplete add-on')
def test_dev_news(self):
for i in xrange(7):
bp = BlogPost(title='hi %s' % i,
date_posted=datetime.now() - timedelta(days=i))
bp.save()
r = self.client.get(self.url)
doc = pq(r.content)
eq_(doc('.blog-posts').length, 1)
eq_(doc('.blog-posts li').length, 5)
eq_(doc('.blog-posts li a').eq(0).text(), "hi 0")
eq_(doc('.blog-posts li a').eq(4).text(), "hi 4")
def test_sort_created_filter(self):
response = self.client.get(self.url + '?sort=created')
doc = pq(response.content)
eq_(doc('.item-details').length, 1)
d = doc('.item-details .date-created')
eq_(d.length, 1)
eq_(d.remove('strong').text(),
datetime_filter(self.addon.created, '%b %e, %Y'))
def test_sort_updated_filter(self):
response = self.client.get(self.url)
doc = pq(response.content)
eq_(doc('.item-details').length, 1)
d = doc('.item-details .date-updated')
eq_(d.length, 1)
eq_(d.remove('strong').text(),
strip_whitespace(datetime_filter(self.addon.last_updated,
'%b %e, %Y')))
def test_no_sort_updated_filter_for_themes(self):
# Create a theme.
addon = addon_factory(type=amo.ADDON_PERSONA)
addon.addonuser_set.create(user=self.user_profile)
# There's no "updated" sort filter, so order by the default: "Name".
response = self.client.get(self.themes_url + '?sort=updated')
doc = pq(response.content)
eq_(doc('#sorter li.selected').text(), 'Name')
sorts = doc('#sorter li a.opt')
assert not any('?sort=updated' in a.attrib['href'] for a in sorts)
# There's no "last updated" for themes, so always display "created".
eq_(doc('.item-details .date-updated'), []) # No "updated" in details.
d = doc('.item-details .date-created')
eq_(d.remove('strong').text(),
strip_whitespace(datetime_filter(addon.created)))
class TestUpdateCompatibility(TestCase):
fixtures = ['base/users', 'base/addon_4594_a9', 'base/addon_3615']
def setUp(self):
super(TestUpdateCompatibility, self).setUp()
assert self.client.login(username='<EMAIL>', password='password')
self.url = reverse('devhub.addons')
# TODO(andym): use Mock appropriately here.
self._versions = amo.FIREFOX.latest_version, amo.MOBILE.latest_version
amo.FIREFOX.latest_version = amo.MOBILE.latest_version = '3.6.15'
def tearDown(self):
amo.FIREFOX.latest_version, amo.MOBILE.latest_version = self._versions
super(TestUpdateCompatibility, self).tearDown()
def test_no_compat(self):
self.client.logout()
assert self.client.login(username='<EMAIL>',
password='password')
r = self.client.get(self.url)
doc = pq(r.content)
assert not doc('.item[data-addonid=4594] li.compat')
a = Addon.objects.get(pk=4594)
r = self.client.get(reverse('devhub.ajax.compat.update',
args=[a.slug, a.current_version.id]))
eq_(r.status_code, 404)
r = self.client.get(reverse('devhub.ajax.compat.status',
args=[a.slug]))
eq_(r.status_code, 404)
def test_compat(self):
a = Addon.objects.get(pk=3615)
r = self.client.get(self.url)
doc = pq(r.content)
cu = doc('.item[data-addonid=3615] .tooltip.compat-update')
assert cu
update_url = reverse('devhub.ajax.compat.update',
args=[a.slug, a.current_version.id])
eq_(cu.attr('data-updateurl'), update_url)
status_url = reverse('devhub.ajax.compat.status', args=[a.slug])
eq_(doc('.item[data-addonid=3615] li.compat').attr('data-src'),
status_url)
assert doc('.item[data-addonid=3615] .compat-update-modal')
def test_incompat_firefox(self):
versions = ApplicationsVersions.objects.all()[0]
versions.max = AppVersion.objects.get(version='2.0')
versions.save()
doc = pq(self.client.get(self.url).content)
assert doc('.item[data-addonid=3615] .tooltip.compat-error')
def test_incompat_mobile(self):
appver = AppVersion.objects.get(version='2.0')
appver.update(application=amo.MOBILE.id)
av = ApplicationsVersions.objects.all()[0]
av.application = amo.MOBILE.id
av.max = appver
av.save()
doc = pq(self.client.get(self.url).content)
assert doc('.item[data-addonid=3615] .tooltip.compat-error')
class TestDevRequired(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestDevRequired, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.get_url = self.addon.get_dev_url('payments')
self.post_url = self.addon.get_dev_url('payments.disable')
assert self.client.login(username='<EMAIL>', password='password')
self.au = self.addon.addonuser_set.get(user__email='<EMAIL>')
eq_(self.au.role, amo.AUTHOR_ROLE_OWNER)
def test_anon(self):
self.client.logout()
r = self.client.get(self.get_url, follow=True)
login = reverse('users.login')
self.assert3xx(r, '%s?to=%s' % (login, self.get_url))
def test_dev_get(self):
eq_(self.client.get(self.get_url).status_code, 200)
def test_dev_post(self):
self.assert3xx(self.client.post(self.post_url), self.get_url)
def test_viewer_get(self):
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
eq_(self.client.get(self.get_url).status_code, 200)
def test_viewer_post(self):
self.au.role = amo.AUTHOR_ROLE_VIEWER
self.au.save()
eq_(self.client.post(self.get_url).status_code, 403)
def test_disabled_post_dev(self):
self.addon.update(status=amo.STATUS_DISABLED)
eq_(self.client.post(self.get_url).status_code, 403)
def test_disabled_post_admin(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert self.client.login(username='<EMAIL>',
password='password')
self.assert3xx(self.client.post(self.post_url), self.get_url)
class TestVersionStats(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestVersionStats, self).setUp()
assert self.client.login(username='<EMAIL>',
password='password')
def test_counts(self):
addon = Addon.objects.get(id=3615)
version = addon.current_version
user = UserProfile.objects.get(email='<EMAIL>')
for _ in range(10):
Review.objects.create(addon=addon, user=user,
version=addon.current_version)
url = reverse('devhub.versions.stats', args=[addon.slug])
r = json.loads(self.client.get(url).content)
exp = {str(version.id):
{'reviews': 10, 'files': 1, 'version': version.version,
'id': version.id}}
self.assertDictEqual(r, exp)
class TestEditPayments(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestEditPayments, self).setUp()
self.addon = self.get_addon()
self.addon.the_reason = self.addon.the_future = '...'
self.addon.save()
self.foundation = Charity.objects.create(
id=amo.FOUNDATION_ORG, name='moz', url='$$.moz', paypal='moz.pal')
self.url = self.addon.get_dev_url('payments')
assert self.client.login(username='<EMAIL>', password='password')
self.paypal_mock = mock.Mock()
self.paypal_mock.return_value = (True, None)
paypal.check_paypal_id = self.paypal_mock
def get_addon(self):
return Addon.objects.no_cache().get(id=3615)
def post(self, *args, **kw):
d = dict(*args, **kw)
eq_(self.client.post(self.url, d).status_code, 302)
def check(self, **kw):
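        """Reload the add-on, compare the given attribute values and make
        sure it both wants and takes contributions."""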
addon = self.get_addon()
for k, v in kw.items():
eq_(getattr(addon, k), v)
assert addon.wants_contributions
assert addon.takes_contributions
def test_logging(self):
count = ActivityLog.objects.all().count()
self.post(recipient='dev', suggested_amount=2, paypal_id='greed@dev',
annoying=amo.CONTRIB_AFTER)
eq_(ActivityLog.objects.all().count(), count + 1)
def test_success_dev(self):
self.post(recipient='dev', suggested_amount=2, paypal_id='greed@dev',
annoying=amo.CONTRIB_AFTER)
self.check(paypal_id='greed@dev', suggested_amount=2,
annoying=amo.CONTRIB_AFTER)
def test_success_foundation(self):
self.post(recipient='moz', suggested_amount=2,
annoying=amo.CONTRIB_ROADBLOCK)
self.check(paypal_id='', suggested_amount=2,
charity=self.foundation, annoying=amo.CONTRIB_ROADBLOCK)
def test_success_charity(self):
d = dict(recipient='org', suggested_amount=11.5,
annoying=amo.CONTRIB_PASSIVE)
d.update({'charity-name': 'fligtar fund',
'charity-url': 'http://feed.me',
'charity-paypal': 'greed@org'})
self.post(d)
self.check(paypal_id='', suggested_amount=Decimal('11.50'),
charity=Charity.objects.get(name='fligtar fund'))
def test_dev_paypal_id_length(self):
r = self.client.get(self.url)
doc = pq(r.content)
eq_(int(doc('#id_paypal_id').attr('size')), 50)
def test_dev_paypal_reqd(self):
d = dict(recipient='dev', suggested_amount=2,
annoying=amo.CONTRIB_PASSIVE)
r = self.client.post(self.url, d)
self.assertFormError(r, 'contrib_form', 'paypal_id',
'PayPal ID required to accept contributions.')
def test_bad_paypal_id_dev(self):
self.paypal_mock.return_value = False, 'error'
d = dict(recipient='dev', suggested_amount=2, paypal_id='greed@dev',
annoying=amo.CONTRIB_AFTER)
r = self.client.post(self.url, d)
self.assertFormError(r, 'contrib_form', 'paypal_id', 'error')
def test_bad_paypal_id_charity(self):
self.paypal_mock.return_value = False, 'error'
d = dict(recipient='org', suggested_amount=11.5,
annoying=amo.CONTRIB_PASSIVE)
d.update({'charity-name': 'fligtar fund',
'charity-url': 'http://feed.me',
'charity-paypal': '<EMAIL>'})
r = self.client.post(self.url, d)
self.assertFormError(r, 'charity_form', 'paypal', 'error')
def test_paypal_timeout(self):
self.paypal_mock.side_effect = socket.timeout()
d = dict(recipient='dev', suggested_amount=2, paypal_id='greed<PASSWORD>',
annoying=amo.CONTRIB_AFTER)
r = self.client.post(self.url, d)
self.assertFormError(r, 'contrib_form', 'paypal_id',
'Could not validate PayPal id.')
def test_max_suggested_amount(self):
too_much = settings.MAX_CONTRIBUTION + 1
msg = ('Please enter a suggested amount less than $%d.' %
settings.MAX_CONTRIBUTION)
r = self.client.post(self.url, {'suggested_amount': too_much})
self.assertFormError(r, 'contrib_form', 'suggested_amount', msg)
def test_neg_suggested_amount(self):
msg = 'Please enter a suggested amount greater than 0.'
r = self.client.post(self.url, {'suggested_amount': -1})
self.assertFormError(r, 'contrib_form', 'suggested_amount', msg)
def test_charity_details_reqd(self):
d = dict(recipient='org', suggested_amount=11.5,
annoying=amo.CONTRIB_PASSIVE)
r = self.client.post(self.url, d)
self.assertFormError(r, 'charity_form', 'name',
'This field is required.')
eq_(self.get_addon().suggested_amount, None)
def test_switch_charity_to_dev(self):
self.test_success_charity()
self.test_success_dev()
eq_(self.get_addon().charity, None)
eq_(self.get_addon().charity_id, None)
def test_switch_charity_to_foundation(self):
self.test_success_charity()
self.test_success_foundation()
# This will break if we start cleaning up licenses.
old_charity = Charity.objects.get(name='fligtar fund')
        assert old_charity.id != self.foundation.id
def test_switch_foundation_to_charity(self):
self.test_success_foundation()
self.test_success_charity()
moz = Charity.objects.get(id=self.foundation.id)
eq_(moz.name, 'moz')
eq_(moz.url, '$$.moz')
eq_(moz.paypal, 'moz.pal')
def test_contrib_form_initial(self):
eq_(ContribForm.initial(self.addon)['recipient'], 'dev')
self.addon.charity = self.foundation
eq_(ContribForm.initial(self.addon)['recipient'], 'moz')
self.addon.charity_id = amo.FOUNDATION_ORG + 1
eq_(ContribForm.initial(self.addon)['recipient'], 'org')
eq_(ContribForm.initial(self.addon)['annoying'], amo.CONTRIB_PASSIVE)
self.addon.annoying = amo.CONTRIB_AFTER
eq_(ContribForm.initial(self.addon)['annoying'], amo.CONTRIB_AFTER)
def test_enable_thankyou(self):
d = dict(enable_thankyou='on', thankyou_note='woo',
annoying=1, recipient='moz')
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
addon = self.get_addon()
eq_(addon.enable_thankyou, True)
eq_(unicode(addon.thankyou_note), 'woo')
def test_enable_thankyou_unchecked_with_text(self):
d = dict(enable_thankyou='', thankyou_note='woo',
annoying=1, recipient='moz')
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
addon = self.get_addon()
eq_(addon.enable_thankyou, False)
eq_(addon.thankyou_note, None)
def test_contribution_link(self):
self.test_success_foundation()
r = self.client.get(self.url)
doc = pq(r.content)
span = doc('#status-bar').find('span')
eq_(span.length, 1)
assert span.text().startswith('Your contribution page: ')
a = span.find('a')
eq_(a.length, 1)
eq_(a.attr('href'), reverse('addons.about',
args=[self.get_addon().slug]))
eq_(a.text(), url_reverse('addons.about', self.get_addon().slug,
host=settings.SITE_URL))
def test_enable_thankyou_no_text(self):
d = dict(enable_thankyou='on', thankyou_note='',
annoying=1, recipient='moz')
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
addon = self.get_addon()
eq_(addon.enable_thankyou, False)
eq_(addon.thankyou_note, None)
def test_no_future(self):
self.get_addon().update(the_future=None)
res = self.client.get(self.url)
err = pq(res.content)('p.error').text()
eq_('completed developer profile' in err, True)
def test_addon_public(self):
self.get_addon().update(status=amo.STATUS_PUBLIC)
res = self.client.get(self.url)
doc = pq(res.content)
eq_(doc('#do-setup').text(), 'Set up Contributions')
def test_voluntary_contributions_addons(self):
r = self.client.get(self.url)
doc = pq(r.content)
eq_(doc('.intro').length, 1)
eq_(doc('.intro.full-intro').length, 0)
def test_no_voluntary_contributions_for_unlisted_addons(self):
self.addon.update(is_listed=False)
r = self.client.get(self.url)
doc = pq(r.content)
assert doc('.intro').length == 1
assert doc('.intro.full-intro').length == 0
assert not doc('#do-setup') # No way to setup the payment.
assert doc('.intro .error').text() == (
'Contributions are only available for listed add-ons.')
class TestDisablePayments(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestDisablePayments, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.addon.the_reason = self.addon.the_future = '...'
self.addon.save()
self.addon.update(wants_contributions=True, paypal_id='woohoo')
self.pay_url = self.addon.get_dev_url('payments')
self.disable_url = self.addon.get_dev_url('payments.disable')
assert self.client.login(username='<EMAIL>', password='password')
def test_statusbar_visible(self):
r = self.client.get(self.pay_url)
self.assertContains(r, '<div id="status-bar">')
self.addon.update(wants_contributions=False)
r = self.client.get(self.pay_url)
self.assertNotContains(r, '<div id="status-bar">')
def test_disable(self):
r = self.client.post(self.disable_url)
eq_(r.status_code, 302)
assert(r['Location'].endswith(self.pay_url))
eq_(Addon.objects.no_cache().get(id=3615).wants_contributions, False)
class TestPaymentsProfile(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestPaymentsProfile, self).setUp()
self.addon = a = self.get_addon()
self.url = self.addon.get_dev_url('payments')
# Make sure all the payment/profile data is clear.
assert not (a.wants_contributions or a.paypal_id or a.the_reason
or a.the_future or a.takes_contributions)
assert self.client.login(username='<EMAIL>', password='password')
self.paypal_mock = mock.Mock()
self.paypal_mock.return_value = (True, None)
paypal.check_paypal_id = self.paypal_mock
def get_addon(self):
return Addon.objects.get(id=3615)
def test_intro_box(self):
# We don't have payments/profile set up, so we see the intro.
doc = pq(self.client.get(self.url).content)
assert doc('.intro')
assert doc('#setup.hidden')
def test_status_bar(self):
# We don't have payments/profile set up, so no status bar.
doc = pq(self.client.get(self.url).content)
assert not doc('#status-bar')
def test_profile_form_exists(self):
doc = pq(self.client.get(self.url).content)
assert doc('#trans-the_reason')
assert doc('#trans-the_future')
def test_profile_form_success(self):
d = dict(recipient='dev', suggested_amount=2, paypal_id='xx@yy',
annoying=amo.CONTRIB_ROADBLOCK, the_reason='xxx',
the_future='yyy')
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
# The profile form is gone, we're accepting contributions.
doc = pq(self.client.get(self.url).content)
assert not doc('.intro')
assert not doc('#setup.hidden')
assert doc('#status-bar')
assert not doc('#trans-the_reason')
assert not doc('#trans-the_future')
addon = self.get_addon()
eq_(unicode(addon.the_reason), 'xxx')
eq_(unicode(addon.the_future), 'yyy')
eq_(addon.wants_contributions, True)
def test_profile_required(self):
def check_page(request):
doc = pq(request.content)
assert not doc('.intro')
assert not doc('#setup.hidden')
assert not doc('#status-bar')
assert doc('#trans-the_reason')
assert doc('#trans-the_future')
d = dict(recipient='dev', suggested_amount=2, paypal_id='xx<PASSWORD>',
annoying=amo.CONTRIB_ROADBLOCK)
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'profile_form', 'the_reason',
'This field is required.')
self.assertFormError(r, 'profile_form', 'the_future',
'This field is required.')
check_page(r)
eq_(self.get_addon().wants_contributions, False)
d = dict(recipient='dev', suggested_amount=2, paypal_id='xx@yy',
annoying=amo.CONTRIB_ROADBLOCK, the_reason='xxx')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'profile_form', 'the_future',
'This field is required.')
check_page(r)
eq_(self.get_addon().wants_contributions, False)
class TestDelete(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestDelete, self).setUp()
self.get_addon = lambda: Addon.objects.filter(id=3615)
assert self.client.login(username='<EMAIL>', password='password')
self.user = UserProfile.objects.get(email='<EMAIL>')
self.get_url = lambda: self.get_addon()[0].get_dev_url('delete')
def make_theme(self):
theme = addon_factory(
name='xpi name', type=amo.ADDON_PERSONA, slug='theme-slug')
theme.authors.through.objects.create(addon=theme, user=self.user)
return theme
def test_post_not(self):
r = self.client.post(self.get_url(), follow=True)
eq_(pq(r.content)('.notification-box').text(),
'URL name was incorrect. Add-on was not deleted.')
eq_(self.get_addon().exists(), True)
def test_post(self):
self.get_addon().get().update(slug='addon-slug')
r = self.client.post(self.get_url(), {'slug': 'addon-slug'},
follow=True)
eq_(pq(r.content)('.notification-box').text(), 'Add-on deleted.')
eq_(self.get_addon().exists(), False)
def test_post_wrong_slug(self):
self.get_addon().get().update(slug='addon-slug')
r = self.client.post(self.get_url(), {'slug': 'theme-slug'},
follow=True)
eq_(pq(r.content)('.notification-box').text(),
'URL name was incorrect. Add-on was not deleted.')
eq_(self.get_addon().exists(), True)
def test_post_theme(self):
theme = self.make_theme()
r = self.client.post(
theme.get_dev_url('delete'), {'slug': 'theme-slug'}, follow=True)
eq_(pq(r.content)('.notification-box').text(), 'Theme deleted.')
eq_(Addon.objects.filter(id=theme.id).exists(), False)
def test_post_theme_wrong_slug(self):
theme = self.make_theme()
r = self.client.post(
theme.get_dev_url('delete'), {'slug': 'addon-slug'}, follow=True)
eq_(pq(r.content)('.notification-box').text(),
'URL name was incorrect. Theme was not deleted.')
eq_(Addon.objects.filter(id=theme.id).exists(), True)
class TestHome(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestHome, self).setUp()
assert self.client.login(username='<EMAIL>', password='password')
self.url = reverse('devhub.index')
self.addon = Addon.objects.get(pk=3615)
def get_pq(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
return pq(r.content)
def test_addons(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
self.assertTemplateUsed(r, 'devhub/index.html')
def test_editor_promo(self):
eq_(self.get_pq()('#devhub-sidebar #editor-promo').length, 1)
def test_no_editor_promo(self):
Addon.objects.all().delete()
# Regular users (non-devs) should not see this promo.
eq_(self.get_pq()('#devhub-sidebar #editor-promo').length, 0)
def test_my_addons(self):
statuses = [(amo.STATUS_NOMINATED, amo.STATUS_NOMINATED),
(amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED),
(amo.STATUS_LITE, amo.STATUS_UNREVIEWED)]
for addon_status in statuses:
file = self.addon.latest_version.files.all()[0]
file.update(status=addon_status[1])
self.addon.update(status=addon_status[0])
doc = self.get_pq()
addon_item = doc('#my-addons .addon-item')
eq_(addon_item.length, 1)
eq_(addon_item.find('.addon-name').attr('href'),
self.addon.get_dev_url('edit'))
if self.addon.is_listed:
            # We don't display a link to the nonexistent public page for
            # unlisted addons.
eq_(addon_item.find('p').eq(3).find('a').attr('href'),
self.addon.current_version.get_url_path())
eq_('Queue Position: 1 of 1', addon_item.find('p').eq(4).text())
eq_(addon_item.find('.upload-new-version a').attr('href'),
self.addon.get_dev_url('versions') + '#version-upload')
self.addon.status = statuses[1][0]
self.addon.save()
doc = self.get_pq()
addon_item = doc('#my-addons .addon-item')
eq_('Status: ' + unicode(
self.addon.STATUS_CHOICES[self.addon.status]),
addon_item.find('p').eq(1).text())
Addon.with_unlisted.all().delete()
eq_(self.get_pq()('#my-addons').length, 0)
def test_my_unlisted_addons(self):
self.addon.update(is_listed=False)
self.test_my_addons() # Run the test again but with an unlisted addon.
def test_incomplete_no_new_version(self):
def no_link():
doc = self.get_pq()
addon_item = doc('#my-addons .addon-item')
eq_(addon_item.length, 1)
eq_(addon_item.find('.upload-new-version').length, 0)
self.addon.update(status=amo.STATUS_NULL)
submit_step = SubmitStep.objects.create(addon=self.addon, step=6)
no_link()
submit_step.delete()
self.addon.update(status=amo.STATUS_DISABLED)
no_link()
self.addon.update(status=amo.STATUS_PUBLIC, disabled_by_user=True)
no_link()
class TestActivityFeed(TestCase):
fixtures = ('base/users', 'base/addon_3615')
def setUp(self):
super(TestActivityFeed, self).setUp()
assert self.client.login(username='<EMAIL>', password='password')
self.addon = Addon.objects.get(id=3615)
self.version = self.addon.versions.first()
def test_feed_for_all(self):
r = self.client.get(reverse('devhub.feed_all'))
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('header h2').text(), 'Recent Activity for My Add-ons')
eq_(doc('#breadcrumbs li:eq(2)').text(), 'Recent Activity')
def test_feed_for_addon(self):
r = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('header h2').text(),
'Recent Activity for %s' % self.addon.name)
eq_(doc('#breadcrumbs li:eq(3)').text(), self.addon.slug)
def test_feed_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
r = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
eq_(r.status_code, 200)
def test_feed_disabled_anon(self):
self.client.logout()
r = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
eq_(r.status_code, 302)
def add_log(self, action=amo.LOG.ADD_REVIEW):
amo.set_user(UserProfile.objects.get(email='<EMAIL>'))
amo.log(action, self.addon, self.version)
def add_hidden_log(self, action=amo.LOG.COMMENT_VERSION):
self.add_log(action=action)
def test_feed_hidden(self):
self.add_hidden_log()
self.add_hidden_log(amo.LOG.OBJECT_ADDED)
res = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(res.content)
eq_(len(doc('#recent-activity li.item')), 0)
def test_addons_hidden(self):
self.add_hidden_log()
self.add_hidden_log(amo.LOG.OBJECT_ADDED)
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
eq_(len(doc('.recent-activity li.item')), 0)
def test_unlisted_addons_dashboard(self):
"""Unlisted addons are displayed in the feed on the dashboard page."""
self.addon.update(is_listed=False)
self.add_log()
res = self.client.get(reverse('devhub.addons'))
doc = pq(res.content)
eq_(len(doc('.recent-activity li.item')), 1)
def test_unlisted_addons_feed_sidebar(self):
"""Unlisted addons are displayed in the left side in the feed page."""
self.addon.update(is_listed=False)
self.add_log()
res = self.client.get(reverse('devhub.feed_all'))
doc = pq(res.content)
eq_(len(doc('#refine-addon li')), 2) # First li is "All My Add-ons".
def test_unlisted_addons_feed(self):
"""Unlisted addons are displayed in the feed page."""
self.addon.update(is_listed=False)
self.add_log()
res = self.client.get(reverse('devhub.feed_all'))
doc = pq(res.content)
eq_(len(doc('#recent-activity .item')), 1)
def test_unlisted_addons_feed_filter(self):
"""Feed page can be filtered on unlisted addon."""
self.addon.update(is_listed=False)
self.add_log()
res = self.client.get(reverse('devhub.feed', args=[self.addon.slug]))
doc = pq(res.content)
eq_(len(doc('#recent-activity .item')), 1)
class TestProfileBase(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestProfileBase, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.version = self.addon.current_version
self.url = self.addon.get_dev_url('profile')
assert self.client.login(username='<EMAIL>', password='password')
def get_addon(self):
return Addon.objects.no_cache().get(id=self.addon.id)
def enable_addon_contributions(self):
self.addon.wants_contributions = True
self.addon.paypal_id = 'somebody'
self.addon.save()
def post(self, *args, **kw):
d = dict(*args, **kw)
eq_(self.client.post(self.url, d).status_code, 302)
def check(self, **kw):
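        """Reload the add-on and compare the given attributes; translated
        fields (the_reason, the_future) are compared via their localized
        string."""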
addon = self.get_addon()
for k, v in kw.items():
if k in ('the_reason', 'the_future'):
eq_(getattr(getattr(addon, k), 'localized_string'), unicode(v))
else:
eq_(getattr(addon, k), v)
class TestProfileStatusBar(TestProfileBase):
def setUp(self):
super(TestProfileStatusBar, self).setUp()
self.remove_url = self.addon.get_dev_url('profile.remove')
def test_no_status_bar(self):
self.addon.the_reason = self.addon.the_future = None
self.addon.save()
assert not pq(self.client.get(self.url).content)('#status-bar')
def test_status_bar_no_contrib(self):
self.addon.the_reason = self.addon.the_future = '...'
self.addon.wants_contributions = False
self.addon.save()
doc = pq(self.client.get(self.url).content)
assert doc('#status-bar')
eq_(doc('#status-bar button').text(), 'Remove Profile')
def test_status_bar_with_contrib(self):
self.addon.the_reason = self.addon.the_future = '...'
self.addon.wants_contributions = True
self.addon.paypal_id = 'xxx'
self.addon.save()
doc = pq(self.client.get(self.url).content)
assert doc('#status-bar')
eq_(doc('#status-bar button').text(), 'Remove Both')
def test_remove_profile(self):
self.addon.the_reason = self.addon.the_future = '...'
self.addon.save()
self.client.post(self.remove_url)
addon = self.get_addon()
eq_(addon.the_reason, None)
eq_(addon.the_future, None)
eq_(addon.takes_contributions, False)
eq_(addon.wants_contributions, False)
def test_remove_profile_without_content(self):
# See bug 624852
self.addon.the_reason = self.addon.the_future = None
self.addon.save()
self.client.post(self.remove_url)
addon = self.get_addon()
eq_(addon.the_reason, None)
eq_(addon.the_future, None)
def test_remove_both(self):
self.addon.the_reason = self.addon.the_future = '...'
self.addon.wants_contributions = True
self.addon.paypal_id = 'xxx'
self.addon.save()
self.client.post(self.remove_url)
addon = self.get_addon()
eq_(addon.the_reason, None)
eq_(addon.the_future, None)
eq_(addon.takes_contributions, False)
eq_(addon.wants_contributions, False)
class TestProfile(TestProfileBase):
def test_without_contributions_labels(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('label[for=the_reason] .optional').length, 1)
eq_(doc('label[for=the_future] .optional').length, 1)
def test_without_contributions_fields_optional(self):
self.post(the_reason='', the_future='')
self.check(the_reason='', the_future='')
self.post(the_reason='to be cool', the_future='')
self.check(the_reason='to be cool', the_future='')
self.post(the_reason='', the_future='hot stuff')
self.check(the_reason='', the_future='hot stuff')
self.post(the_reason='to be hot', the_future='cold stuff')
self.check(the_reason='to be hot', the_future='cold stuff')
def test_with_contributions_labels(self):
self.enable_addon_contributions()
r = self.client.get(self.url)
doc = pq(r.content)
assert doc('label[for=the_reason] .req').length, (
'the_reason field should be required.')
assert doc('label[for=the_future] .req').length, (
'the_future field should be required.')
def test_log(self):
self.enable_addon_contributions()
d = dict(the_reason='because', the_future='i can')
o = ActivityLog.objects
eq_(o.count(), 0)
self.client.post(self.url, d)
eq_(o.filter(action=amo.LOG.EDIT_PROPERTIES.id).count(), 1)
def test_with_contributions_fields_required(self):
self.enable_addon_contributions()
d = dict(the_reason='', the_future='')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'profile_form', 'the_reason',
'This field is required.')
self.assertFormError(r, 'profile_form', 'the_future',
'This field is required.')
d = dict(the_reason='to be cool', the_future='')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'profile_form', 'the_future',
'This field is required.')
d = dict(the_reason='', the_future='hot stuff')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'profile_form', 'the_reason',
'This field is required.')
self.post(the_reason='to be hot', the_future='cold stuff')
self.check(the_reason='to be hot', the_future='cold stuff')
class TestSubmitBase(TestCase):
fixtures = ['base/addon_3615', 'base/addon_5579', 'base/users']
def setUp(self):
super(TestSubmitBase, self).setUp()
assert self.client.login(username='<EMAIL>', password='password')
self.user = UserProfile.objects.get(email='<EMAIL>')
self.addon = self.get_addon()
def get_addon(self):
return Addon.with_unlisted.no_cache().get(pk=3615)
def get_version(self):
return self.get_addon().versions.get()
def get_step(self):
return SubmitStep.objects.get(addon=self.get_addon())
class TestAPIAgreement(TestSubmitBase):
def setUp(self):
super(TestAPIAgreement, self).setUp()
self.user = UserProfile.objects.get(email='<EMAIL>')
self.create_switch('signing-api')
def test_agreement_first(self):
render_agreement_path = 'olympia.devhub.views.render_agreement'
with mock.patch(render_agreement_path) as mock_submit:
mock_submit.return_value = http.HttpResponse("Okay")
self.client.get(reverse('devhub.api_key_agreement'))
assert mock_submit.called
def test_agreement_second(self):
self.user.update(read_dev_agreement=None)
response = self.client.post(reverse('devhub.api_key_agreement'),
follow=True)
self.assert3xx(response, reverse('devhub.api_key'))
class TestAPIKeyPage(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestAPIKeyPage, self).setUp()
self.url = reverse('devhub.api_key')
assert self.client.login(username='<EMAIL>', password='password')
self.user = UserProfile.objects.get(email='<EMAIL>')
self.create_switch('signing-api')
def test_key_redirect(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.api_key'))
self.assert3xx(response, reverse('devhub.api_key_agreement'))
def test_view_without_credentials(self):
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
submit = doc('#generate-key')
assert submit.text() == 'Generate new credentials'
inputs = doc('.api-input input')
assert len(inputs) == 0, 'Inputs should be hidden before keys exist'
def test_view_with_credentials(self):
APIKey.objects.create(user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
submit = doc('#generate-key')
assert submit.text() == 'Revoke and regenerate credentials'
key_input = doc('.key-input input').val()
assert key_input == 'some-jwt-key'
def test_create_new_credentials(self):
patch = mock.patch('olympia.devhub.views.APIKey.new_jwt_credentials')
with patch as mock_creator:
response = self.client.post(self.url)
mock_creator.assert_called_with(self.user)
email = mail.outbox[0]
assert len(mail.outbox) == 1
assert email.to == [self.user.email]
assert reverse('devhub.api_key') in email.body
self.assert3xx(response, self.url)
def test_delete_and_recreate_credentials(self):
old_key = APIKey.objects.create(user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='some-jwt-key',
secret='some-jwt-secret')
response = self.client.post(self.url)
self.assert3xx(response, self.url)
old_key = APIKey.objects.get(pk=old_key.pk)
assert not old_key.is_active
new_key = APIKey.get_jwt_key(user=self.user)
assert new_key.key != old_key.key
assert new_key.secret != old_key.secret
class TestSubmitStep1(TestSubmitBase):
def test_step1_submit(self):
self.user.update(read_dev_agreement=None)
response = self.client.get(reverse('devhub.submit.1'))
eq_(response.status_code, 200)
doc = pq(response.content)
eq_(doc('#breadcrumbs a').eq(1).attr('href'), reverse('devhub.addons'))
links = doc('#agreement-container a')
assert links
for ln in links:
href = ln.attrib['href']
assert not href.startswith('%'), (
"Looks like link %r to %r is still a placeholder" %
(href, ln.text))
def test_read_dev_agreement_set(self):
"""Store current date when the user agrees with the user agreement."""
self.user.update(read_dev_agreement=None)
response = self.client.post(reverse('devhub.submit.1'), follow=True)
user = response.context['user']
self.assertCloseToNow(user.read_dev_agreement)
def test_read_dev_agreement_skip(self):
# The current user fixture has already read the agreement so we skip
response = self.client.get(reverse('devhub.submit.1'))
self.assert3xx(response, reverse('devhub.submit.2'))
class TestSubmitStep2(TestCase):
# More tests in TestCreateAddon.
fixtures = ['base/users']
def setUp(self):
super(TestSubmitStep2, self).setUp()
self.client.login(username='<EMAIL>', password='password')
self.user = UserProfile.objects.get(email='<EMAIL>')
def test_step_2_seen(self):
r = self.client.post(reverse('devhub.submit.1'))
self.assert3xx(r, reverse('devhub.submit.2'))
r = self.client.get(reverse('devhub.submit.2'))
eq_(r.status_code, 200)
def test_step_2_not_seen(self):
# We require a cookie that gets set in step 1.
self.user.update(read_dev_agreement=None)
r = self.client.get(reverse('devhub.submit.2'), follow=True)
self.assert3xx(r, reverse('devhub.submit.1'))
def test_step_2_listed_checkbox(self):
# There is a checkbox for the "is_listed" addon field.
self.client.post(reverse('devhub.submit.1'))
response = self.client.get(reverse('devhub.submit.2'))
eq_(response.status_code, 200)
doc = pq(response.content)
assert doc('.list-addon input#id_is_unlisted[type=checkbox]')
        # There is also a checkbox to select full review (side-load) or prelim.
assert doc('.list-addon input#id_is_sideload[type=checkbox]')
class TestSubmitStep3(TestSubmitBase):
def setUp(self):
super(TestSubmitStep3, self).setUp()
self.url = reverse('devhub.submit.3', args=['a3615'])
SubmitStep.objects.create(addon_id=3615, step=3)
AddonCategory.objects.filter(
addon=self.get_addon(),
category=Category.objects.get(id=23)).delete()
AddonCategory.objects.filter(
addon=self.get_addon(),
category=Category.objects.get(id=24)).delete()
ctx = self.client.get(self.url).context['cat_form']
self.cat_initial = initial(ctx.initial_forms[0])
def get_dict(self, **kw):
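        """Build the step 3 POST data: the describe fields plus the
        category formset, with overrides passed as keyword arguments."""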
cat_initial = kw.pop('cat_initial', self.cat_initial)
fs = formset(cat_initial, initial_count=1)
result = {'name': '<NAME>', 'slug': 'testname',
'description': 'desc', 'summary': 'Hello!'}
result.update(**kw)
result.update(fs)
return result
def test_submit_success(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
# Post and be redirected.
d = self.get_dict()
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
eq_(self.get_step().step, 4)
addon = self.get_addon()
eq_(addon.name, '<NAME>')
eq_(addon.slug, 'testname')
eq_(addon.description, 'desc')
eq_(addon.summary, 'Hello!')
# Test add-on log activity.
log_items = ActivityLog.objects.for_addons(addon)
assert not log_items.filter(action=amo.LOG.EDIT_DESCRIPTIONS.id), (
"Creating a description needn't be logged.")
def test_submit_unlisted_addon(self):
self.addon.update(is_listed=False)
response = self.client.get(self.url)
assert response.status_code == 200
# Post and be redirected.
response = self.client.post(self.url, {'name': 'unlisted addon',
'slug': 'unlisted-addon',
'summary': 'summary'})
assert response.status_code == 302
assert response.url.endswith(reverse('devhub.submit.7',
args=['unlisted-addon']))
# Unlisted addons don't need much info, and their queue is chosen
# automatically on step 2, so we skip steps 4, 5 and 6. We thus have no
# more steps at that point.
assert not SubmitStep.objects.filter(addon=self.addon).exists()
addon = self.get_addon()
assert addon.name == 'unlisted addon'
assert addon.slug == 'unlisted-addon'
assert addon.summary == 'summary'
# Test add-on log activity.
log_items = ActivityLog.objects.for_addons(addon)
assert not log_items.filter(action=amo.LOG.EDIT_DESCRIPTIONS.id), (
"Creating a description needn't be logged.")
def test_submit_name_unique(self):
# Make sure name is unique.
r = self.client.post(self.url, self.get_dict(name='Cooliris'))
error = 'This name is already in use. Please choose another.'
self.assertFormError(r, 'form', 'name', error)
def test_submit_name_unique_only_for_listed(self):
"""A listed add-on can use the same name as unlisted add-ons."""
# Change the existing add-on with the 'Cooliris' name to be unlisted.
Addon.objects.get(name__localized_string='Cooliris').update(
is_listed=False)
assert get_addon_count('Cooliris') == 1
# It's allowed for the '3615' listed add-on to reuse the same name as
# the other 'Cooliris' unlisted add-on.
response = self.client.post(self.url, self.get_dict(name='Cooliris'))
assert response.status_code == 302
assert get_addon_count('Cooliris') == 2
def test_submit_unlisted_name_not_unique(self):
"""Unlisted add-ons names aren't unique."""
# Change the existing add-on with the 'Cooliris' name to be unlisted.
Addon.objects.get(name__localized_string='Cooliris').update(
is_listed=False)
# Change the '3615' add-on to be unlisted.
Addon.objects.get(pk=3615).update(is_listed=False)
assert get_addon_count('Cooliris') == 1
# It's allowed for the '3615' unlisted add-on to reuse the same name as
# the other 'Cooliris' unlisted add-on.
response = self.client.post(self.url, self.get_dict(name='Cooliris'))
assert response.status_code == 302
assert get_addon_count('Cooliris') == 2
def test_submit_name_unique_strip(self):
# Make sure we can't sneak in a name by adding a space or two.
r = self.client.post(self.url, self.get_dict(name=' Cooliris '))
error = 'This name is already in use. Please choose another.'
self.assertFormError(r, 'form', 'name', error)
def test_submit_name_unique_case(self):
# Make sure unique names aren't case sensitive.
r = self.client.post(self.url, self.get_dict(name='cooliris'))
error = 'This name is already in use. Please choose another.'
self.assertFormError(r, 'form', 'name', error)
def test_submit_name_length(self):
# Make sure the name isn't too long.
d = self.get_dict(name='a' * 51)
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
error = 'Ensure this value has at most 50 characters (it has 51).'
self.assertFormError(r, 'form', 'name', error)
def test_submit_slug_invalid(self):
# Submit an invalid slug.
d = self.get_dict(slug='slug!!! aksl23%%')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'form', 'slug', "Enter a valid 'slug' " +
"consisting of letters, numbers, underscores or "
"hyphens.")
def test_submit_slug_required(self):
# Make sure the slug is required.
r = self.client.post(self.url, self.get_dict(slug=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form', 'slug', 'This field is required.')
def test_submit_summary_required(self):
# Make sure summary is required.
r = self.client.post(self.url, self.get_dict(summary=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form', 'summary', 'This field is required.')
def test_submit_summary_length(self):
# Summary is too long.
r = self.client.post(self.url, self.get_dict(summary='a' * 251))
eq_(r.status_code, 200)
error = 'Ensure this value has at most 250 characters (it has 251).'
self.assertFormError(r, 'form', 'summary', error)
def test_submit_categories_required(self):
del self.cat_initial['categories']
r = self.client.post(self.url,
self.get_dict(cat_initial=self.cat_initial))
eq_(r.context['cat_form'].errors[0]['categories'],
['This field is required.'])
def test_submit_categories_max(self):
eq_(amo.MAX_CATEGORIES, 2)
self.cat_initial['categories'] = [22, 23, 24]
r = self.client.post(self.url,
self.get_dict(cat_initial=self.cat_initial))
eq_(r.context['cat_form'].errors[0]['categories'],
['You can have only 2 categories.'])
def test_submit_categories_add(self):
eq_([c.id for c in self.get_addon().all_categories], [22])
self.cat_initial['categories'] = [22, 23]
self.client.post(self.url, self.get_dict())
addon_cats = self.get_addon().categories.values_list('id', flat=True)
eq_(sorted(addon_cats), [22, 23])
def test_submit_categories_addandremove(self):
AddonCategory(addon=self.addon, category_id=23).save()
eq_([c.id for c in self.get_addon().all_categories], [22, 23])
self.cat_initial['categories'] = [22, 24]
self.client.post(self.url, self.get_dict(cat_initial=self.cat_initial))
category_ids_new = [c.id for c in self.get_addon().all_categories]
eq_(category_ids_new, [22, 24])
def test_submit_categories_remove(self):
c = Category.objects.get(id=23)
AddonCategory(addon=self.addon, category=c).save()
eq_([a.id for a in self.get_addon().all_categories], [22, 23])
self.cat_initial['categories'] = [22]
self.client.post(self.url, self.get_dict(cat_initial=self.cat_initial))
category_ids_new = [cat.id for cat in self.get_addon().all_categories]
eq_(category_ids_new, [22])
def test_check_version(self):
r = self.client.get(self.url)
doc = pq(r.content)
version = doc("#current_version").val()
eq_(version, self.addon.current_version.version)
class TestSubmitStep4(TestSubmitBase):
def setUp(self):
super(TestSubmitStep4, self).setUp()
SubmitStep.objects.create(addon_id=3615, step=4)
self.url = reverse('devhub.submit.4', args=['a3615'])
self.next_step = reverse('devhub.submit.5', args=['a3615'])
self.icon_upload = reverse('devhub.addons.upload_icon',
args=['a3615'])
self.preview_upload = reverse('devhub.addons.upload_preview',
args=['a3615'])
def test_get(self):
eq_(self.client.get(self.url).status_code, 200)
def test_post(self):
data = dict(icon_type='')
data_formset = self.formset_media(**data)
r = self.client.post(self.url, data_formset)
eq_(r.status_code, 302)
eq_(self.get_step().step, 5)
def formset_new_form(self, *args, **kw):
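        """Return initial data for a blank preview form, taken from the
        last form of the preview formset on the current page."""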
ctx = self.client.get(self.url).context
blank = initial(ctx['preview_form'].forms[-1])
blank.update(**kw)
return blank
def formset_media(self, *args, **kw):
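        """Build the media formset POST data, appending one blank preview
        form and replacing None values with empty strings."""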
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.formset_new_form()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
def test_icon_upload_attributes(self):
doc = pq(self.client.get(self.url).content)
field = doc('input[name=icon_upload]')
eq_(field.length, 1)
eq_(sorted(field.attr('data-allowed-types').split('|')),
['image/jpeg', 'image/png'])
eq_(field.attr('data-upload-url'), self.icon_upload)
def test_edit_media_defaulticon(self):
data = dict(icon_type='')
data_formset = self.formset_media(**data)
self.client.post(self.url, data_formset)
addon = self.get_addon()
assert addon.get_icon_url(64).endswith('icons/default-64.png')
for k in data:
eq_(unicode(getattr(addon, k)), data[k])
def test_edit_media_preuploadedicon(self):
data = dict(icon_type='icon/appearance')
data_formset = self.formset_media(**data)
self.client.post(self.url, data_formset)
addon = self.get_addon()
eq_('/'.join(addon.get_icon_url(64).split('/')[-2:]),
'addon-icons/appearance-64.png')
for k in data:
eq_(unicode(getattr(addon, k)), data[k])
def test_edit_media_uploadedicon(self):
with open(get_image_path('mozilla.png'), 'rb') as filehandle:
data = {'upload_image': filehandle}
response = self.client.post(self.icon_upload, data)
response_json = json.loads(response.content)
addon = self.get_addon()
# Now, save the form so it gets moved properly.
data = dict(icon_type='image/png',
icon_upload_hash=response_json['upload_hash'])
data_formset = self.formset_media(**data)
self.client.post(self.url, data_formset)
addon = self.get_addon()
# Sad we're hardcoding /3/ here, but that's how the URLs work
_url = addon.get_icon_url(64).split('?')[0]
assert _url.endswith('addon_icons/3/%s-64.png' % addon.id)
eq_(data['icon_type'], 'image/png')
# Check that it was actually uploaded
dirname = os.path.join(user_media_path('addon_icons'),
'%s' % (addon.id / 1000))
dest = os.path.join(dirname, '%s-32.png' % addon.id)
assert storage.exists(dest)
eq_(Image.open(storage.open(dest)).size, (32, 12))
def test_edit_media_uploadedicon_noresize(self):
with open('static/img/notifications/error.png', 'rb') as filehandle:
data = {'upload_image': filehandle}
response = self.client.post(self.icon_upload, data)
response_json = json.loads(response.content)
addon = self.get_addon()
# Now, save the form so it gets moved properly.
data = dict(icon_type='image/png',
icon_upload_hash=response_json['upload_hash'])
data_formset = self.formset_media(**data)
self.client.post(self.url, data_formset)
addon = self.get_addon()
# Sad we're hardcoding /3/ here, but that's how the URLs work
_url = addon.get_icon_url(64).split('?')[0]
assert _url.endswith('addon_icons/3/%s-64.png' % addon.id)
eq_(data['icon_type'], 'image/png')
# Check that it was actually uploaded
dirname = os.path.join(user_media_path('addon_icons'),
'%s' % (addon.id / 1000))
dest = os.path.join(dirname, '%s-64.png' % addon.id)
assert storage.exists(dest)
eq_(Image.open(storage.open(dest)).size, (48, 48))
def test_client_lied(self):
with open(get_image_path('non-animated.gif'), 'rb') as filehandle:
data = {'upload_image': filehandle}
res = self.client.post(self.preview_upload, data)
response_json = json.loads(res.content)
eq_(response_json['errors'][0], u'Images must be either PNG or JPG.')
def test_client_error_triggers_tmp_image_cleanup(self):
with open(get_image_path('non-animated.gif'), 'rb') as filehandle:
data = {'upload_image': filehandle, 'upload_type': 'preview'}
self.client.post(self.preview_upload, data)
assert not os.listdir(os.path.join(settings.TMP_PATH, 'preview'))
def test_image_animated(self):
with open(get_image_path('animated.png'), 'rb') as filehandle:
data = {'upload_image': filehandle}
res = self.client.post(self.preview_upload, data)
response_json = json.loads(res.content)
eq_(response_json['errors'][0], u'Images cannot be animated.')
def test_icon_non_animated(self):
with open(get_image_path('non-animated.png'), 'rb') as filehandle:
data = {'icon_type': 'image/png', 'icon_upload': filehandle}
data_formset = self.formset_media(**data)
res = self.client.post(self.url, data_formset)
eq_(res.status_code, 302)
eq_(self.get_step().step, 5)
class Step5TestBase(TestSubmitBase):
def setUp(self):
super(Step5TestBase, self).setUp()
SubmitStep.objects.create(addon_id=self.addon.id, step=5)
self.url = reverse('devhub.submit.5', args=['a3615'])
self.next_step = reverse('devhub.submit.6', args=['a3615'])
License.objects.create(builtin=3, on_form=True)
class TestSubmitStep5(Step5TestBase):
"""License submission."""
def test_get(self):
eq_(self.client.get(self.url).status_code, 200)
def test_set_license(self):
r = self.client.post(self.url, {'builtin': 3})
self.assert3xx(r, self.next_step)
eq_(self.get_addon().current_version.license.builtin, 3)
eq_(self.get_step().step, 6)
log_items = ActivityLog.objects.for_addons(self.get_addon())
assert not log_items.filter(action=amo.LOG.CHANGE_LICENSE.id), (
"Initial license choice:6 needn't be logged.")
def test_license_error(self):
r = self.client.post(self.url, {'builtin': 4})
eq_(r.status_code, 200)
self.assertFormError(r, 'license_form', 'builtin',
'Select a valid choice. 4 is not one of '
'the available choices.')
eq_(self.get_step().step, 5)
def test_set_eula(self):
self.get_addon().update(eula=None, privacy_policy=None)
r = self.client.post(self.url, dict(builtin=3, has_eula=True,
eula='xxx'))
self.assert3xx(r, self.next_step)
eq_(unicode(self.get_addon().eula), 'xxx')
eq_(self.get_step().step, 6)
def test_set_eula_nomsg(self):
"""
You should not get punished with a 500 for not writing your EULA...
but perhaps you should feel shame for lying to us. This test does not
test for shame.
"""
self.get_addon().update(eula=None, privacy_policy=None)
r = self.client.post(self.url, dict(builtin=3, has_eula=True))
self.assert3xx(r, self.next_step)
eq_(self.get_step().step, 6)
class TestSubmitStep6(TestSubmitBase):
def setUp(self):
super(TestSubmitStep6, self).setUp()
SubmitStep.objects.create(addon_id=3615, step=6)
self.url = reverse('devhub.submit.6', args=['a3615'])
def test_get(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
def test_require_review_type(self):
r = self.client.post(self.url, {'dummy': 'text'})
eq_(r.status_code, 200)
self.assertFormError(r, 'review_type_form', 'review_type',
'A review type must be selected.')
def test_bad_review_type(self):
d = dict(review_type='jetsfool')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'review_type_form', 'review_type',
'Select a valid choice. jetsfool is not one of '
'the available choices.')
def test_prelim_review(self):
d = dict(review_type=amo.STATUS_UNREVIEWED)
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
eq_(self.get_addon().status, amo.STATUS_UNREVIEWED)
assert_raises(SubmitStep.DoesNotExist, self.get_step)
def test_full_review(self):
self.get_version().update(nomination=None)
d = dict(review_type=amo.STATUS_NOMINATED)
r = self.client.post(self.url, d)
eq_(r.status_code, 302)
addon = self.get_addon()
eq_(addon.status, amo.STATUS_NOMINATED)
self.assertCloseToNow(self.get_version().nomination)
assert_raises(SubmitStep.DoesNotExist, self.get_step)
def test_nomination_date_is_only_set_once(self):
# This was a regression, see bug 632191.
# Nominate:
r = self.client.post(self.url, dict(review_type=amo.STATUS_NOMINATED))
eq_(r.status_code, 302)
nomdate = datetime.now() - timedelta(days=5)
self.get_version().update(nomination=nomdate, _signal=False)
# Update something else in the addon:
self.get_addon().update(slug='foobar')
eq_(self.get_version().nomination.timetuple()[0:5],
nomdate.timetuple()[0:5])
class TestSubmitStep7(TestSubmitBase):
def setUp(self):
super(TestSubmitStep7, self).setUp()
self.url = reverse('devhub.submit.7', args=[self.addon.slug])
@mock.patch.object(settings, 'SITE_URL', 'http://b.ro')
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
def test_welcome_email_for_newbies(self, send_welcome_email_mock):
self.client.get(self.url)
context = {
'app': unicode(amo.FIREFOX.pretty),
'detail_url': 'http://b.ro/en-US/firefox/addon/a3615/',
'version_url': 'http://b.ro/en-US/developers/addon/a3615/versions',
'edit_url': 'http://b.ro/en-US/developers/addon/a3615/edit',
'full_review': False,
}
send_welcome_email_mock.assert_called_with(
self.addon.id, ['<EMAIL>'], context)
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay')
def test_no_welcome_email(self, send_welcome_email_mock):
"""You already submitted an add-on? We won't spam again."""
new_addon = Addon.objects.create(type=amo.ADDON_EXTENSION,
status=amo.STATUS_NOMINATED)
new_addon.addonuser_set.create(user=self.addon.authors.all()[0])
self.client.get(self.url)
assert not send_welcome_email_mock.called
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay', new=mock.Mock)
def test_finish_submitting_addon(self):
eq_(self.addon.current_version.supported_platforms, [amo.PLATFORM_ALL])
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
a = doc('a#submitted-addon-url')
url = self.addon.get_url_path()
eq_(a.attr('href'), url)
eq_(a.text(), absolutify(url))
next_steps = doc('.done-next-steps li a')
# edit listing of freshly submitted add-on...
eq_(next_steps.eq(0).attr('href'), self.addon.get_dev_url())
# edit your developer profile...
eq_(next_steps.eq(1).attr('href'), self.addon.get_dev_url('profile'))
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay', new=mock.Mock)
def test_finish_submitting_unlisted_addon(self):
self.addon.update(is_listed=False, status=amo.STATUS_UNREVIEWED)
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
        # For unlisted add-ons, only two links are displayed: the devhub page
        # link and a link to the forum page about wait times.
content = doc('.done-next-steps')
assert len(content('a')) == 2
assert content('a').eq(0).attr('href') == self.addon.get_dev_url()
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay', new=mock.Mock)
def test_finish_submitting_unlisted_addon_signed(self):
self.addon.update(is_listed=False, status=amo.STATUS_PUBLIC)
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
        # For unlisted addons that are already signed, show a URL to the devhub
        # versions page and to the addon listing.
content = doc('.addon-submission-process')
links = content('a')
assert len(links) == 2
assert links[0].attrib['href'] == reverse(
'devhub.versions.edit',
args=[self.addon.slug, self.addon.current_version.id])
assert links[1].attrib['href'] == self.addon.get_dev_url()
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay', new=mock.Mock)
def test_finish_submitting_platform_specific_addon(self):
# mac-only Add-on:
addon = Addon.objects.get(name__localized_string='Cooliris')
addon.addonuser_set.create(user_id=55021)
r = self.client.get(reverse('devhub.submit.7', args=[addon.slug]))
eq_(r.status_code, 200)
next_steps = pq(r.content)('.done-next-steps li a')
# upload more platform specific files...
eq_(next_steps.eq(0).attr('href'),
reverse('devhub.versions.edit',
kwargs=dict(addon_id=addon.slug,
version_id=addon.current_version.id)))
# edit listing of freshly submitted add-on...
eq_(next_steps.eq(1).attr('href'), addon.get_dev_url())
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay', new=mock.Mock)
def test_finish_addon_for_prelim_review(self):
self.addon.update(status=amo.STATUS_UNREVIEWED)
response = self.client.get(self.url)
eq_(response.status_code, 200)
doc = pq(response.content)
intro = doc('.addon-submission-process p').text().strip()
assert 'Preliminary Review' in intro, ('Unexpected intro: %s' % intro)
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay', new=mock.Mock)
def test_finish_addon_for_full_review(self):
self.addon.update(status=amo.STATUS_NOMINATED)
response = self.client.get(self.url)
eq_(response.status_code, 200)
doc = pq(response.content)
intro = doc('.addon-submission-process p').text().strip()
assert 'Full Review' in intro, ('Unexpected intro: %s' % intro)
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay', new=mock.Mock)
def test_incomplete_addon_no_versions(self):
self.addon.update(status=amo.STATUS_NULL)
self.addon.versions.all().delete()
r = self.client.get(self.url, follow=True)
self.assert3xx(r, self.addon.get_dev_url('versions'), 302)
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay', new=mock.Mock)
def test_link_to_activityfeed(self):
r = self.client.get(self.url, follow=True)
doc = pq(r.content)
eq_(doc('.done-next-steps a').eq(2).attr('href'),
reverse('devhub.feed', args=[self.addon.slug]))
@mock.patch('olympia.devhub.tasks.send_welcome_email.delay', new=mock.Mock)
def test_display_non_ascii_url(self):
u = 'フォクすけといっしょ'
self.addon.update(slug=u)
r = self.client.get(reverse('devhub.submit.7', args=[u]))
eq_(r.status_code, 200)
# The meta charset will always be utf-8.
doc = pq(r.content.decode('utf-8'))
eq_(doc('#submitted-addon-url').text(),
u'%s/en-US/firefox/addon/%s/' % (
settings.SITE_URL, u.decode('utf8')))
class TestResumeStep(TestSubmitBase):
def setUp(self):
super(TestResumeStep, self).setUp()
self.url = reverse('devhub.submit.resume', args=['a3615'])
def test_no_step_redirect(self):
r = self.client.get(self.url, follow=True)
self.assert3xx(r, self.addon.get_dev_url('versions'), 302)
def test_step_redirects(self):
SubmitStep.objects.create(addon_id=3615, step=1)
for i in xrange(3, 7):
SubmitStep.objects.filter(addon=self.get_addon()).update(step=i)
r = self.client.get(self.url, follow=True)
self.assert3xx(r, reverse('devhub.submit.%s' % i,
args=['a3615']))
def test_redirect_from_other_pages(self):
SubmitStep.objects.create(addon_id=3615, step=4)
r = self.client.get(reverse('devhub.addons.edit', args=['a3615']),
follow=True)
self.assert3xx(r, reverse('devhub.submit.4', args=['a3615']))
class TestSubmitBump(TestSubmitBase):
def setUp(self):
super(TestSubmitBump, self).setUp()
self.url = reverse('devhub.submit.bump', args=['a3615'])
def test_bump_acl(self):
r = self.client.post(self.url, {'step': 4})
eq_(r.status_code, 403)
def test_bump_submit_and_redirect(self):
assert self.client.login(username='<EMAIL>',
password='password')
r = self.client.post(self.url, {'step': 4}, follow=True)
self.assert3xx(r, reverse('devhub.submit.4', args=['a3615']))
eq_(self.get_step().step, 4)
class TestSubmitSteps(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestSubmitSteps, self).setUp()
assert self.client.login(username='<EMAIL>', password='password')
self.user = UserProfile.objects.get(email='<EMAIL>')
def assert_linked(self, doc, numbers):
"""Check that the nth <li> in the steps list is a link."""
lis = doc('.submit-addon-progress li')
eq_(len(lis), 7)
for idx, li in enumerate(lis):
links = pq(li)('a')
if (idx + 1) in numbers:
eq_(len(links), 1)
else:
eq_(len(links), 0)
def assert_highlight(self, doc, num):
"""Check that the nth <li> is marked as .current."""
lis = doc('.submit-addon-progress li')
assert pq(lis[num - 1]).hasClass('current')
eq_(len(pq('.current', lis)), 1)
def test_step_1(self):
self.user.update(read_dev_agreement=None)
r = self.client.get(reverse('devhub.submit.1'))
eq_(r.status_code, 200)
def test_on_step_6(self):
# Hitting the step we're supposed to be on is a 200.
SubmitStep.objects.create(addon_id=3615, step=6)
r = self.client.get(reverse('devhub.submit.6',
args=['a3615']))
eq_(r.status_code, 200)
def test_skip_step_6(self):
# We get bounced back to step 3.
SubmitStep.objects.create(addon_id=3615, step=3)
r = self.client.get(reverse('devhub.submit.6',
args=['a3615']), follow=True)
self.assert3xx(r, reverse('devhub.submit.3', args=['a3615']))
def test_all_done(self):
# There's no SubmitStep, so we must be done.
r = self.client.get(reverse('devhub.submit.6',
args=['a3615']), follow=True)
self.assert3xx(r, reverse('devhub.submit.7', args=['a3615']))
def test_menu_step_1(self):
self.user.update(read_dev_agreement=None)
doc = pq(self.client.get(reverse('devhub.submit.1')).content)
self.assert_linked(doc, [1])
self.assert_highlight(doc, 1)
def test_menu_step_2(self):
self.client.post(reverse('devhub.submit.1'))
doc = pq(self.client.get(reverse('devhub.submit.2')).content)
self.assert_linked(doc, [1, 2])
self.assert_highlight(doc, 2)
def test_menu_step_3(self):
SubmitStep.objects.create(addon_id=3615, step=3)
url = reverse('devhub.submit.3', args=['a3615'])
doc = pq(self.client.get(url).content)
self.assert_linked(doc, [3])
self.assert_highlight(doc, 3)
def test_menu_step_3_from_6(self):
SubmitStep.objects.create(addon_id=3615, step=6)
url = reverse('devhub.submit.3', args=['a3615'])
doc = pq(self.client.get(url).content)
self.assert_linked(doc, [3, 4, 5, 6])
self.assert_highlight(doc, 3)
def test_menu_step_6(self):
SubmitStep.objects.create(addon_id=3615, step=6)
url = reverse('devhub.submit.6', args=['a3615'])
doc = pq(self.client.get(url).content)
self.assert_linked(doc, [3, 4, 5, 6])
self.assert_highlight(doc, 6)
def test_menu_step_7(self):
url = reverse('devhub.submit.7', args=['a3615'])
doc = pq(self.client.get(url).content)
self.assert_linked(doc, [])
self.assert_highlight(doc, 7)
def test_menu_step_7_unlisted(self):
SubmitStep.objects.create(addon_id=3615, step=7)
Addon.objects.get(pk=3615).update(is_listed=False)
url = reverse('devhub.submit.7', args=['a3615'])
doc = pq(self.client.get(url).content)
self.assert_linked(doc, []) # Last step: no previous step linked.
# Skipped from step 3 to 7, as unlisted add-ons don't need listing
# information. Thus none of the steps from 4 to 6 should be there.
        # For reference, the steps that have the "listed" class (instead of
        # "all") aren't displayed.
assert len(doc('.submit-addon-progress li.all')) == 4
        # Step 7 is thus the 4th visible one in the list.
self.assert_highlight(doc, 7) # Current step is still the 7th.
class TestUpload(BaseUploadTest):
fixtures = ['base/users']
def setUp(self):
super(TestUpload, self).setUp()
assert self.client.login(username='<EMAIL>',
password='password')
self.url = reverse('devhub.upload')
self.image_path = get_image_path('animated.png')
def post(self):
        # Has to be a binary, non-XPI file.
data = open(self.image_path, 'rb')
return self.client.post(self.url, {'upload': data})
def test_login_required(self):
self.client.logout()
r = self.post()
eq_(r.status_code, 302)
def test_create_fileupload(self):
self.post()
upload = FileUpload.objects.filter().order_by('-created').first()
assert 'animated.png' in upload.name
data = open(self.image_path, 'rb').read()
eq_(storage.open(upload.path).read(), data)
def test_fileupload_user(self):
self.client.login(username='<EMAIL>', password='password')
self.post()
user = UserProfile.objects.get(email='<EMAIL>')
eq_(FileUpload.objects.get().user, user)
@attr('validator')
def test_fileupload_validation(self):
self.post()
upload = FileUpload.objects.filter().order_by('-created').first()
assert upload.validation
validation = json.loads(upload.validation)
eq_(validation['success'], False)
# The current interface depends on this JSON structure:
eq_(validation['errors'], 1)
eq_(validation['warnings'], 0)
assert len(validation['messages'])
msg = validation['messages'][0]
assert 'uid' in msg, "Unexpected: %r" % msg
eq_(msg['type'], u'error')
eq_(msg['message'], u'The package is not of a recognized type.')
assert not msg['description'], 'Found unexpected description.'
def test_redirect(self):
r = self.post()
upload = FileUpload.objects.get()
url = reverse('devhub.upload_detail', args=[upload.uuid, 'json'])
self.assert3xx(r, url)
@mock.patch('validator.validate.validate')
def test_upload_unlisted_addon(self, validate_mock):
"""Unlisted addons are validated as "self hosted" addons."""
validate_mock.return_value = json.dumps(amo.VALIDATOR_SKELETON_RESULTS)
self.url = reverse('devhub.upload_unlisted')
self.post()
# Make sure it was called with listed=False.
assert not validate_mock.call_args[1]['listed']
class TestUploadDetail(BaseUploadTest):
fixtures = ['base/appversion', 'base/users']
def setUp(self):
super(TestUploadDetail, self).setUp()
assert self.client.login(username='<EMAIL>',
password='password')
def post(self):
        # Has to be a binary, non-XPI file.
data = open(get_image_path('animated.png'), 'rb')
return self.client.post(reverse('devhub.upload'), {'upload': data})
def validation_ok(self):
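        # A minimal validation result representing a clean, error-free run.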
return {
'errors': 0,
'success': True,
'warnings': 0,
'notices': 0,
'signing_summary': {'trivial': 1, 'low': 0, 'medium': 0,
'high': 0},
'passed_auto_validation': 1,
'message_tree': {},
'messages': [],
'rejected': False,
'metadata': {}}
def upload_file(self, file):
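        # Upload a fixture add-on from the devhub test addons directory and
        # expect the upload view to redirect.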
addon = os.path.join(
settings.ROOT, 'src', 'olympia', 'devhub', 'tests', 'addons', file)
with open(addon, 'rb') as f:
r = self.client.post(reverse('devhub.upload'),
{'upload': f})
eq_(r.status_code, 302)
@attr('validator')
def test_detail_json(self):
self.post()
upload = FileUpload.objects.get()
r = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid, 'json']))
eq_(r.status_code, 200)
data = json.loads(r.content)
assert data['validation']['errors'] == 2
eq_(data['url'],
reverse('devhub.upload_detail', args=[upload.uuid, 'json']))
eq_(data['full_report_url'],
reverse('devhub.upload_detail', args=[upload.uuid]))
# We must have tiers
assert len(data['validation']['messages'])
msg = data['validation']['messages'][0]
eq_(msg['tier'], 1)
def test_detail_view(self):
self.post()
upload = FileUpload.objects.filter().order_by('-created').first()
r = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid]))
eq_(r.status_code, 200)
doc = pq(r.content)
assert (doc('header h2').text() ==
'Validation Results for {0}_animated.png'.format(upload.uuid))
suite = doc('#addon-validator-suite')
eq_(suite.attr('data-validateurl'),
reverse('devhub.standalone_upload_detail', args=[upload.uuid]))
@mock.patch('olympia.devhub.tasks.run_validator')
def check_excluded_platforms(self, xpi, platforms, v):
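        # Upload the given fixture and check that the JSON detail view lists
        # the expected platforms to exclude.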
v.return_value = json.dumps(self.validation_ok())
self.upload_file(xpi)
upload = FileUpload.objects.get()
r = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid, 'json']))
eq_(r.status_code, 200)
data = json.loads(r.content)
eq_(sorted(data['platforms_to_exclude']), sorted(platforms))
def test_multi_app_addon_can_have_all_platforms(self):
self.check_excluded_platforms('mobile-2.9.10-fx+fn.xpi', [])
def test_mobile_excludes_desktop_platforms(self):
self.check_excluded_platforms('mobile-0.1-fn.xpi', [
str(p) for p in amo.DESKTOP_PLATFORMS])
def test_android_excludes_desktop_platforms(self):
# Test native Fennec.
self.check_excluded_platforms('android-phone.xpi', [
str(p) for p in amo.DESKTOP_PLATFORMS])
def test_search_tool_excludes_all_platforms(self):
self.check_excluded_platforms('searchgeek-20090701.xml', [
str(p) for p in amo.SUPPORTED_PLATFORMS])
def test_desktop_excludes_mobile(self):
self.check_excluded_platforms('desktop.xpi', [
str(p) for p in amo.MOBILE_PLATFORMS])
@mock.patch('olympia.devhub.tasks.run_validator')
@mock.patch.object(waffle, 'flag_is_active')
def test_unparsable_xpi(self, flag_is_active, v):
flag_is_active.return_value = True
v.return_value = json.dumps(self.validation_ok())
self.upload_file('unopenable.xpi')
upload = FileUpload.objects.get()
r = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid, 'json']))
data = json.loads(r.content)
eq_([(m['message'], m.get('fatal', False))
for m in data['validation']['messages']],
[(u'Could not parse the manifest file.', True)])
@mock.patch('olympia.devhub.tasks.run_validator')
def test_experiment_xpi_allowed(self, mock_validator):
user = UserProfile.objects.get(email='<EMAIL>')
self.grant_permission(user, 'Experiments:submit')
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file('../../../files/fixtures/files/experiment.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid, 'json']))
data = json.loads(response.content)
assert data['validation']['messages'] == []
@mock.patch('olympia.devhub.tasks.run_validator')
def test_experiment_xpi_not_allowed(self, mock_validator):
mock_validator.return_value = json.dumps(self.validation_ok())
self.upload_file('../../../files/fixtures/files/experiment.xpi')
upload = FileUpload.objects.get()
response = self.client.get(reverse('devhub.upload_detail',
args=[upload.uuid, 'json']))
data = json.loads(response.content)
assert data['validation']['messages'] == [
{u'tier': 1, u'message': u'You cannot submit this type of add-on',
u'fatal': True, u'type': u'error'}]
def assert_json_error(request, field, msg):
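    # Assert a 400 JSON response whose `field` (or the non-field errors when
    # field is None) equals [msg].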
eq_(request.status_code, 400)
eq_(request['Content-Type'], 'application/json')
field = '__all__' if field is None else field
content = json.loads(request.content)
assert field in content, '%r not in %r' % (field, content)
eq_(content[field], [msg])
def assert_json_field(request, field, msg):
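    # Assert a 200 JSON response that contains `field` with the value `msg`.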
eq_(request.status_code, 200)
eq_(request['Content-Type'], 'application/json')
content = json.loads(request.content)
assert field in content, '%r not in %r' % (field, content)
eq_(content[field], msg)
class UploadTest(BaseUploadTest, TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(UploadTest, self).setUp()
self.upload = self.get_upload('extension.xpi')
self.addon = Addon.objects.get(id=3615)
self.version = self.addon.current_version
self.addon.update(guid='guid@xpi')
assert self.client.login(username='<EMAIL>', password='password')
class TestQueuePosition(UploadTest):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestQueuePosition, self).setUp()
self.url = reverse('devhub.versions.add_file',
args=[self.addon.slug, self.version.id])
self.edit_url = reverse('devhub.versions.edit',
args=[self.addon.slug, self.version.id])
version_files = self.version.files.all()[0]
version_files.platform = amo.PLATFORM_LINUX.id
version_files.save()
def test_not_in_queue(self):
r = self.client.get(self.addon.get_dev_url('versions'))
eq_(self.addon.status, amo.STATUS_PUBLIC)
eq_(pq(r.content)('.version-status-actions .dark').length, 0)
def test_in_queue(self):
statuses = [(amo.STATUS_NOMINATED, amo.STATUS_UNREVIEWED),
(amo.STATUS_PUBLIC, amo.STATUS_UNREVIEWED),
(amo.STATUS_LITE, amo.STATUS_UNREVIEWED)]
for addon_status in statuses:
self.addon.status = addon_status[0]
self.addon.save()
file = self.addon.latest_version.files.all()[0]
file.status = addon_status[1]
file.save()
r = self.client.get(self.addon.get_dev_url('versions'))
doc = pq(r.content)
span = doc('.queue-position')
assert span.length
assert "Queue Position: 1 of 1" in span.text()
class TestVersionAddFile(UploadTest):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestVersionAddFile, self).setUp()
self.version.update(version='0.1')
self.url = reverse('devhub.versions.add_file',
args=[self.addon.slug, self.version.id])
self.edit_url = reverse('devhub.versions.edit',
args=[self.addon.slug, self.version.id])
version_files = self.version.files.all()[0]
version_files.platform = amo.PLATFORM_LINUX.id
version_files.save()
def make_mobile(self):
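        # Point every compatible application on this version at Android so the
        # version is treated as mobile-only.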
for a in self.version.apps.all():
a.application = amo.ANDROID.id
a.save()
def post(self, platform=amo.PLATFORM_MAC, source=None, beta=False):
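        # Post the stored upload as an additional file for the existing version.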
return self.client.post(self.url, dict(upload=self.upload.uuid,
platform=platform.id,
source=source, beta=beta))
def test_guid_matches(self):
self.addon.update(guid='something.different')
r = self.post()
assert_json_error(r, None, "Add-on ID doesn't match add-on.")
def test_version_matches(self):
self.version.update(version='2.0')
r = self.post()
assert_json_error(r, None, "Version doesn't match")
def test_delete_button_enabled(self):
version = self.addon.current_version
version.files.all()[0].update(status=amo.STATUS_UNREVIEWED)
r = self.client.get(self.edit_url)
doc = pq(r.content)('#file-list')
eq_(doc.find('a.remove').length, 1)
eq_(doc.find('span.remove.tooltip').length, 0)
def test_delete_button_disabled(self):
r = self.client.get(self.edit_url)
doc = pq(r.content)('#file-list')
eq_(doc.find('a.remove').length, 0)
eq_(doc.find('span.remove.tooltip').length, 1)
tip = doc.find('span.remove.tooltip')
assert "You cannot remove an individual file" in tip.attr('title')
def test_delete_button_multiple(self):
file = self.addon.current_version.files.all()[0]
file.pk = None
file.save()
cases = [(amo.STATUS_UNREVIEWED, amo.STATUS_UNREVIEWED, True),
(amo.STATUS_DISABLED, amo.STATUS_UNREVIEWED, False)]
for c in cases:
version_files = self.addon.current_version.files.all()
version_files[0].update(status=c[0])
version_files[1].update(status=c[1])
r = self.client.get(self.edit_url)
doc = pq(r.content)('#file-list')
assert (doc.find('a.remove').length > 0) == c[2]
assert not (doc.find('span.remove').length > 0) == c[2]
if not c[2]:
tip = doc.find('span.remove.tooltip')
assert "You cannot remove an individual" in tip.attr('title')
def test_delete_submit_disabled(self):
file_id = self.addon.current_version.files.all()[0].id
platform = amo.PLATFORM_MAC.id
form = {'DELETE': 'checked', 'id': file_id, 'platform': platform}
data = formset(form, platform=platform, upload=self.upload.uuid,
initial_count=1, prefix='files')
r = self.client.post(self.edit_url, data)
doc = pq(r.content)
assert "You cannot delete a file once" in doc('.errorlist li').text()
def test_delete_submit_enabled(self):
version = self.addon.current_version
version.files.all()[0].update(status=amo.STATUS_UNREVIEWED)
file_id = self.addon.current_version.files.all()[0].id
platform = amo.PLATFORM_MAC.id
form = {'DELETE': 'checked', 'id': file_id, 'platform': platform}
data = formset(form, platform=platform, upload=self.upload.uuid,
initial_count=1, prefix='files')
data.update(formset(total_count=1, initial_count=1))
r = self.client.post(self.edit_url, data)
doc = pq(r.content)
eq_(doc('.errorlist li').length, 0)
def test_platform_limits(self):
r = self.post(platform=amo.PLATFORM_BSD)
assert_json_error(r, 'platform',
'Select a valid choice. That choice is not one of '
'the available choices.')
def test_platform_choices(self):
r = self.client.get(self.edit_url)
form = r.context['new_file_form']
platform = self.version.files.get().platform
choices = form.fields['platform'].choices
# User cannot upload existing platforms:
assert platform not in dict(choices), choices
# User cannot upload platform=ALL when platform files exist.
assert amo.PLATFORM_ALL.id not in dict(choices), choices
def test_platform_choices_when_no_files(self):
all_choices = self.version.compatible_platforms().values()
self.version.files.all().delete()
url = reverse('devhub.versions.edit',
args=[self.addon.slug, self.version.id])
r = self.client.get(url)
form = r.context['new_file_form']
eq_(sorted(dict(form.fields['platform'].choices).keys()),
sorted([p.id for p in all_choices]))
def test_platform_choices_when_mobile(self):
self.make_mobile()
self.version.files.all().delete()
r = self.client.get(self.edit_url)
form = r.context['new_file_form']
eq_(sorted([unicode(c[1]) for c in form.fields['platform'].choices]),
sorted([unicode(p.name) for p in amo.MOBILE_PLATFORMS.values()]))
def test_type_matches(self):
self.addon.update(type=amo.ADDON_THEME)
r = self.post()
assert_json_error(r, None, "<em:type> doesn't match add-on")
def test_file_platform(self):
# Check that we're creating a new file with the requested platform.
qs = self.version.files
eq_(len(qs.all()), 1)
assert not qs.filter(platform=amo.PLATFORM_MAC.id)
self.post()
eq_(len(qs.all()), 2)
assert qs.get(platform=amo.PLATFORM_MAC.id)
def test_upload_not_found(self):
r = self.client.post(self.url, dict(upload='xxx',
platform=amo.PLATFORM_MAC.id))
assert_json_error(r, 'upload',
'There was an error with your upload. Please try '
'again.')
@mock.patch('olympia.versions.models.Version.is_allowed_upload')
def test_cant_upload(self, allowed):
"""Test that if is_allowed_upload fails, the upload will fail."""
allowed.return_value = False
res = self.post()
assert_json_error(res, '__all__',
'You cannot upload any more files for this version.')
def test_success_html(self):
r = self.post()
eq_(r.status_code, 200)
new_file = self.version.files.get(platform=amo.PLATFORM_MAC.id)
eq_(r.context['form'].instance, new_file)
def test_show_item_history(self):
version = self.addon.current_version
user = UserProfile.objects.get(email='<EMAIL>')
details = {'comments': 'yo', 'files': [version.files.all()[0].id]}
amo.log(amo.LOG.APPROVE_VERSION, self.addon,
self.addon.current_version, user=user, created=datetime.now(),
details=details)
doc = pq(self.client.get(self.edit_url).content)
appr = doc('#approval_status')
eq_(appr.length, 1)
eq_(appr.find('strong').eq(0).text(), "File (Linux)")
eq_(appr.find('.version-comments').length, 1)
comment = appr.find('.version-comments').eq(0)
eq_(comment.find('strong a').text(), 'Delicious Bookmarks Version 0.1')
eq_(comment.find('pre.email_comment').length, 1)
eq_(comment.find('pre.email_comment').text(), 'yo')
def test_show_item_history_hide_message(self):
""" Test to make sure comments not to the user aren't shown. """
version = self.addon.current_version
user = UserProfile.objects.get(email='<EMAIL>')
details = {'comments': 'yo', 'files': [version.files.all()[0].id]}
amo.log(amo.LOG.REQUEST_SUPER_REVIEW, self.addon,
self.addon.current_version, user=user, created=datetime.now(),
details=details)
doc = pq(self.client.get(self.edit_url).content)
comment = doc('#approval_status').find('.version-comments').eq(0)
eq_(comment.find('pre.email_comment').length, 0)
def test_show_item_history_multiple(self):
version = self.addon.current_version
user = UserProfile.objects.get(email='<EMAIL>')
details = {'comments': 'yo', 'files': [version.files.all()[0].id]}
amo.log(amo.LOG.APPROVE_VERSION, self.addon,
self.addon.current_version, user=user, created=datetime.now(),
details=details)
amo.log(amo.LOG.REQUEST_SUPER_REVIEW, self.addon,
self.addon.current_version, user=user, created=datetime.now(),
details=details)
doc = pq(self.client.get(self.edit_url).content)
comments = doc('#approval_status').find('.version-comments')
eq_(comments.length, 2)
def test_with_source(self):
tdir = temp.gettempdir()
source = temp.NamedTemporaryFile(suffix=".zip", dir=tdir)
source.write('a' * (2 ** 21))
source.seek(0)
response = self.post(source=source)
eq_(response.status_code, 200)
assert self.addon.versions.get(pk=self.addon.current_version.pk).source
assert Addon.objects.get(pk=self.addon.pk).admin_review
def test_with_bad_source_format(self):
tdir = temp.gettempdir()
source = temp.NamedTemporaryFile(suffix=".exe", dir=tdir)
source.write('a' * (2 ** 21))
source.seek(0)
response = self.post(source=source)
eq_(response.status_code, 400)
assert 'source' in json.loads(response.content)
@mock.patch('olympia.editors.helpers.sign_file')
def test_unlisted_addon_sideload_fail_validation(self, mock_sign_file):
"""Sideloadable unlisted addons are also auto signed/reviewed."""
assert self.addon.status == amo.STATUS_PUBLIC # Fully reviewed.
self.addon.update(is_listed=False, trusted=False)
# Make sure the file has validation warnings or errors.
self.upload.update(
validation='{"notices": 2, "errors": 0, "messages": [],'
' "metadata": {}, "warnings": 1,'
' "signing_summary": {"trivial": 1, "low": 1,'
' "medium": 0, "high": 0},'
' "passed_auto_validation": 1}')
self.post()
file_ = File.objects.latest()
# Status is changed to fully reviewed and the file is signed.
assert self.addon.status == amo.STATUS_PUBLIC
assert file_.status == amo.STATUS_PUBLIC
assert mock_sign_file.called
# There is a log for that unlisted file signature (with failed
# validation).
log = ActivityLog.objects.order_by('pk').last()
expected = amo.LOG.UNLISTED_SIDELOAD_SIGNED_VALIDATION_FAILED.id
assert log.action == expected
@mock.patch('olympia.editors.helpers.sign_file')
def test_unlisted_addon_sideload_pass_validation(self, mock_sign_file):
"""Sideloadable unlisted addons are also auto signed/reviewed."""
assert self.addon.status == amo.STATUS_PUBLIC # Fully reviewed.
self.addon.update(is_listed=False, trusted=False)
# Make sure the file has no validation signing related messages.
self.upload.update(
validation='{"notices": 2, "errors": 0, "messages": [],'
' "metadata": {}, "warnings": 1,'
' "signing_summary": {"trivial": 1, "low": 0,'
' "medium": 0, "high": 0},'
' "passed_auto_validation": 1}')
self.post()
file_ = File.objects.latest()
# Status is changed to fully reviewed and the file is signed.
assert self.addon.status == amo.STATUS_PUBLIC
assert file_.status == amo.STATUS_PUBLIC
assert mock_sign_file.called
        # There is a log for that unlisted file signature (with passed
        # validation).
log = ActivityLog.objects.order_by('pk').last()
expected = amo.LOG.UNLISTED_SIDELOAD_SIGNED_VALIDATION_PASSED.id
assert log.action == expected
@mock.patch('olympia.editors.helpers.sign_file')
def test_unlisted_addon_fail_validation(self, mock_sign_file):
"""Files that fail validation are also auto signed/reviewed."""
self.addon.update(
is_listed=False, status=amo.STATUS_LITE, trusted=False)
assert self.addon.status == amo.STATUS_LITE # Preliminary reviewed.
# Make sure the file has validation warnings or errors.
self.upload.update(
validation='{"notices": 2, "errors": 0, "messages": [],'
' "metadata": {}, "warnings": 1,'
' "signing_summary": {"trivial": 1, "low": 1,'
' "medium": 0, "high": 0},'
' "passed_auto_validation": 1}')
self.post()
file_ = File.objects.latest()
# Status is changed to preliminary reviewed and the file is signed.
assert self.addon.status == amo.STATUS_LITE
assert file_.status == amo.STATUS_LITE
assert mock_sign_file.called
# There is a log for that unlisted file signature (with failed
# validation).
log = ActivityLog.objects.order_by('pk').last()
assert log.action == amo.LOG.UNLISTED_SIGNED_VALIDATION_FAILED.id
@mock.patch('olympia.editors.helpers.sign_file')
def test_unlisted_addon_pass_validation(self, mock_sign_file):
"""Files that pass validation are automatically signed/reviewed."""
self.addon.update(
is_listed=False, status=amo.STATUS_LITE, trusted=False)
# Make sure the file has no validation signing related messages.
self.upload.update(
validation='{"notices": 2, "errors": 0, "messages": [],'
' "metadata": {}, "warnings": 1,'
' "signing_summary": {"trivial": 1, "low": 0,'
' "medium": 0, "high": 0},'
' "passed_auto_validation": 1}')
assert self.addon.status == amo.STATUS_LITE # Preliminary reviewed.
self.post()
file_ = File.objects.latest()
# Status is changed to preliminary reviewed and the file is signed.
assert self.addon.status == amo.STATUS_LITE
assert file_.status == amo.STATUS_LITE
assert mock_sign_file.called
# There is a log for that unlisted file signature (with passed
# validation).
log = ActivityLog.objects.order_by('pk').last()
assert log.action == amo.LOG.UNLISTED_SIGNED_VALIDATION_PASSED.id
@mock.patch('olympia.devhub.views.sign_file')
def test_beta_addon_pass_validation(self, mock_sign_file):
"""Beta files that pass validation are automatically
signed/reviewed."""
# Make sure the file has no validation signing related messages.
self.upload.update(
validation='{"notices": 2, "errors": 0, "messages": [],'
' "metadata": {}, "warnings": 1,'
' "signing_summary": {"trivial": 1, "low": 0,'
' "medium": 0, "high": 0},'
' "passed_auto_validation": 1}')
assert self.addon.status == amo.STATUS_PUBLIC
self.post(beta=True)
file_ = File.objects.latest()
# Addon status didn't change and the file is signed.
assert self.addon.reload().status == amo.STATUS_PUBLIC
assert file_.status == amo.STATUS_BETA
assert mock_sign_file.called
class TestUploadErrors(UploadTest):
fixtures = ['base/users', 'base/addon_3615']
validator_success = json.dumps({
"errors": 0,
"success": True,
"warnings": 0,
"notices": 0,
"message_tree": {},
"messages": [],
"metadata": {},
})
def xpi(self):
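        # Open a known-good fixture XPI in binary mode for uploading.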
return open(os.path.join(os.path.dirname(files.__file__),
'fixtures', 'files',
'delicious_bookmarks-2.1.106-fx.xpi'),
'rb')
@mock.patch.object(waffle, 'flag_is_active', return_value=True)
@mock.patch('olympia.devhub.tasks.validate')
@mock.patch('olympia.devhub.tasks.run_validator')
def test_version_upload(self, run_validator, validate_, flag_is_active):
# Load the versions page:
res = self.client.get(self.addon.get_dev_url('versions'))
eq_(res.status_code, 200)
doc = pq(res.content)
# javascript: upload file:
upload_url = doc('#upload-addon').attr('data-upload-url')
with self.xpi() as f:
res = self.client.post(upload_url, {'upload': f}, follow=True)
data = json.loads(res.content)
poll_url = data['url']
upload = FileUpload.objects.get(uuid=data['upload'])
# Check that `tasks.validate` has been called with the expected upload.
validate_.assert_called_with(upload, listed=True)
# Poll and check that we are still pending validation.
data = json.loads(self.client.get(poll_url).content)
assert data.get('validation') == ''
# Run the actual validation task which was delayed by the mock.
run_validator.return_value = self.validator_success
validate(upload, listed=True)
# And poll to see that we now have the expected validation results.
data = json.loads(self.client.get(poll_url).content)
assert data['validation']
assert not data['validation']['messages'], \
'Unexpected validation errors: %s' % data['validation']['messages']
@mock.patch.object(waffle, 'flag_is_active', return_value=True)
@mock.patch('olympia.devhub.tasks.validate')
@mock.patch('olympia.devhub.tasks.run_validator')
def test_dupe_xpi(self, run_validator, validate_, flag_is_active):
# Submit a new addon:
self.client.post(reverse('devhub.submit.1')) # set cookie
res = self.client.get(reverse('devhub.submit.2'))
eq_(res.status_code, 200)
doc = pq(res.content)
# javascript: upload file:
upload_url = doc('#upload-addon').attr('data-upload-url')
with self.xpi() as f:
res = self.client.post(upload_url, {'upload': f}, follow=True)
data = json.loads(res.content)
poll_url = data['url']
upload = FileUpload.objects.get(uuid=data['upload'])
# Check that `tasks.validate` has been called with the expected upload.
validate_.assert_called_with(upload, listed=True)
# Poll and check that we are still pending validation.
data = json.loads(self.client.get(poll_url).content)
assert data.get('validation') == ''
# Run the actual validation task which was delayed by the mock.
run_validator.return_value = self.validator_success
validate(upload, listed=True)
# And poll to see that we now have the expected validation results.
data = json.loads(self.client.get(poll_url).content)
messages = data['validation']['messages']
assert len(messages) == 1
assert messages[0]['message'] == u'Duplicate add-on ID found.'
def test_dupe_xpi_unlisted_addon(self):
"""Submitting an xpi with the same UUID as an unlisted addon."""
self.addon.update(is_listed=False)
self.test_dupe_xpi()
class AddVersionTest(UploadTest):
def post(self, supported_platforms=[amo.PLATFORM_MAC],
override_validation=False, expected_status=200, source=None,
beta=False):
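        # Submit the stored upload as a new version and assert the expected
        # status code.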
d = dict(upload=self.upload.uuid, source=source,
supported_platforms=[p.id for p in supported_platforms],
admin_override_validation=override_validation, beta=beta)
r = self.client.post(self.url, d)
eq_(r.status_code, expected_status)
return r
def setUp(self):
super(AddVersionTest, self).setUp()
self.url = reverse('devhub.versions.add', args=[self.addon.slug])
class TestAddVersion(AddVersionTest):
def test_unique_version_num(self):
self.version.update(version='0.1')
r = self.post(expected_status=400)
assert_json_error(r, None, 'Version 0.1 already exists')
def test_same_version_if_previous_is_rejected(self):
        # We can have the same version number several times, if the previous
        # versions have been disabled/rejected.
self.version.update(version='0.1', approvalnotes='approval notes')
self.version.releasenotes = 'release notes'
self.version.save()
self.version.files.update(status=amo.STATUS_DISABLED)
self.post(expected_status=200)
self.version.reload()
version = Version.objects.latest()
ok_(version.pk != self.version.pk)
eq_(version.version, self.version.version)
# We reuse the release and approval notes from the last rejected
# version with the same version number.
eq_(version.releasenotes, self.version.releasenotes)
eq_(version.approvalnotes, self.version.approvalnotes)
def test_success(self):
r = self.post()
version = self.addon.versions.get(version='0.1')
assert_json_field(r, 'url',
reverse('devhub.versions.edit',
args=[self.addon.slug, version.id]))
def test_public(self):
self.post()
fle = File.objects.latest()
eq_(fle.status, amo.STATUS_PUBLIC)
def test_not_public(self):
self.addon.update(trusted=False)
self.post()
fle = File.objects.latest()
assert_not_equal(fle.status, amo.STATUS_PUBLIC)
def test_multiple_platforms(self):
r = self.post(supported_platforms=[amo.PLATFORM_MAC,
amo.PLATFORM_LINUX])
eq_(r.status_code, 200)
version = self.addon.versions.get(version='0.1')
eq_(len(version.all_files), 2)
@mock.patch('olympia.devhub.views.auto_sign_file')
def test_multiple_platforms_unlisted_addon(self, mock_auto_sign_file):
self.addon.update(is_listed=False)
r = self.post(supported_platforms=[amo.PLATFORM_MAC,
amo.PLATFORM_LINUX])
eq_(r.status_code, 200)
version = self.addon.versions.get(version='0.1')
eq_(len(version.all_files), 2)
mock_auto_sign_file.assert_has_calls(
[mock.call(f, is_beta=False) for f in version.all_files])
def test_with_source(self):
tdir = temp.gettempdir()
source = temp.NamedTemporaryFile(suffix=".zip", dir=tdir)
source.write('a' * (2 ** 21))
source.seek(0)
response = self.post(source=source)
eq_(response.status_code, 200)
assert self.addon.versions.get(version='0.1').source
assert Addon.objects.get(pk=self.addon.pk).admin_review
def test_with_bad_source_format(self):
tdir = temp.gettempdir()
source = temp.NamedTemporaryFile(suffix=".exe", dir=tdir)
source.write('a' * (2 ** 21))
source.seek(0)
response = self.post(source=source, expected_status=400)
assert 'source' in json.loads(response.content)
def test_force_beta(self):
self.post(beta=True)
f = File.objects.latest()
assert f.status == amo.STATUS_BETA
def test_no_force_beta_for_unlisted_addons(self):
"""No beta version for unlisted addons."""
self.addon.update(is_listed=False)
self.post(beta=True)
f = File.objects.latest()
assert f.status != amo.STATUS_BETA
@mock.patch('olympia.editors.helpers.sign_file')
def test_unlisted_addon_sideload_fail_validation(self, mock_sign_file):
"""Sideloadable unlisted addons also get auto signed/reviewed."""
assert self.addon.status == amo.STATUS_PUBLIC # Fully reviewed.
self.addon.update(is_listed=False, trusted=False)
# Make sure the file has validation warnings or errors.
self.upload.update(
validation=json.dumps({
"notices": 2, "errors": 0, "messages": [],
"metadata": {}, "warnings": 1,
"signing_summary": {"trivial": 1, "low": 1,
"medium": 0, "high": 0},
"passed_auto_validation": 0}))
self.post()
file_ = File.objects.latest()
# Status is changed to fully reviewed and the file is signed.
assert self.addon.status == amo.STATUS_PUBLIC
assert file_.status == amo.STATUS_PUBLIC
assert mock_sign_file.called
# There is a log for that unlisted file signature (with failed
# validation).
log = ActivityLog.objects.order_by('pk').last()
expected = amo.LOG.UNLISTED_SIDELOAD_SIGNED_VALIDATION_FAILED.id
assert log.action == expected
@mock.patch('olympia.editors.helpers.sign_file')
def test_unlisted_addon_sideload_pass_validation(self, mock_sign_file):
"""Sideloadable unlisted addons also get auto signed/reviewed."""
assert self.addon.status == amo.STATUS_PUBLIC # Fully reviewed.
self.addon.update(is_listed=False, trusted=False)
        # Make sure the file has no validation warnings or errors.
self.upload.update(
validation=json.dumps({
"notices": 2, "errors": 0, "messages": [],
"metadata": {}, "warnings": 1,
"signing_summary": {"trivial": 1, "low": 0,
"medium": 0, "high": 0},
"passed_auto_validation": 1}))
self.post()
file_ = File.objects.latest()
# Status is changed to fully reviewed and the file is signed.
assert self.addon.status == amo.STATUS_PUBLIC
assert file_.status == amo.STATUS_PUBLIC
assert mock_sign_file.called
        # There is a log for that unlisted file signature (with passed
        # validation).
log = ActivityLog.objects.order_by('pk').last()
expected = amo.LOG.UNLISTED_SIDELOAD_SIGNED_VALIDATION_PASSED.id
assert log.action == expected
@mock.patch('olympia.editors.helpers.sign_file')
def test_unlisted_addon_fail_validation(self, mock_sign_file):
"""Files that fail validation are also auto signed/reviewed."""
self.addon.update(
is_listed=False, status=amo.STATUS_LITE, trusted=False)
assert self.addon.status == amo.STATUS_LITE # Preliminary reviewed.
# Make sure the file has validation warnings or errors.
self.upload.update(
validation=json.dumps({
"notices": 2, "errors": 0, "messages": [],
"metadata": {}, "warnings": 1,
"signing_summary": {"trivial": 1, "low": 1,
"medium": 0, "high": 0},
"passed_auto_validation": 0}))
self.post()
file_ = File.objects.latest()
# Status is changed to preliminary reviewed and the file is signed.
assert self.addon.status == amo.STATUS_LITE
assert file_.status == amo.STATUS_LITE
assert mock_sign_file.called
# There is a log for that unlisted file signature (with failed
# validation).
log = ActivityLog.objects.order_by('pk').last()
assert log.action == amo.LOG.UNLISTED_SIGNED_VALIDATION_FAILED.id
@mock.patch('olympia.editors.helpers.sign_file')
def test_unlisted_addon_pass_validation(self, mock_sign_file):
"""Files that pass validation are automatically signed/reviewed."""
self.addon.update(
is_listed=False, status=amo.STATUS_LITE, trusted=False)
        # Make sure the file has no validation warnings or errors.
self.upload.update(
validation=json.dumps({
"notices": 2, "errors": 0, "messages": [],
"metadata": {}, "warnings": 1,
"signing_summary": {"trivial": 1, "low": 0,
"medium": 0, "high": 0},
"passed_auto_validation": 1}))
assert self.addon.status == amo.STATUS_LITE # Preliminary reviewed.
self.post()
file_ = File.objects.latest()
# Status is changed to preliminary reviewed and the file is signed.
assert self.addon.status == amo.STATUS_LITE
assert file_.status == amo.STATUS_LITE
assert mock_sign_file.called
# There is a log for that unlisted file signature (with passed
# validation).
log = ActivityLog.objects.order_by('pk').last()
assert log.action == amo.LOG.UNLISTED_SIGNED_VALIDATION_PASSED.id
@mock.patch('olympia.devhub.views.sign_file')
def test_experiments_are_auto_signed(self, mock_sign_file):
"""Experiment extensions (bug 1220097) are auto-signed."""
# We're going to sign even if it has signing related errors/warnings.
self.upload = self.get_upload(
'experiment.xpi',
validation=json.dumps({
"notices": 2, "errors": 0, "messages": [],
"metadata": {}, "warnings": 1,
"signing_summary": {"trivial": 1, "low": 0,
"medium": 0, "high": 1},
"passed_auto_validation": 0}))
self.addon.update(guid='experiment@xpi', is_listed=True,
status=amo.STATUS_PUBLIC, trusted=False)
self.post()
# Make sure the file created and signed is for this addon.
assert mock_sign_file.call_count == 1
mock_sign_file_call = mock_sign_file.call_args[0]
signed_file = mock_sign_file_call[0]
assert signed_file.version.addon == self.addon
        # There is a log for that experiment file signature.
log = ActivityLog.objects.get()
assert log.action == amo.LOG.EXPERIMENT_SIGNED.id
class TestAddBetaVersion(AddVersionTest):
fixtures = ['base/users', 'base/appversion', 'base/addon_3615']
def setUp(self):
super(TestAddBetaVersion, self).setUp()
self.do_upload()
def do_upload(self):
self.upload = self.get_upload('extension-0.2b1.xpi')
def post_additional(self, version, platform=amo.PLATFORM_MAC):
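        # Add another beta file for the given version on the requested platform.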
url = reverse('devhub.versions.add_file',
args=[self.addon.slug, version.id])
return self.client.post(url, dict(upload=self.upload.uuid,
platform=platform.id, beta=True))
def test_add_multi_file_beta(self):
r = self.post(supported_platforms=[amo.PLATFORM_MAC], beta=True)
version = self.addon.versions.all().order_by('-id')[0]
# Make sure that the first file is beta
fle = File.objects.all().order_by('-id')[0]
eq_(fle.status, amo.STATUS_BETA)
self.do_upload()
r = self.post_additional(version, platform=amo.PLATFORM_LINUX)
eq_(r.status_code, 200)
# Make sure that the additional files are beta
fle = File.objects.all().order_by('-id')[0]
eq_(fle.status, amo.STATUS_BETA)
def test_force_not_beta(self):
self.post(beta=False)
f = File.objects.latest()
assert f.status == amo.STATUS_PUBLIC
@mock.patch('olympia.devhub.views.sign_file')
def test_listed_beta_pass_validation(self, mock_sign_file):
"""Beta files that pass validation are signed with prelim cert."""
self.addon.update(
is_listed=True, status=amo.STATUS_PUBLIC, trusted=False)
        # Make sure the file has no validation warnings or errors.
self.upload.update(
validation='{"notices": 2, "errors": 0, "messages": [],'
' "metadata": {}, "warnings": 1,'
' "signing_summary": {"trivial": 1, "low": 0,'
' "medium": 0, "high": 0},'
' "passed_auto_validation": 1}')
assert self.addon.status == amo.STATUS_PUBLIC # Fully reviewed.
self.post(beta=True)
file_ = File.objects.latest()
assert self.addon.reload().status == amo.STATUS_PUBLIC
assert file_.status == amo.STATUS_BETA
assert mock_sign_file.called
# There is a log for that beta file signature (with passed validation).
log = ActivityLog.objects.beta_signed_events().get()
assert log.action == amo.LOG.BETA_SIGNED_VALIDATION_PASSED.id
@mock.patch('olympia.devhub.views.sign_file')
def test_listed_beta_do_not_pass_validation(self, mock_sign_file):
"""Beta files that don't pass validation should be logged."""
self.addon.update(is_listed=True, status=amo.STATUS_PUBLIC)
# Make sure the file has validation warnings.
self.upload.update(
validation='{"notices": 2, "errors": 1, "messages": [],'
' "metadata": {}, "warnings": 1,'
' "signing_summary": {"trivial": 1, "low": 1,'
' "medium": 0, "high": 0},'
' "passed_auto_validation": 0}')
assert self.addon.status == amo.STATUS_PUBLIC
self.post(beta=True)
file_ = File.objects.latest()
assert self.addon.reload().status == amo.STATUS_PUBLIC
assert file_.status == amo.STATUS_BETA
assert mock_sign_file.called
# There is a log for that beta file signature (with failed validation).
log = ActivityLog.objects.beta_signed_events().get()
assert log.action == amo.LOG.BETA_SIGNED_VALIDATION_FAILED.id
class TestAddVersionValidation(AddVersionTest):
def login_as_admin(self):
assert self.client.login(username='<EMAIL>',
password='password')
def do_upload_non_fatal(self):
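        # Store an upload whose validation failed with an error that admins are
        # allowed to override.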
validation = {
'errors': 1,
'detected_type': 'extension',
'success': False,
'warnings': 0,
'notices': 0,
'signing_summary': {'trivial': 1, 'low': 0, 'medium': 0,
'high': 0},
'passed_auto_validation': 1,
'message_tree': {},
'ending_tier': 5,
'messages': [
{'description': 'The subpackage could not be opened due to '
'issues with corruption. Ensure that the file '
'is valid.',
'type': 'error',
'id': [],
'file': 'unopenable.jar',
'tier': 2,
'message': 'Subpackage corrupt.',
'uid': '8a3d5854cf0d42e892b3122259e99445',
'compatibility_type': None}],
'metadata': {}}
self.upload = self.get_upload(
'validation-error.xpi',
validation=json.dumps(validation))
assert not self.upload.valid
def test_non_admin_validation_override_fails(self):
self.do_upload_non_fatal()
self.post(override_validation=True, expected_status=400)
def test_admin_validation_override(self):
self.login_as_admin()
self.do_upload_non_fatal()
assert not self.addon.admin_review
self.post(override_validation=True, expected_status=200)
eq_(self.addon.reload().admin_review, True)
def test_admin_validation_sans_override(self):
self.login_as_admin()
self.do_upload_non_fatal()
self.post(override_validation=False, expected_status=400)
class TestVersionXSS(UploadTest):
def test_unique_version_num(self):
# Can't use a "/" to close the tag, as we're doing a get_url_path on
# it, which uses addons.versions, which consumes up to the first "/"
# encountered.
self.version.update(
version='<script>alert("Happy XSS-Xmas");<script>')
r = self.client.get(reverse('devhub.addons'))
eq_(r.status_code, 200)
assert '<script>alert' not in r.content
assert '&lt;script&gt;alert' in r.content
class UploadAddon(object):
def post(self, supported_platforms=[amo.PLATFORM_ALL], expect_errors=False,
source=None, is_listed=True, is_sideload=False, status_code=200):
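        # Submit the stored upload as a brand new add-on and surface any
        # unexpected form errors.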
d = dict(upload=self.upload.uuid, source=source,
supported_platforms=[p.id for p in supported_platforms],
is_unlisted=not is_listed, is_sideload=is_sideload)
r = self.client.post(self.url, d, follow=True)
eq_(r.status_code, status_code)
if not expect_errors:
# Show any unexpected form errors.
if r.context and 'new_addon_form' in r.context:
eq_(r.context['new_addon_form'].errors.as_text(), '')
return r
class TestCreateAddon(BaseUploadTest, UploadAddon, TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestCreateAddon, self).setUp()
self.upload = self.get_upload('extension.xpi')
self.url = reverse('devhub.submit.2')
assert self.client.login(username='<EMAIL>',
password='password')
self.client.post(reverse('devhub.submit.1'))
def assert_json_error(self, *args):
UploadTest().assert_json_error(self, *args)
def test_unique_name(self):
addon_factory(name='xpi name')
response = self.post(expect_errors=True)
assert response.context['new_addon_form'].non_field_errors() == (
['This name is already in use. Please choose another.'])
def test_unlisted_name_not_unique(self):
"""We don't enforce name uniqueness for unlisted add-ons."""
addon_factory(name='xpi name', is_listed=False)
assert get_addon_count('xpi name') == 1
        # We're not passing `expect_errors=True`, so if there were any errors
        # like "This name is already in use. Please choose another.", the
        # test would fail.
response = self.post()
# Kind of redundant with the `self.post()` above: we just want to make
        # really sure there are no errors raised by posting an add-on with a name
# that is already used by an unlisted add-on.
assert 'new_addon_form' not in response.context
assert get_addon_count('xpi name') == 2
def test_name_not_unique_between_types(self):
"""We don't enforce name uniqueness between add-ons types."""
addon_factory(name='xpi name', type=amo.ADDON_THEME)
assert get_addon_count('xpi name') == 1
        # We're not passing `expect_errors=True`, so if there were any errors
        # like "This name is already in use. Please choose another.", the
        # test would fail.
response = self.post()
# Kind of redundant with the `self.post()` above: we just want to make
        # really sure there are no errors raised by posting an add-on with a name
        # that is already used by an add-on of a different type.
assert 'new_addon_form' not in response.context
assert get_addon_count('xpi name') == 2
def test_success_listed(self):
assert Addon.objects.count() == 0
r = self.post()
addon = Addon.objects.get()
assert addon.is_listed
self.assert3xx(r, reverse('devhub.submit.3', args=[addon.slug]))
log_items = ActivityLog.objects.for_addons(addon)
assert log_items.filter(action=amo.LOG.CREATE_ADDON.id), (
'New add-on creation never logged.')
@mock.patch('olympia.editors.helpers.sign_file')
def test_success_unlisted(self, mock_sign_file):
"""Sign automatically."""
assert Addon.with_unlisted.count() == 0
        # No validation errors or warnings.
self.upload = self.get_upload(
'extension.xpi',
validation=json.dumps(dict(errors=0, warnings=0, notices=2,
metadata={}, messages=[],
signing_summary={
'trivial': 1, 'low': 0, 'medium': 0,
'high': 0},
passed_auto_validation=True
)))
self.post(is_listed=False)
addon = Addon.with_unlisted.get()
assert not addon.is_listed
assert addon.status == amo.STATUS_LITE # Automatic signing.
assert mock_sign_file.called
@mock.patch('olympia.editors.helpers.sign_file')
def test_success_unlisted_fail_validation(self, mock_sign_file):
assert Addon.with_unlisted.count() == 0
self.upload = self.get_upload(
'extension.xpi',
validation=json.dumps(dict(errors=0, warnings=0, notices=2,
metadata={}, messages=[],
signing_summary={
'trivial': 0, 'low': 1, 'medium': 0,
'high': 0},
passed_auto_validation=False
)))
self.post(is_listed=False)
addon = Addon.with_unlisted.get()
assert not addon.is_listed
assert addon.status == amo.STATUS_LITE # Prelim review.
assert mock_sign_file.called
@mock.patch('olympia.editors.helpers.sign_file')
def test_success_unlisted_sideload(self, mock_sign_file):
assert Addon.with_unlisted.count() == 0
self.post(is_listed=False, is_sideload=True)
addon = Addon.with_unlisted.get()
assert not addon.is_listed
# Full review for sideload addons.
assert addon.status == amo.STATUS_PUBLIC
assert mock_sign_file.called
def test_missing_platforms(self):
r = self.client.post(self.url, dict(upload=self.upload.uuid))
eq_(r.status_code, 200)
eq_(r.context['new_addon_form'].errors.as_text(),
'* supported_platforms\n * Need at least one platform.')
doc = pq(r.content)
eq_(doc('ul.errorlist').text(),
'Need at least one platform.')
def test_one_xpi_for_multiple_platforms(self):
eq_(Addon.objects.count(), 0)
r = self.post(supported_platforms=[amo.PLATFORM_MAC,
amo.PLATFORM_LINUX])
addon = Addon.objects.get()
self.assert3xx(r, reverse('devhub.submit.3', args=[addon.slug]))
eq_(sorted([f.filename for f in addon.current_version.all_files]),
[u'xpi_name-0.1-linux.xpi', u'xpi_name-0.1-mac.xpi'])
@mock.patch('olympia.devhub.views.auto_sign_file')
def test_one_xpi_for_multiple_platforms_unlisted_addon(
self, mock_auto_sign_file):
eq_(Addon.objects.count(), 0)
r = self.post(supported_platforms=[amo.PLATFORM_MAC,
amo.PLATFORM_LINUX],
is_listed=False)
addon = Addon.unfiltered.get()
self.assert3xx(r, reverse('devhub.submit.3', args=[addon.slug]))
eq_(sorted([f.filename for f in addon.current_version.all_files]),
[u'xpi_name-0.1-linux.xpi', u'xpi_name-0.1-mac.xpi'])
mock_auto_sign_file.assert_has_calls(
[mock.call(f) for f in addon.current_version.all_files])
def test_with_source(self):
tdir = temp.gettempdir()
source = temp.NamedTemporaryFile(suffix=".zip", dir=tdir)
source.write('a' * (2 ** 21))
source.seek(0)
eq_(Addon.objects.count(), 0)
r = self.post(source=source)
addon = Addon.objects.get()
self.assert3xx(r, reverse('devhub.submit.3', args=[addon.slug]))
assert addon.current_version.source
assert Addon.objects.get(pk=addon.pk).admin_review
class TestDeleteAddon(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestDeleteAddon, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.url = self.addon.get_dev_url('delete')
self.client.login(username='<EMAIL>', password='password')
def test_bad_password(self):
r = self.client.post(self.url, dict(slug='nope'))
self.assert3xx(r, self.addon.get_dev_url('versions'))
eq_(r.context['title'],
'URL name was incorrect. Add-on was not deleted.')
eq_(Addon.objects.count(), 1)
def test_success(self):
r = self.client.post(self.url, dict(slug='a3615'))
self.assert3xx(r, reverse('devhub.addons'))
eq_(r.context['title'], 'Add-on deleted.')
eq_(Addon.objects.count(), 0)
class TestRequestReview(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestRequestReview, self).setUp()
self.addon = Addon.objects.create(type=1, name='xxx')
self.version = Version.objects.create(addon=self.addon)
self.file = File.objects.create(version=self.version,
platform=amo.PLATFORM_ALL.id)
self.redirect_url = self.addon.get_dev_url('versions')
self.lite_url = reverse('devhub.request-review',
args=[self.addon.slug, amo.STATUS_LITE])
self.public_url = reverse('devhub.request-review',
args=[self.addon.slug, amo.STATUS_PUBLIC])
assert self.client.login(username='<EMAIL>',
password='password')
def get_addon(self):
return Addon.objects.get(id=self.addon.id)
def get_version(self):
return Version.objects.get(pk=self.version.id)
def check(self, old_status, url, new_status):
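        # Move the add-on to old_status, request the review, and verify the
        # redirect and the resulting status.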
self.addon.update(status=old_status)
r = self.client.post(url)
self.assert3xx(r, self.redirect_url)
eq_(self.get_addon().status, new_status)
def check_400(self, url):
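        # The requested status transition is not allowed, so the view returns
        # a 400.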
r = self.client.post(url)
eq_(r.status_code, 400)
def test_404(self):
bad_url = self.public_url.replace(str(amo.STATUS_PUBLIC), '0')
eq_(self.client.post(bad_url).status_code, 404)
def test_public(self):
self.addon.update(status=amo.STATUS_PUBLIC)
self.check_400(self.lite_url)
self.check_400(self.public_url)
def test_disabled_by_user_to_lite(self):
self.addon.update(disabled_by_user=True)
self.check_400(self.lite_url)
def test_disabled_by_admin(self):
self.addon.update(status=amo.STATUS_DISABLED)
self.check_400(self.lite_url)
def test_lite_to_lite(self):
self.addon.update(status=amo.STATUS_LITE)
self.check_400(self.lite_url)
def test_lite_to_public(self):
eq_(self.version.nomination, None)
self.check(amo.STATUS_LITE, self.public_url,
amo.STATUS_LITE_AND_NOMINATED)
self.assertCloseToNow(self.get_version().nomination)
def test_purgatory_to_lite(self):
self.check(amo.STATUS_PURGATORY, self.lite_url, amo.STATUS_UNREVIEWED)
def test_purgatory_to_public(self):
eq_(self.version.nomination, None)
self.check(amo.STATUS_PURGATORY, self.public_url,
amo.STATUS_NOMINATED)
self.assertCloseToNow(self.get_version().nomination)
def test_lite_and_nominated_to_public(self):
self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
self.check_400(self.public_url)
def test_lite_and_nominated(self):
self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
self.check_400(self.lite_url)
self.check_400(self.public_url)
def test_renominate_for_full_review(self):
# When a version is rejected, the addon is disabled.
# The author must upload a new version and re-nominate.
# However, renominating the *same* version does not adjust the
# nomination date.
orig_date = datetime.now() - timedelta(days=30)
# Pretend it was nominated in the past:
self.version.update(nomination=orig_date)
self.check(amo.STATUS_NULL, self.public_url, amo.STATUS_NOMINATED)
eq_(self.get_version().nomination.timetuple()[0:5],
orig_date.timetuple()[0:5])
def test_renomination_doesnt_reset_nomination_date(self):
# Nominate:
self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
# Pretend it was nominated in the past:
orig_date = datetime.now() - timedelta(days=30)
self.version.update(nomination=orig_date, _signal=False)
# Reject it:
self.addon.update(status=amo.STATUS_NULL)
# Re-nominate:
self.addon.update(status=amo.STATUS_LITE_AND_NOMINATED)
eq_(self.get_version().nomination.timetuple()[0:5],
orig_date.timetuple()[0:5])
class TestRedirects(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestRedirects, self).setUp()
self.base = reverse('devhub.index')
assert self.client.login(username='<EMAIL>',
password='password')
def test_edit(self):
url = self.base + 'addon/edit/3615'
r = self.client.get(url, follow=True)
self.assert3xx(r, reverse('devhub.addons.edit', args=['a3615']), 301)
url = self.base + 'addon/edit/3615/'
r = self.client.get(url, follow=True)
self.assert3xx(r, reverse('devhub.addons.edit', args=['a3615']), 301)
def test_status(self):
url = self.base + 'addon/status/3615'
r = self.client.get(url, follow=True)
self.assert3xx(r, reverse('devhub.addons.versions',
args=['a3615']), 301)
def test_versions(self):
url = self.base + 'versions/3615'
r = self.client.get(url, follow=True)
self.assert3xx(r, reverse('devhub.addons.versions',
args=['a3615']), 301)
class TestDocs(TestCase):
def test_doc_urls(self):
eq_('/en-US/developers/docs/', reverse('devhub.docs', args=[]))
eq_('/en-US/developers/docs/te', reverse('devhub.docs', args=['te']))
eq_('/en-US/developers/docs/te/st', reverse('devhub.docs',
args=['te/st']))
urls = [(reverse('devhub.docs', args=["getting-started"]), 301),
(reverse('devhub.docs', args=["how-to"]), 301),
(reverse('devhub.docs', args=["how-to/other-addons"]), 301),
(reverse('devhub.docs', args=["fake-page"]), 404),
(reverse('devhub.docs', args=["how-to/fake-page"]), 404),
(reverse('devhub.docs'), 301)]
index = reverse('devhub.index')
for url in urls:
r = self.client.get(url[0])
eq_(r.status_code, url[1])
if url[1] == 302: # Redirect to the index page
self.assert3xx(r, index)
class TestRemoveLocale(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestRemoveLocale, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.url = reverse('devhub.addons.remove-locale', args=['a3615'])
assert self.client.login(username='<EMAIL>', password='password')
def test_bad_request(self):
r = self.client.post(self.url)
eq_(r.status_code, 400)
def test_success(self):
self.addon.name = {'en-US': 'woo', 'el': 'yeah'}
self.addon.save()
self.addon.remove_locale('el')
qs = (Translation.objects.filter(localized_string__isnull=False)
.values_list('locale', flat=True))
r = self.client.post(self.url, {'locale': 'el'})
eq_(r.status_code, 200)
eq_(sorted(qs.filter(id=self.addon.name_id)), ['en-US'])
def test_delete_default_locale(self):
r = self.client.post(self.url, {'locale': self.addon.default_locale})
eq_(r.status_code, 400)
def test_remove_version_locale(self):
version = self.addon.versions.all()[0]
version.releasenotes = {'fr': 'oui'}
version.save()
self.client.post(self.url, {'locale': 'fr'})
res = self.client.get(reverse('devhub.versions.edit',
args=[self.addon.slug, version.pk]))
doc = pq(res.content)
        # There are 2 fields, one for en-us, one for init.
eq_(len(doc('div.trans textarea')), 2)
class TestXssOnAddonName(amo.tests.TestXss):
def test_devhub_feed_page(self):
url = reverse('devhub.feed', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
def test_devhub_addon_edit_page(self):
url = reverse('devhub.addons.edit', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
def test_devhub_version_edit_page(self):
url = reverse('devhub.versions.edit', args=[self.addon.slug,
self.addon.latest_version.id])
self.assertNameAndNoXSS(url)
def test_devhub_version_list_page(self):
url = reverse('devhub.addons.versions', args=[self.addon.slug])
self.assertNameAndNoXSS(url)
```
#### File: olympia/editors/admin.py
```python
from django.contrib import admin
from olympia.translations.helpers import truncate
from .models import CannedResponse, EventLog, ReviewerScore
class CannedResponseAdmin(admin.ModelAdmin):
def truncate_response(obj):
return truncate(obj.response, 50)
truncate_response.short_description = 'Response'
list_display = ('name', truncate_response)
list_filter = ('type',)
class EventLogAdmin(admin.ModelAdmin):
list_display = ('created', 'type', 'action', 'field', 'user',
'changed_id', 'added', 'removed', 'notes')
list_filter = ('type', 'action')
readonly_fields = list_display
date_hierarchy = 'created'
raw_id_fields = ('user',)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class ReviewerScoreAdmin(admin.ModelAdmin):
list_display = ('user', 'score', 'note_key', 'note', 'created')
raw_id_fields = ('user', 'addon')
fieldsets = (
(None, {
'fields': ('user', 'addon', 'score', 'note'),
}),
)
list_filter = ('note_key',)
admin.site.register(CannedResponse, CannedResponseAdmin)
admin.site.register(EventLog, EventLogAdmin)
admin.site.register(ReviewerScore, ReviewerScoreAdmin)
```
#### File: olympia/pages/tests.py
```python
from django.conf import settings
from nose.tools import eq_
from olympia.amo.tests import TestCase
from olympia.amo.urlresolvers import reverse
class TestPages(TestCase):
def _check(self, url, status):
resp = self.client.get(reverse(url))
eq_(resp.status_code, status)
def test_status(self):
pages = ['pages.about', 'pages.credits', 'pages.faq',
'pages.acr_firstrun', 'pages.dev_faq', 'pages.review_guide',
'pages.sunbird']
for page in pages:
self._check(page, 200)
class TestRedirects(TestCase):
def _check(self, pages):
for old, new in pages.iteritems():
if new.startswith('http'):
r = self.client.get(old)
eq_(r['Location'], new)
else:
r = self.client.get(old, follow=True)
self.assert3xx(r, new, 301)
def test_app_pages(self):
self._check({
'/en-US/firefox/pages/compatibility_firstrun':
reverse('pages.acr_firstrun'),
'/en-US/firefox/pages/validation': settings.VALIDATION_FAQ_URL,
})
def test_nonapp_pages(self):
self._check({
'/en-US/pages/developer_faq': reverse('pages.dev_faq'),
'/en-US/pages/review_guide': reverse('pages.review_guide'),
'/en-US/pages/developer_agreement': reverse(
'devhub.docs', args=['policies/agreement']),
})
```
#### File: olympia/search/middleware.py
```python
import logging
from django.shortcuts import render
from elasticsearch import TransportError
log = logging.getLogger('z.es')
class ElasticsearchExceptionMiddleware(object):
def process_exception(self, request, exception):
if issubclass(exception.__class__, TransportError):
log.error(u'Elasticsearch error: %s' % exception)
return render(request, 'search/down.html', status=503)
```
#### File: olympia/sharing/helpers.py
```python
from jingo import register
import jinja2
from olympia import sharing
@register.inclusion_tag('sharing/sharing_widget.html')
@jinja2.contextfunction
def sharing_widget(context, obj, condensed=False):
c = dict(context.items())
services = sharing.get_services()
c.update({
'condensed': condensed,
'services': services,
'obj': obj,
})
return c
@register.inclusion_tag('sharing/sharing_box.html')
@jinja2.contextfunction
def sharing_box(context):
request = context['request']
services = sharing.get_services()
c = dict(context.items())
c.update({
'request': request,
'user': request.user,
'services': services
})
return c
```
#### File: users/tests/test_backends.py
```python
from django.contrib.auth import authenticate
from olympia.amo.tests import TestCase
class TestAmoUserBackend(TestCase):
fixtures = ['users/test_backends']
def test_success(self):
assert authenticate(username='<EMAIL>',
password='password')
def test_failure(self):
assert not authenticate(username='<EMAIL>', password='x')
```
#### File: olympia/zadmin/forms.py
```python
import os
import re
from django import forms
from django.conf import settings
from django.forms import ModelForm
from django.forms.models import modelformset_factory
from django.template import Context, Template, TemplateSyntaxError
import commonware.log
import happyforms
from piston.models import Consumer
from product_details import product_details
from tower import ugettext_lazy as _lazy
from quieter_formset.formset import BaseModelFormSet
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo.urlresolvers import reverse
from olympia.applications.models import AppVersion
from olympia.bandwagon.models import (
Collection, FeaturedCollection, MonthlyPick)
from olympia.compat.forms import CompatForm as BaseCompatForm
from olympia.files.models import File
from olympia.zadmin.models import SiteEvent, ValidationJob
LOGGER_NAME = 'z.zadmin'
log = commonware.log.getLogger(LOGGER_NAME)
class DevMailerForm(happyforms.Form):
_choices = [('eula',
'Developers who have set up EULAs for active add-ons'),
('sdk', 'Developers of active SDK add-ons'),
('all_extensions', 'All extension developers')]
recipients = forms.ChoiceField(choices=_choices, required=True)
subject = forms.CharField(widget=forms.TextInput(attrs=dict(size='100')),
required=True)
preview_only = forms.BooleanField(initial=True, required=False,
label=u'Log emails instead of sending')
message = forms.CharField(widget=forms.Textarea, required=True)
class BulkValidationForm(happyforms.ModelForm):
application = forms.ChoiceField(
label=_lazy(u'Application'),
choices=amo.APPS_CHOICES)
curr_max_version = forms.ChoiceField(
label=_lazy(u'Current Max. Version'),
choices=[('', _lazy(u'Select an application first'))])
target_version = forms.ChoiceField(
label=_lazy(u'Target Version'),
choices=[('', _lazy(u'Select an application first'))])
finish_email = forms.CharField(
required=False,
label=_lazy(u'Email when finished'))
class Meta:
model = ValidationJob
fields = ('application', 'curr_max_version', 'target_version',
'finish_email')
def __init__(self, *args, **kw):
kw.setdefault('initial', {})
kw['initial']['finish_email'] = settings.FLIGTAR
super(BulkValidationForm, self).__init__(*args, **kw)
w = self.fields['application'].widget
# Get the URL after the urlconf has loaded.
w.attrs['data-url'] = reverse('zadmin.application_versions_json')
def version_choices_for_app_id(self, app_id):
versions = AppVersion.objects.filter(application=app_id)
return [(v.id, v.version) for v in versions]
def clean_application(self):
app_id = int(self.cleaned_data['application'])
self.cleaned_data['application'] = app_id
choices = self.version_choices_for_app_id(app_id)
self.fields['target_version'].choices = choices
self.fields['curr_max_version'].choices = choices
return self.cleaned_data['application']
def _clean_appversion(self, field):
return AppVersion.objects.get(pk=int(field))
def clean_curr_max_version(self):
return self._clean_appversion(self.cleaned_data['curr_max_version'])
def clean_target_version(self):
return self._clean_appversion(self.cleaned_data['target_version'])
path = os.path.join(settings.ROOT, 'src/olympia/zadmin/templates/zadmin')
texts = {
'validation': open('%s/%s' % (path, 'validation-email.txt')).read(),
}
varname = re.compile(r'{{\s*([a-zA-Z0-9_]+)\s*}}')
class NotifyForm(happyforms.Form):
subject = forms.CharField(widget=forms.TextInput, required=True)
preview_only = forms.BooleanField(
initial=True, required=False,
label=_lazy(u'Log emails instead of sending'))
text = forms.CharField(widget=forms.Textarea, required=True)
variables = ['{{PASSING_ADDONS}}', '{{FAILING_ADDONS}}', '{{APPLICATION}}',
'{{VERSION}}']
variable_names = [varname.match(v).group(1) for v in variables]
def __init__(self, *args, **kw):
kw.setdefault('initial', {})
if 'text' in kw:
kw['initial']['text'] = texts[kw.pop('text')]
kw['initial']['subject'] = ('Add-on compatibility with '
'{{APPLICATION}} {{VERSION}}')
super(NotifyForm, self).__init__(*args, **kw)
def check_template(self, data):
try:
Template(data).render(Context({}))
except TemplateSyntaxError, err:
raise forms.ValidationError(err)
return data
def clean_text(self):
return self.check_template(self.cleaned_data['text'])
def clean_subject(self):
return self.check_template(self.cleaned_data['subject'])
class FeaturedCollectionForm(happyforms.ModelForm):
LOCALES = (('', u'(Default Locale)'),) + tuple(
(i, product_details.languages[i]['native'])
for i in settings.AMO_LANGUAGES)
application = forms.ChoiceField(amo.APPS_CHOICES)
collection = forms.CharField(widget=forms.HiddenInput)
locale = forms.ChoiceField(choices=LOCALES, required=False)
class Meta:
model = FeaturedCollection
fields = ('application', 'locale')
def clean_collection(self):
application = self.cleaned_data.get('application', None)
collection = self.cleaned_data.get('collection', None)
if not Collection.objects.filter(id=collection,
application=application).exists():
raise forms.ValidationError(
u'Invalid collection for this application.')
return collection
def save(self, commit=False):
collection = self.cleaned_data['collection']
f = super(FeaturedCollectionForm, self).save(commit=commit)
f.collection = Collection.objects.get(id=collection)
f.save()
return f
class BaseFeaturedCollectionFormSet(BaseModelFormSet):
def __init__(self, *args, **kw):
super(BaseFeaturedCollectionFormSet, self).__init__(*args, **kw)
for form in self.initial_forms:
try:
form.initial['collection'] = (
FeaturedCollection.objects
.get(id=form.instance.id).collection.id)
except (FeaturedCollection.DoesNotExist, Collection.DoesNotExist):
form.initial['collection'] = None
FeaturedCollectionFormSet = modelformset_factory(
FeaturedCollection,
form=FeaturedCollectionForm, formset=BaseFeaturedCollectionFormSet,
can_delete=True, extra=0)
class OAuthConsumerForm(happyforms.ModelForm):
class Meta:
model = Consumer
fields = ['name', 'description', 'status']
class MonthlyPickForm(happyforms.ModelForm):
image = forms.CharField(required=False)
blurb = forms.CharField(max_length=200,
widget=forms.Textarea(attrs={'cols': 20,
'rows': 2}))
class Meta:
model = MonthlyPick
widgets = {
'addon': forms.TextInput(),
}
fields = ('addon', 'image', 'blurb', 'locale')
MonthlyPickFormSet = modelformset_factory(MonthlyPick, form=MonthlyPickForm,
can_delete=True, extra=0)
class AddonStatusForm(ModelForm):
class Meta:
model = Addon
fields = ('status', 'highest_status')
class FileStatusForm(ModelForm):
class Meta:
model = File
fields = ('status',)
FileFormSet = modelformset_factory(File, form=FileStatusForm,
formset=BaseModelFormSet, extra=0)
class SiteEventForm(ModelForm):
class Meta:
model = SiteEvent
fields = ('start', 'end', 'event_type', 'description',
'more_info_url')
class YesImSure(happyforms.Form):
yes = forms.BooleanField(required=True, label="Yes, I'm sure")
class CompatForm(BaseCompatForm):
_minimum_choices = [(x, x) for x in xrange(100, -10, -10)]
minimum = forms.TypedChoiceField(choices=_minimum_choices, coerce=int,
required=False)
_ratio_choices = [('%.1f' % (x / 10.0), '%.0f%%' % (x * 10))
for x in xrange(9, -1, -1)]
ratio = forms.ChoiceField(choices=_ratio_choices, required=False)
class GenerateErrorForm(happyforms.Form):
error = forms.ChoiceField(choices=(
['zerodivisionerror', 'Zero Division Error (will email)'],
['iorequesterror', 'IORequest Error (no email)'],
['heka_statsd', 'Heka statsd message'],
['heka_json', 'Heka JSON message'],
['heka_cef', 'Heka CEF message'],
['heka_sentry', 'Heka Sentry message'],
['amo_cef', 'AMO CEF message']))
def explode(self):
error = self.cleaned_data.get('error')
if error == 'zerodivisionerror':
1 / 0
elif error == 'iorequesterror':
class IOError(Exception):
pass
raise IOError('request data read error')
elif error == 'heka_cef':
environ = {'REMOTE_ADDR': '127.0.0.1', 'HTTP_HOST': '127.0.0.1',
'PATH_INFO': '/', 'REQUEST_METHOD': 'GET',
'HTTP_USER_AGENT': 'MySuperBrowser'}
config = {'cef.version': '0',
'cef.vendor': 'Mozilla',
'cef.device_version': '3',
'cef.product': 'zamboni',
'cef': True}
settings.HEKA.cef(
'xx\nx|xx\rx', 5, environ, config,
username='me', ext1='ok=ok', ext2='ok\\ok',
logger_info='settings.HEKA')
elif error == 'heka_statsd':
settings.HEKA.incr(name=LOGGER_NAME)
elif error == 'heka_json':
settings.HEKA.heka(
type="heka_json",
fields={'foo': 'bar', 'secret': 42,
'logger_type': 'settings.HEKA'})
elif error == 'heka_sentry':
# These are local variables only used
# by Sentry's frame hacking magic.
# They won't be referenced which may trigger flake8
# errors.
heka_conf = settings.HEKA_CONF # NOQA
active_heka_conf = settings.HEKA._config # NOQA
try:
1 / 0
except:
settings.HEKA.raven('heka_sentry error triggered')
elif error == 'amo_cef':
from olympia.amo.utils import log_cef
env = {'REMOTE_ADDR': '127.0.0.1', 'HTTP_HOST': '127.0.0.1',
'PATH_INFO': '/', 'REQUEST_METHOD': 'GET',
'HTTP_USER_AGENT': 'MySuperBrowser'}
log_cef(settings.STATSD_PREFIX, 6, env)
class PriceTiersForm(happyforms.Form):
prices = forms.FileField()
``` |
{
"source": "jpeumesmo/lexico",
"score": 3
} |
#### File: jpeumesmo/lexico/gera.py
```python
import sys
path = sys.path[0]
assembly = open(path+"/assembly.txt",'w')
codigo = []
def geraCod(comando):
# global assembly
global cont
global codigo
#print(comando)
codigo.append(comando)
#assembly.write(comando+"\n")
return
def fim():
global assembly
global codigo
for c in codigo:
assembly.write(str(c))
assembly.write("\n")
assembly.close()
``` |
{
"source": "jpeumesmo/poc",
"score": 3
} |
#### File: jpeumesmo/poc/header.py
```python
import numpy as np
import cv2
import imutils
def getArea(image):
im2, contours, hierarchy = cv2.findContours(image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:1]
cnt = cnts[0]
return cv2.contourArea(cnt)
def avaliar(image, vertices):
mask = np.zeros_like(image)
if len(mask.shape)==2:
cv2.fillPoly(mask, vertices, 255)
else:
        cv2.fillPoly(mask, vertices, (255,)*mask.shape[2]) # in case the input image has a channel dimension
return cv2.bitwise_and(image, mask)
def biggest_region(image,frame):
kernel = np.ones((5,5),np.float32)/25
cv2.filter2D(image,-1,kernel)
contours, hierarchy = cv2.findContours(image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(contours, key=cv2.contourArea, reverse=True)[:1]
cnt = cnts[0]
#cv2.drawContours(frame, [cnt], 0, (255,255,255), 1)
cv2.drawContours(frame, [cnt], 0, (255,255,255), -1)
return frame
def negative(imagem):
imagem = (255-imagem)
return imagem
def filter_region(image, vertices):
"""
Create the mask using the vertices and apply it to the input image
get from https://github.com/naokishibuya/car-finding-lane-lines
"""
mask = np.zeros_like(image)
if len(mask.shape)==2:
cv2.fillPoly(mask, vertices, 255)
else:
        cv2.fillPoly(mask, vertices, (255,)*mask.shape[2]) # in case the input image has a channel dimension
return cv2.bitwise_and(image, mask)
def select_region(image):
"""
It keeps the region surrounded by the `vertices` (i.e. polygon). Other area is set to 0 (black).
get from https://github.com/naokishibuya/car-finding-lane-lines
"""
# first, define the polygon by vertices
rows, cols = image.shape[:2]
bottom_left = [0, rows]
top_left = [cols*0.3, rows*0.7]
bottom_right = [cols, rows]
top_right = [cols*0.7, rows*0.7]
# the vertices are an array of polygons (i.e array of arrays) and the data type must be integer
vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
return filter_region(image, vertices)
# images showing the region of interest only
#sroi_images = list(map(select_region, edge_images))
def skel(image):
skel = imutils.skeletonize(image, size=(3, 3))
return skel
``` |
{
"source": "jpevans/mdssdiff",
"score": 2
} |
#### File: mdssdiff/test/test_mdsspath.py
```python
from __future__ import print_function
import pytest
import sys
import os
import shutil
import shlex
import subprocess
import pdb #; pdb.set_trace()
from mdssdiff import mdsspath
from mdssdiff import mdssdiff
dirs = ["1","2","3"]
dirtree = os.path.join(*dirs)
paths = [ ["1","lala"], ["1","po"], ["1","2","Mickey"], ["1","2","Minny"], ["1","2","Pluto"], ["1","2","3","Ren"], ["1","2","3","Stimpy"] ]
remote = "remote"
dirtreeroot = dirs[0]
verbose=False
prefix='test_mdss'
dumbname = 'nowayisthereadirectorycalledthis'
# Test if we have a working mdss to connect to
try:
if 'DEBUGLOCAL' in os.environ:
raise ValueError('A very specific bad thing happened')
project=os.environ['PROJECT']
mdsspath.mdss_ls(".",project)
except:
    # Monkey-patch to use local file commands if we don't have a working mdss
print("\n\n!!! No mdss: Monkey patching to use local commands !!!\n")
mdsspath._mdss_ls_cmd = 'ls -l --time-style=+"%Y-%m-%d %H:%M ___ "'
mdsspath._mdss_put_cmd = 'cp'
mdsspath._mdss_get_cmd = 'cp'
mdsspath._mdss_mkdir_cmd = 'mkdir'
mdsspath._mdss_rm_cmd = 'rm'
mdsspath._mdss_rmdir_cmd = 'rmdir'
project=''
def touch(fname, times=None):
# http://stackoverflow.com/a/1160227/4727812
with open(fname, 'a'):
os.utime(fname, times)
def mtime(fname):
return os.stat(fname).st_mtime
def runcmd(cmd):
subprocess.check_call(shlex.split(cmd),stderr=subprocess.STDOUT)
def setup_module(module):
if verbose: print ("setup_module module:%s" % module.__name__)
try:
shutil.rmtree(dirtreeroot)
except:
pass
os.makedirs(dirtree)
for p in paths:
touch(os.path.join(*p))
# Write 3 bytes into a local file
file = os.path.join(*paths[2])
fh = open(file,"wb")
fh.write(b"\x5F\x9D\x3E")
fh.close()
# shutil.copytree(dirtreeroot, os.path.join(remote,dirtreeroot))
# Make our top level directory
runcmd(" ".join([mdsspath._mdss_mkdir_cmd.format(project),prefix]))
# Copy files into it
runcmd(" ".join([mdsspath._mdss_put_cmd.format(project),'-r',dirs[0],prefix]))
def teardown_module(module):
if verbose: print ("teardown_module module:%s" % module.__name__)
shutil.rmtree(dirtreeroot)
runcmd(" ".join([mdsspath._mdss_rm_cmd.format(project),'-r',prefix]))
runcmd(" ".join([mdsspath._mdss_rmdir_cmd.format(project),dumbname]))
def test_integrity():
assert(os.path.isdir(dirs[0]))
assert(not mdsspath.isdir(dumbname,project))
mdsspath.mdss_mkdir(dumbname,project)
assert(mdsspath.isdir(dumbname,project))
assert(mdsspath.mdss_listdir(os.path.join(prefix,dirs[0]),project)[0:2] == (['2'], ['lala', 'po']))
assert(mdsspath.getsize(os.path.join(prefix,*paths[2]),project) == 3)
def test_localmtime():
"""
Test localmtime returns datetime object without seconds resolution
"""
dt = mdsspath.localmtime(os.path.join(*paths[2]))
assert(dt.second == 0)
def test_get():
# Testing slightly out of order, but it is very useful to use it here so I will
listinglocal = mdssdiff.getlisting(dirs[0],recursive=True)
for file in listinglocal:
# print(file)
assert(os.path.isfile(file))
# This will (indirectly) test mdsspath.walk
listingremote = mdssdiff.getlisting(os.path.join(prefix,dirs[0]),project,recursive=True)
# pdb.set_trace()
for file in listingremote:
# print(file)
assert(mdsspath.isfile(file,project))
assert(os.path.relpath(file,prefix) in listinglocal)
# check the modification times are the same (within a minute resolution)
assert(mdsspath.getmtime(file,project) == mdsspath.localmtime(os.path.relpath(file,prefix)))
missingfile = listinglocal.pop()
os.remove(missingfile)
mdsspath.remote_get(prefix, missingfile, project)
assert(os.path.isfile(missingfile))
def test_put():
newfile = os.path.join(dirtree,'newfile')
touch(newfile)
mdsspath.remote_put(prefix, newfile, project)
mdsspath.remote_get(prefix, newfile, project)
assert(os.path.isfile(newfile))
``` |
{
"source": "JPEWdev/aaop-utils",
"score": 2
} |
#### File: aaop-utils/aaop/aaopcnvt.py
```python
import argparse
import logging
import sys
import math
from aaop import AAOPFile
def to_obj(args, infile, outfile):
for c in infile.comments:
outfile.write('# {c}\n'.format(c=c))
# Write all vertexes
for stack in range(infile.num_stacks):
outfile.write('# Stack {stack:d}\n'.format(stack=stack))
for spoke in range(infile.num_spokes):
phi = 2 * math.pi * spoke / infile.num_spokes
rho = infile.spokes[stack][spoke]
outfile.write('v {x:f} {y:f} {z:f}\n'.format(
z = infile.stacks[stack],
x = rho * math.cos(phi),
y = rho * math.sin(phi)))
# Write all faces. Make a rectangle for each stack that links the
# vertex in it to the next
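    # Illustration (comment added, not in the original source): with 4 spokes,
    # stack 0 starts at OBJ vertex 1 and stack 1 at vertex 5, so spoke 0 of
    # stack 0 emits the quad "f 1 2 6 5"; OBJ indices are 1-based, hence the "+ 1".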
for stack in range(infile.num_stacks - 1):
outfile.write('# Stack {stack:d}\n'.format(stack=stack))
cur_stack_idx = stack * infile.num_spokes + 1
next_stack_idx = (stack + 1) * infile.num_spokes + 1
for spoke in range(infile.num_spokes):
next_spoke = (spoke + 1) % infile.num_spokes
outfile.write('f {v1:d} {v2:d} {v3:d} {v4:d}\n'.format(
v1=cur_stack_idx + spoke,
v2=cur_stack_idx + next_spoke,
v3=next_stack_idx + next_spoke,
v4=next_stack_idx + spoke
))
if args.enclose:
outfile.write('# top cap\n')
outfile.write('f {v}\n'.format(
v=' '.join('%d' % (1 + d) for d in range(infile.num_spokes))))
outfile.write('# bottom cap\n')
outfile.write('f {v}\n'.format(
v=' '.join('%d' % (1 + d + infile.num_spokes * (infile.num_stacks - 1)) for d in range(infile.num_spokes))))
def main():
def handle_output(args, infile, outfile):
if args.output == 'obj':
to_obj(args, infile, outfile)
    parser = argparse.ArgumentParser(description="Converts an AAOP file to an OBJ model")
parser.add_argument('--enclose', '-e', help="Enclose ends of model", action='store_true')
parser.add_argument('infile', help='AAOP input file (.aop). "-" for stdin')
parser.add_argument('outfile', help='Output file. "-" for stdout')
parser.add_argument('-o', '--output', help='Output format', choices=['obj'], default='obj')
args = parser.parse_args()
if args.infile == '-':
aaop = AAOPFile(sys.stdin)
else:
with open(args.infile, 'r') as infile:
aaop = AAOPFile(infile)
if args.outfile == '-':
handle_output(args, aaop, sys.stdout)
else:
with open(args.outfile, 'w') as outfile:
handle_output(args, aaop, outfile)
if __name__ == "__main__":
main()
```
#### File: aaop-utils/aaop/__init__.py
```python
import logging
logger = logging.getLogger('AAOP')
class InvalidFormat(Exception):
pass
class AAOPFile(object):
"""
AAOP file. Currently only cylindrical formats are supported.
The file format is reverse engineered from a single data file, so there are
several fields that are of unknown purpose.
The cylindrical file format records a number of "stacks". Each stack
specifies a set of points that share a Z coordinate. Each stack has a
    fixed number of "spokes". Each spoke is a number that indicates the linear
    distance from the center of the stack. Spokes are equally distributed around
the stack.
In mathematical cylindrical geometry, the stack defines the Z coordinate,
the spoke length is rho, and phi can be calculated by evenly dividing the
spokes over 2 pi.
"""
def __init__(self, aaop):
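        # A small worked example of the cylindrical mapping described in the
        # class docstring (comment added for illustration, not part of the
        # original source): for spoke index k of num_spokes, in the stack at
        # height z with spoke value rho,
        #   phi = 2 * pi * k / num_spokes
        #   (x, y, z) = (rho * cos(phi), rho * sin(phi), z)
        # which is the vertex computation performed by to_obj() in aaopcnvt.py.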
def get_line():
while True:
yield next(aaop).rstrip()
def next_line():
return next(get_line())
self.version = next_line()
if self.version != 'AAOP1':
raise InvalidFormat('Unknown Version "%s"' % self.version)
self.comments = []
for l in get_line():
if l == 'END COMMENTS':
break
self.comments.append(l)
self.fmt = next_line()
unknown1 = next_line()
unknown2 = next_line()
self.num_spokes = int(next_line())
unknown3 = next_line()
self.num_stacks = int(next_line())
unknown4 = next_line()
logger.info('format = %s', self.fmt)
logger.info('unknown1 = %s', unknown1)
logger.info('unknown2 = %s', unknown2)
logger.info('number of spokes = %d', self.num_spokes)
logger.info('unknown3 = %s', unknown3)
logger.info('number of stacks = %d', self.num_stacks)
logger.info('unknown4 = %s', unknown4)
if self.fmt != 'CYLINDRICAL':
raise InvalidFormat('Unknown format "%s"' % self.fmt)
self.stacks = []
for i in range(self.num_stacks):
self.stacks.append(float(next_line()))
self.spokes = []
for stack in range(self.num_stacks):
d = []
for spoke in range(self.num_spokes):
d.append(float(next_line()))
self.spokes.append(d)
``` |
{
"source": "JPEWdev/pyradur",
"score": 2
} |
#### File: pyradur/pyradur/db.py
```python
import sqlite3
import logging
import json
logger = logging.getLogger('pyradur.db')
class DBManager(object):
def __init__(self, logger=logger):
self.dbs = {}
self.db_factory = None
self.logger = logger
def set_db_factory(self, factory):
self.db_factory = factory
def add_db(self, name, db):
self.logger.debug("Added database '%s'", name)
self.dbs[name] = db
def get_db(self, name):
try:
return self.dbs[name]
except KeyError:
if self.db_factory:
self.add_db(name, self.db_factory(name))
return self.dbs[name]
self.logger.debug("Database '%s' not found", name)
raise
class Sqlite3DB(object):
def __init__(self, db_path, *args, **kwargs):
self.db_path = db_path
self.db = sqlite3.connect(db_path, *args, **kwargs)
self.db.text_factory = str
self.cursor = self.db.cursor()
self.cursor.execute("pragma journal_mode = WAL;")
self.cursor.execute("CREATE TABLE IF NOT EXISTS data(key TEXT PRIMARY KEY NOT NULL, value TEXT);")
self.db.commit()
def __getitem__(self, key):
logger.debug('Getting %s', key)
self.cursor.execute("SELECT * from data where key=?;", [key])
row = self.cursor.fetchone()
logger.debug('%s = %s', key, row)
if row is not None:
return json.loads(row[1])
raise KeyError
def __setitem__(self, key, value):
logger.debug('Setting %s = %s', key, value)
self.cursor.execute("SELECT * from data where key=?;", [key])
row = self.cursor.fetchone()
if row is not None:
self.cursor.execute("UPDATE data SET value=? WHERE key=?;", [json.dumps(value), key])
else:
self.cursor.execute("INSERT into data(key, value) values (?, ?);", [key, json.dumps(value)])
self.db.commit()
def __delitem__(self, key):
logger.debug('Deleting %s', key)
self.cursor.execute("DELETE from data where key=?;", [key])
self.db.commit()
```
#### File: pyradur/tests/test_pyradur.py
```python
from pyradur import Dict
from pyradur.db import Sqlite3DB
from pyradur.server import SockServer
import tempfile
import threading
import unittest
import shutil
import os
import logging
import sys
class CommonTests(object):
use_cache = True
close_on_cleanup = True
def _server_thread(self, event):
try:
self.server.db.add_db('var', Sqlite3DB(':memory:'))
event.set()
self.server.serve_forever()
# Process any outstanding events until the queue is empty
while self.server.handle_request():
pass
except Exception as e:
logging.exception('Server raised %s', e, exc_info=True)
finally:
# Close down the server. This prevents the main thread from being
# stuck blocking on a response from the server in the event that it
# has an exception
self.server.close()
def setUp(self):
root = logging.getLogger()
root.setLevel(logging.DEBUG)
self.handler = logging.StreamHandler(sys.stdout)
self.handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
self.handler.setFormatter(formatter)
root.addHandler(self.handler)
self.addCleanup(root.removeHandler, self.handler)
self.tempdir = tempfile.mkdtemp(prefix='pyradur-')
self.addCleanup(shutil.rmtree, self.tempdir, ignore_errors=True)
self.sock_path = os.path.join(self.tempdir, 'sock')
self.server = SockServer(self.sock_path)
self.sever_suspended = False
try:
event = threading.Event()
self.server_thread = threading.Thread(target=self._server_thread, args=[event])
self.server_thread.start()
event.wait()
self.addCleanup(self.check_server)
self.addCleanup(self.server_thread.join)
self.addCleanup(self.server.shutdown)
except Exception as e:
self.server.close()
raise e
def check_server(self):
# Check that all clients have disconnected
self.assertDictEqual(self.server.clients, {})
def get_dict(self, name, share_connection=True):
d = Dict(self.sock_path, name, use_cache=self.use_cache, share_connection=share_connection)
if self.close_on_cleanup:
self.addCleanup(lambda: d.close())
return d
def test_basic_get_set(self):
d = self.get_dict('var')
d['foo'] = 'bar'
self.assertEqual(d['foo'], 'bar')
with self.assertRaises(KeyError):
d['baz']
def test_get_set_shared(self):
a = self.get_dict('var')
b = self.get_dict('var')
a['foo'] = 'bar'
self.assertEqual(b['foo'], 'bar')
def test_get_set_nonshared(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertEqual(a.get('bat', 'baz'), 'baz')
a.sync()
self.assertFalse('baz' in b)
a.set('test', 'blah')
a.sync()
self.assertEqual(b['test'], 'blah')
def test_del_nonshared(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
del a['foo']
a.sync()
with self.assertRaises(KeyError):
b['foo']
def test_setdefault(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
self.assertEqual(a.setdefault('foo', 'bar'), 'bar')
a.sync()
self.assertEqual(b['foo'], 'bar')
def test_server_suspend(self):
a = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
with self.server.suspended():
a['foo'] = 'test'
a.sync()
self.assertEqual(a['foo'], 'test')
def test_contains(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertTrue('foo' in b)
self.assertFalse('bar' in b)
def test_cache_grow(self):
import mmap
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
count = mmap.PAGESIZE * 2
for i in range(count):
key = 'foo%d' % i
val = 'bar%d' % i
a[key] = val
self.assertEqual(a[key], val)
a.sync()
for i in range(count):
key = 'foo%d' % i
val = 'bar%d' % i
self.assertEqual(a[key], val)
self.assertEqual(b[key], val)
def test_missing_var(self):
a = self.get_dict('var')
with self.assertRaises(NameError):
b = self.get_dict('does-not-exist', share_connection=False)
with self.assertRaises(NameError):
b = self.get_dict('does-not-exist')
def test_var_factory(self):
def factory(name):
return Sqlite3DB(':memory:')
a = self.get_dict('var')
self.server.db.set_db_factory(factory)
b = self.get_dict('test1', share_connection=False)
c = self.get_dict('test2')
def test_cross_var(self):
def factory(name):
return Sqlite3DB(':memory:')
self.server.db.set_db_factory(factory)
a = self.get_dict('var', share_connection=False)
b = self.get_dict('test', share_connection=False)
a['foo'] = 'bar'
a.sync()
with self.assertRaises(KeyError):
b['foo']
b['foo'] = 'baz'
b.sync()
self.assertEqual(a['foo'], 'bar')
self.assertEqual(b['foo'], 'baz')
class NoCacheTests(CommonTests, unittest.TestCase):
use_cache = False
def test_cached(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertFalse(b.is_cached('foo'))
self.assertFalse(b.is_cached('not-present'))
a['foo'] = 'test'
b.invalidate('foo')
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
def test_invalidate_all(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertFalse(b.is_cached('foo'))
with self.server.suspended():
a['foo'] = 'test'
b.invalidate_all()
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
class CacheTests(CommonTests, unittest.TestCase):
def test_cached(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertTrue(b.is_cached('foo'))
self.assertFalse(b.is_cached('not-present'))
with self.server.suspended():
a['foo'] = 'test'
self.assertEqual(b['foo'], 'bar')
b.invalidate('foo')
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
def test_invalidate_all(self):
a = self.get_dict('var', share_connection=False)
b = self.get_dict('var', share_connection=False)
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertTrue(b.is_cached('foo'))
with self.server.suspended():
a['foo'] = 'test'
self.assertEqual(b['foo'], 'bar')
b.invalidate_all()
self.assertFalse(b.is_cached('foo'))
a.sync()
self.assertEqual(b['foo'], 'test')
class ImplicitCloseTests(CacheTests):
close_on_cleanup = False
def test_close(self):
a = self.get_dict('var')
b = self.get_dict('var', share_connection=False)
c = self.get_dict('var')
a['foo'] = 'bar'
a.sync()
self.assertEqual(b['foo'], 'bar')
self.assertEqual(c['foo'], 'bar')
a.close()
c['baz'] = 'bat'
c.sync()
self.assertEqual(b['baz'], 'bat')
del c
del a
b['test'] = 'blah'
``` |
{
"source": "JPEWdev/pysimplemp",
"score": 2
} |
#### File: pysimplemp/pysimplemp/map.py
```python
import contextlib
import logging
import multiprocessing
import os
import signal
import sys
import time
from .errors import ResultQueueEmpty
from .util import sigmask
logger = logging.getLogger("pysimplemp.map")
class MapResult(object):
def __init__(self, is_exception, result, job_idx):
self.is_exception = is_exception
self.result = result
self.job_idx = job_idx
def get(self):
if self.is_exception:
raise self.result
return self.result
class MapPool(object):
MAX_WORKERS = 100000
# The job is ready to be executed
STATE_READY = -1
# The main process is reading the result from the worker queue
STATE_READING_RESULT = -2
# The job is finished
STATE_FINISHED = -3
# A worker process is currently processing the job. The index of the
    # worker process can be found by subtracting STATE_IN_PROGRESS from the value
STATE_IN_PROGRESS = MAX_WORKERS
    # A worker process has written (or will write) the result to its result queue.
# The index of the worker process can be found by subtracting
# STATE_QUEUEING_RESULT
STATE_QUEUEING_RESULT = STATE_IN_PROGRESS + MAX_WORKERS
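    # Example of the encoding (comment added for illustration): with
    # MAX_WORKERS == 100000, a raw state value of 100003 decodes to
    # (STATE_IN_PROGRESS, worker 3) and 200003 decodes to
    # (STATE_QUEUEING_RESULT, worker 3); see _get_state() below.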
def __init__(
self,
func,
jobs,
num_processes=None,
interruptable=True,
init=None,
deinit=None,
ctx=None,
):
"""
        Create a new pool of worker processes to apply `func` to each item of
        `jobs`. All jobs must be known when the pool is created, so `jobs` will
        be converted to a `list`.
The number of worker processes is controlled by the `num_processes`
        argument. If unspecified, `multiprocessing.cpu_count()` will be used.
If `interruptable` is `True` (the default), `func` can be interrupted
        when `join()` is called on the pool. Otherwise, the worker process will
        not be terminated until `func` has completed. Care must be taken when
using this option, as `join()` may wait forever if `func` never exits.
        `init` specifies a function to run once when the worker process is
initialized, and is guaranteed to run, even if the worker process is
terminated with `join()`
        `deinit` specifies a function to run when the worker process terminates.
        It is guaranteed to run as long as `init` does not raise an exception.
`ctx` is the multiprocessing context to use, or
`multiprocessing.get_context()` if unspecified
The pool may be used as a context manager which will automatically call
`start()` and `join()`::
            with MapPool(foo, jobs) as p:
                r = p.results()
        is equivalent to::
            p = MapPool(foo, jobs)
            try:
                p.start()
                r = p.results()
            finally:
                p.terminate()
                p.join()
"""
self.jobs = list(jobs)
self.func = func
self.ctx = ctx or multiprocessing.get_context()
self.interruptable = interruptable
self.result_queues = []
self.result_semaphore = self.ctx.Semaphore(0)
self.processes = []
self.num_processes = min(
num_processes or multiprocessing.cpu_count(), self.MAX_WORKERS
)
self.init = init
self.deinit = deinit
self.states = self.ctx.Array("i", [self.STATE_READY] * len(self.jobs))
@contextlib.contextmanager
def _sigblock(self):
# Helper function to block SIGTERM
with sigmask(signal.SIG_BLOCK, [signal.SIGTERM]):
yield
def _child_worker(self, worker_idx, queue):
def do_exit(*args, **kwargs):
if self.deinit:
self.deinit()
os._exit(0)
try:
if self.init:
self.init()
except:
os._exit(1)
signal.signal(signal.SIGTERM, do_exit)
# This thread is ready to be terminated. Unblock SIGTERM inherited from
# parent
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])
while True:
# Look for a job to process and reserve one if found. Block the
# termination signal to ensure that this is atomic and the process
# isn't killed with the array lock held
job_index = None
with self._sigblock():
for idx, state, _ in self._foreach_state():
if state == self.STATE_READY:
job_index = idx
self.states[idx] = self.STATE_IN_PROGRESS + worker_idx
break
if job_index is None:
# No work left to do
do_exit()
break
if self.interruptable:
mask = []
else:
mask = [signal.SIGTERM]
try:
with sigmask(signal.SIG_BLOCK, mask):
result = MapResult(
False, self.func(*self.jobs[job_index]), job_index
)
except Exception as e:
result = MapResult(True, e, job_index)
with self._sigblock():
# Mark the job as ready to be received by the main process
with self.states.get_lock():
self.states[job_index] = self.STATE_QUEUEING_RESULT + worker_idx
# Signal there is an item ready to be processed
self.result_semaphore.release()
queue.put(result)
def __enter__(self):
self.start()
return self
def __exit__(self, *args, **kwargs):
self.terminate()
self.join()
def _foreach_state(self):
with self.states.get_lock():
for idx in range(len(self.states)):
(state, worker_idx) = self._get_state(idx)
yield idx, state, worker_idx
def _get_state(self, idx):
v = self.states[idx]
if v >= self.STATE_IN_PROGRESS and v < self.STATE_QUEUEING_RESULT:
return (self.STATE_IN_PROGRESS, v - self.STATE_IN_PROGRESS)
if v >= self.STATE_QUEUEING_RESULT:
return (self.STATE_QUEUEING_RESULT, v - self.STATE_QUEUEING_RESULT)
return (v, None)
def results(self, block=True):
"""
An iterator that gets the mapping results from the worker pool. The
results may be returned in any order.
        If any job raised an exception in the worker, it will be raised in
        the parent process when its result would be returned.
If `block` is `True` (the default), the function will block until
a result is ready or there are no more results left
"""
try:
while True:
yield self.get(block)
except ResultQueueEmpty:
pass
def results_ordered(self, block=True):
"""
An iterator that gets the mapping results from the worker pool. The
results are returned in the same order as they are listed in the job
list.
        If any job raised an exception in the worker, it will be raised in
        the parent process when its result would be returned.
If `block` is `True` (the default), the function will block until
a result is ready or there are no more results left
"""
results = {}
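        # Usage sketch (comment added for illustration, not from the original
        # source): with jobs = [(1,), (2,), (3,)] and func returning its
        # argument squared, list(pool.results_ordered()) is [1, 4, 9] in job
        # order, whereas results() may yield the same values in any order.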
for i in range(len(self.jobs)):
try:
while not i in results:
result = self._get_next_result(block)
results[result.job_idx] = result
except ResultQueueEmpty:
pass
if i in results:
yield results[i].get()
else:
break
def start(self):
"""
Starts the worker pool. This must be called to create the worker pool
and have the workers start processing jobs.
`join()` must be called after this to clean up the worker pool.
"""
# Flush to prevent duplication in worker processes
sys.stdout.flush()
sys.stderr.flush()
# Block signals. The worker processes will inherit this signal mask,
# which ensures that they cannot terminate before they have initialized
with self._sigblock():
self.result_queues = []
self.processes = []
for i in range(self.num_processes):
queue = self.ctx.SimpleQueue()
pid = os.fork()
if pid == 0:
self._child_worker(i, queue)
os._exit(0)
else:
self.processes.append(pid)
self.result_queues.append(queue)
def _get_next_result(self, block):
global logger
        # There is a small race where join() may read items out of a result queue
# before this code can change the state to STATE_READING_RESULT to
# "reserve" it. In this case, the code needs to loop again (which will
# most likely result in all states being STATE_FINISHED and raising
        # ResultQueueEmpty()). Note that for this to happen, join() must be
        # called from a thread other than the one processing results.
while True:
            # Find the queue that has the result, and mark it as being read
is_finished = True
worker_idx = None
for idx, state, widx in self._foreach_state():
if state == self.STATE_QUEUEING_RESULT:
worker_idx = widx
self.states[idx] = self.STATE_READING_RESULT
logger.debug(
"Reading result for job %i from worker %i" % (idx, worker_idx)
)
break
elif state != self.STATE_FINISHED:
is_finished = False
else:
# If we didn't find a worker and all jobs are finished,
# raise the queue empty exception
if is_finished:
raise ResultQueueEmpty()
if worker_idx is not None:
break
# Wait for any result to be ready
if not self.result_semaphore.acquire(block):
raise ResultQueueEmpty()
result = self.result_queues[worker_idx].get()
logger.debug("Done reading result")
# Mark job as finished
with self.states.get_lock():
self.states[result.job_idx] = self.STATE_FINISHED
return result
def get(self, block=True):
"""
Gets the next available result from the worker pool. Either returns
the result of the function, or raises an exception if one was raised in
the worker process. If there are no more results to be returned, a
`ResultQueueEmpty` exception is raised.
Results may be returned in any order.
If `block` is `True` (the default), the function will block until
a result is ready or there are no more results left. If `False` and the
function would block, a `ResultQueueEmpty` exception is raised.
"""
return self._get_next_result(block).get()
def terminate(self):
"""
Terminate all worker processes. This must be called before `join()`
"""
with self.states.get_lock():
for idx, state, worker_idx in self._foreach_state():
if state == self.STATE_READY:
self.states[idx] = self.STATE_FINISHED
for p in self.processes:
os.kill(p, signal.SIGTERM)
def join(self):
"""
Wait for all worker processes to exit. This must be called to cleanup
all the worker processes when finished.
Any results that have not been collected will be lost
"""
global logger
wait_pids = set(self.processes)
while True:
queue_reads = []
in_process_pids = set()
for idx, state, worker_idx in self._foreach_state():
if state == self.STATE_QUEUEING_RESULT:
# Workers block the terminate signal while writing to their
# result queue. If they are blocked waiting for data to be
# read out of the queue, they will not be able to receive
# the terminate signal. Find all tasks writing to their
# result queue and read from it (discarding the results) to
# unblock them
logger.debug(
"Discarding result for job %i from worker %i"
% (idx, worker_idx)
)
self.result_queues[worker_idx].get()
logger.debug("Done discarding")
self.states[idx] = self.STATE_FINISHED
elif state == self.STATE_IN_PROGRESS:
# If this worker is still in progress, there are a few
# possible options:
                    # 1) The worker is still executing its job (which may be
# uninterruptable)
# 2) The worker has exited
# 3) The worker has signals disabled and is waiting for the
# states lock to change the state to QUEUEING_RESULT
#
# If we detect a process in this state, we cannot block
# when waiting for the process to terminate, otherwise it
# could deadlock
in_process_pids.add(self.processes[worker_idx])
new_wait_pids = set()
for pid in wait_pids:
logger.debug("Waiting for %d" % pid)
(wait_pid, status) = os.waitpid(
pid, os.WNOHANG if pid in in_process_pids else 0
)
if (wait_pid, status) == (0, 0):
new_wait_pids.add(pid)
if not new_wait_pids:
# No processes still pending.
break
wait_pids = new_wait_pids
logger.debug("PIDs %r are still running" % wait_pids)
time.sleep(0.1)
``` |
{
"source": "jpexposito/python",
"score": 4
} |
#### File: python/clases/calculadora.py
```python
class Calculadora(): # Create the Calculadora class
    def __init__(self, operando1, operando2): # Define the operando1 and operando2 parameters
        self.operando1 = operando1 # Assign the value to the operando1 attribute
        self.operando2 = operando2 # Assign the value to the operando2 attribute
    # Definition of the sum method
def sum(self):
for operando in (self.operando1, self.operando2):
if not isinstance(operando, int) and not isinstance(operando, float):
raise TypeError
return self.operando1 + self.operando2
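# Example usage (a sketch added for illustration, not part of the original file):
#   calc = Calculadora(2, 3)
#   print(calc.sum())  # -> 5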
``` |
{
"source": "jpeyhardi/GLM",
"score": 2
} |
#### File: statiskit/glm/link.py
```python
from functools import wraps
from . import _glm
from .__glm.statiskit.glm import(_FLink,
ScalarLink,
PoissonLink,
PoissonCanonicalLink,
PoissonVLink,
BinomialLink,
BinomialCanonicalLink,
BinomialFLink,
BinaryLink,
BinaryCanonicalLink,
BinaryFLink,
NegativeBinomialLink,
NegativeBinomialCanonicalLink,
NegativeBinomialULink,
NegativeBinomialVLink,
VectorLink,
NominalLink,
NominalCanonicalLink,
ReferenceLink,
OrdinalLink,
OrdinalCanonicalLink,
AdjacentLink,
CumulativeLink,
SequentialLink)
__all__ = ['PoissonLink',
'BinomialLink',
'BinaryLink',
'NegativeBinomialLink',
'NominalLink',
'OrdinalLink']
def FLink_decorator(cls):
cls.distribution = property(cls.get_distribution, cls.set_distribution)
del cls.get_distribution, cls.set_distribution
for cls in _FLink:
FLink_decorator(cls)
def _link(link, mapping, **kwargs):
try:
link = mapping[link]()
except KeyError:
raise ValueError('\'link\' parameter, possible values are ' + ', '.join('"' + link + '"' for link in mapping.iterkeys()))
except:
raise
for attr in list(kwargs.keys()):
if hasattr(link, attr):
setattr(link, attr, kwargs.pop(attr))
else:
raise AttributeError("'" + link.__class__.__name__ + "' object has no attribute '" + attr + "'")
else:
return link
def PoissonLink(link='canonical', **kwargs):
"""
"""
return _link(link, dict(canonical = PoissonCanonicalLink,
V = PoissonVLink), **kwargs)
def BinomialLink(link='canonical', **kwargs):
"""
"""
return _link(link, dict(canonical = BinomialCanonicalLink,
F = BinomialFLink), **kwargs)
def BinaryLink(link='canonical', **kwargs):
"""
"""
return _link(link, dict(canonical = BinaryCanonicalLink,
F = BinaryFLink), **kwargs)
def NegativeBinomialLink(link='canonical', **kwargs):
"""
"""
return _link(link, dict(canonical = NegativeBinomialCanonicalLink,
U = NegativeBinomialULink,
V = NegativeBinomialVLink), **kwargs)
def _ratio(ratio, mapping, **kwargs):
try:
ratio = mapping[ratio]()
except KeyError:
raise ValueError('\'ratio\' parameter, possible values are ' + ', '.join('"' + ratio + '"' for ratio in mapping.iterkeys()))
except:
raise
for attr in list(kwargs.keys()):
if hasattr(ratio, attr):
setattr(ratio, attr, kwargs.pop(attr))
else:
raise AttributeError("'" + ratio.__class__.__name__ + "' object has no attribute '" + attr + "'")
else:
return ratio
def NominalLink(ratio='canonical', **kwargs):
"""
"""
return _ratio(ratio, dict(canonical = NominalCanonicalLink,
reference = ReferenceLink), **kwargs)
def OrdinalLink(ratio='canonical', **kwargs):
"""
"""
return _ratio(ratio, dict(canonical = OrdinalCanonicalLink,
adjacent = AdjacentLink,
cumulative = CumulativeLink,
sequential = SequentialLink), **kwargs)
```
#### File: GLM/test/test_binomial.py
```python
from statiskit import (linalg,
core,
glm)
from statiskit.data import glm as data
import unittest
from nose.plugins.attrib import attr
import math
@attr(linux=True,
osx=True,
win=True,
level=0)
class TestBinomialRegression(unittest.TestCase):#, AbstractTestDiscreteRegression):
# _eta = 4.7
# _nb_param = 4
# _canonical_link = glm.BinomialLink()
# _kappa = 5
# _mu = _kappa * math.exp(_eta)/(1+math.exp(_eta))
@classmethod
def setUpClass(cls):
"""Test Binomial regression construction"""
# cls.predictor_init()
# cls._canonical_model = glm.BinomialRegression(cls._kappa, cls._pred, cls._canonical_link)
cls._data = data.load('crabs')
for index, event in enumerate(cls._data.Satellites.events):
if event.value > 0:
cls._data.Satellites.events[index] = core.DiscreteElementaryEvent(1)
def test_Fisher_estimation(self):
"""Test binomial regression Fisher estimation"""
data = self._data.extract(explanatories=[1],
response=0)
fe = glm.binomial_estimation(algo='Fisher',
data=data,
kappa=1)
self.assertAlmostEqual(fe.estimated.predictor.alpha, -12.3508, places=4)
self.assertAlmostEqual(fe.estimated.predictor.delta[0], 0.4972, places=4)
self.assertAlmostEqual(fe.estimated.loglikelihood(data), -97.2263, places=4)
def test_SA_estimation(self):
"""Test binomial regression steepest ascent estimation"""
data = self._data.extract(explanatories=[1],
response=0)
fe = glm.binomial_estimation(algo='SA',
data=data)
self.assertEqual(fe.estimated.kappa, 1)
self.assertAlmostEqual(fe.estimated.predictor.alpha, -12.3508, places=4)
self.assertAlmostEqual(fe.estimated.predictor.delta[0], 0.4972, places=4)
self.assertAlmostEqual(fe.estimated.loglikelihood(data), -97.2263, places=4)
@classmethod
def tearDownClass(cls):
"""Test Binomial regression deletion"""
# del cls._canonical_model
pass
```
#### File: GLM/test/test_ordinal.py
```python
import math
from statiskit import core
from statiskit import glm
from statiskit import linalg
from statiskit.data import glm as data
import unittest
from nose.plugins.attrib import attr
from test_nominal import TestCanonicalNominalRegressionEstimation
@attr(linux=True,
osx=True,
win=True,
level=1)
class TestCanonicalOrdinalRegressionEstimation(TestCanonicalNominalRegressionEstimation):
@classmethod
def setUpClass(cls):
"""Test multivariate data construction"""
cls._data = data.load('cars')
oss = cls._data.AirConditioning.sample_space.as_ordinal()
oss.ordered = ['little_important', 'important', 'very_important']
cls._data.AirConditioning.sample_space = oss
cls._data.Sex.sample_space.reference = 'women'
cls._data.Age.sample_space.reference = '18_23'
def test_Fisher(self):
"""Test Fisher scoring estimation for canonical ordinal regression with complete design"""
mle = glm.ordinal_estimation(data=self._data, response = 0, explanatories = {1,2})
alpha = mle.estimated.predictor.alpha
delta = mle.estimated.predictor.delta
self.assertAlmostEqual(mle.loglikelihood, self._loglikelihood, places=2)
self._data.AirConditioning.sample_space = self._data.AirConditioning.sample_space.as_nominal()
mle = glm.nominal_estimation(data=self._data, response = 0, explanatories = {1,2})
self.assertAlmostEqual(mle.loglikelihood, self._loglikelihood, places=2)
@attr(linux=True,
osx=True,
win=True,
level=1)
class TestCumulativeRegressionEstimation(TestCanonicalOrdinalRegressionEstimation):
_alpha = linalg.Vector([0.044, 1.655])
    _delta = linalg.Vector([0.576, -1.147, -2.232]) # (Dobson, 2008) page 161
_loglikelihood = -290.648
def test_Fisher(self):
"""Test Fisher scoring estimation for cumulative regression with proportional design"""
#import ipdb
#ipdb.set_trace()
# beta_init = linalg.Vector([-0.66, 0.66, 0., 0., 0.])
# mle = glm.ordinal_estimation(ratio='cumulative', Z='proportional', data=self._data, response = 0, explanatories = {1,2}, beta_init=beta_init)
mle = glm.ordinal_estimation(ratio='cumulative', Z='proportional', data=self._data, response = 0, explanatories = {1,2})
alpha = mle.estimated.predictor.alpha
delta = mle.estimated.predictor.delta
self.assertAlmostEqual(mle.loglikelihood, self._loglikelihood, places=3)
for i in range(len(self._alpha)):
self.assertAlmostEqual(alpha[i], self._alpha[i], places=3)
for i in range(len(self._delta)):
self.assertAlmostEqual(delta[i], self._delta[i], places=3)
@attr(linux=True,
osx=True,
win=True,
level=1)
class TestSequentialRegressionEstimation(TestCanonicalOrdinalRegressionEstimation):
_alpha = linalg.Vector([11.127, 10.915])
_delta = linalg.Vector([-0.377, 0.49, -0.128, -0.424, -0.062]) # (Tutz, 2012) page 255 - results are truncated at 10^{-3} -
@classmethod
def setUpClass(cls):
"""Test multivariate data construction"""
cls._data = data.load('retinopathy')
def test_Fisher(self):
"""Test Fisher scoring estimation for ordinal regression with proportional design"""
mle = glm.ordinal_estimation(ratio='sequential', Z='constrained', data=self._data, response = 0, explanatories = {1,2,3,4}, partial_proportional={1,2,3})
alpha = mle.estimated.predictor.alpha
delta = mle.estimated.predictor.delta
for i in range(len(self._alpha)):
x = int(alpha[i]*1000)/float(1000)
self.assertEqual(x, self._alpha[i])
for i in range(len(self._delta)):
x = int(delta[i]*1000)/float(1000)
self.assertEqual(x, self._delta[i])
@attr(linux=True,
osx=True,
win=True,
level=1)
class TestHierarchicalRegressionEstimation(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Test multivariate data construction"""
cls._data_ordinal = data.load('retinopathy')
cls._data_hierarchical = data.load('retinopathy')
sample_spaces = []
ss1 = core.NominalSampleSpace(['0', '12'])
ss2 = core.NominalSampleSpace(['1', '2'])
hss = core.HierarchicalSampleSpace(ss1)
hss.partition('12', ss2)
sample_spaces.append(hss)
sample_spaces.append(core.controls.ZZ)
for i in [0,1,2]:
sample_spaces.append(core.controls.RR)
cls._data_hierarchical.RET.sample_space = hss
def test_Fisher(self):
"""Test Fisher scoring estimation for hierarchical regression"""
#mle_sequential = glm.ordinal_estimation(ratio='sequential', distribution = core.NormalDistribution(), data = self._data_ordinal, response = 0, explanatories = {1,2,3,4})
mle = glm.ordinal_estimation(ratio='sequential', data = self._data_ordinal, response = 0, explanatories = {1,2,3,4})
a = mle.estimated.predictor.alpha
mle = glm.hierarchical_estimation(data = self._data_hierarchical, response = 0, explanatories = {1,2,3,4})
A0 = mle[""].estimated.predictor.alpha[0]
A1 = mle["12"].estimated.predictor.alpha[0]
self.assertAlmostEqual(a[0], A0, places=3)
self.assertAlmostEqual(a[1], A1, places=2)
mle_sequential_cauchy = glm.ordinal_estimation(ratio='sequential', distribution = core.CauchyDistribution(), data = self._data_ordinal, response = 0, explanatories = {1,2,3,4})
estimator = glm.nominal_estimation(distribution = core.CauchyDistribution())
mle_hierarchical = glm.hierarchical_estimation(default = estimator, data = self._data_hierarchical, response = 0, explanatories = {1,2,3,4})
alpha_sequential_cauchy = mle_sequential_cauchy.estimated.predictor.alpha
alpha_hierarchical_root = mle_hierarchical[""].estimated.predictor.alpha[0]
alpha_hierarchical_12 = mle_hierarchical["12"].estimated.predictor.alpha[0]
self.assertAlmostEqual(alpha_sequential_cauchy[0], alpha_hierarchical_root, places=2)
self.assertAlmostEqual(alpha_sequential_cauchy[1], alpha_hierarchical_12, places=2)
mle_sequential_laplace = glm.ordinal_estimation(ratio='sequential', distribution = core.LaplaceDistribution(), data = self._data_ordinal, response = 0, explanatories = {1,2,3,4})
estimator_12 = glm.nominal_estimation(distribution = core.LaplaceDistribution())
mle_hierarchical = glm.hierarchical_estimation(default = estimator, **{"12" : estimator_12}, data = self._data_hierarchical, response = 0, explanatories = {1,2,3,4})
alpha_sequential_laplace = mle_sequential_laplace.estimated.predictor.alpha
alpha_hierarchical_root = mle_hierarchical[""].estimated.predictor.alpha[0]
alpha_hierarchical_12 = mle_hierarchical["12"].estimated.predictor.alpha[0]
self.assertAlmostEqual(alpha_sequential_cauchy[0], alpha_hierarchical_root, places=2)
self.assertAlmostEqual(alpha_sequential_laplace[1], alpha_hierarchical_12, places=1)
binary_estimator_cauchy = glm.binary_estimation(distribution = core.CauchyDistribution())
binary_estimator_laplace = glm.binary_estimation(distribution = core.LaplaceDistribution())
mle_hierarchical = glm.hierarchical_estimation(default = binary_estimator_cauchy, **{"12" : binary_estimator_laplace}, data = self._data_hierarchical, response = 0, explanatories = {1,2,3,4})
alpha_hierarchical_root = mle_hierarchical[""].estimated.predictor.alpha
alpha_hierarchical_12 = mle_hierarchical["12"].estimated.predictor.alpha
self.assertAlmostEqual(alpha_sequential_cauchy[0], alpha_hierarchical_root, places=3)
#self.assertAlmostEqual(alpha_sequential_laplace[1], alpha_hierarchical_12, places=5)
```
#### File: GLM/test/test_poisson.py
```python
from statiskit import (linalg,
core,
glm)
from statiskit.data import glm as data
import unittest
from nose.plugins.attrib import attr
import math
#from test_link import AbstractTestScalarLink
from test_regression import AbstractTestUnivariateRegression
@attr(linux=True,
osx=True,
win=True,
level=0)
class TestPoissonRegression(unittest.TestCase):#, AbstractTestUnivariateRegression):
# _alpha = 1.
# _delta = linalg.Vector([0.5, 2.])
# _x = [1, 1.6]
# _nb_param = 3
# _eta = 4.7
# _canonical_link = glm.PoissonLink()
# _mu = math.exp(_eta)
# _places = 10
@classmethod
def setUpClass(cls):
"""Test Poisson regression construction"""
#cls.predictor_init()
#cls._canonical_model = glm.PoissonRegression(cls._pred, cls._canonical_link)
cls._data = data.load('crabs')
def test_Fisher_estimation(self):
"""Test Poisson regression Fisher estimation"""
fe = glm.poisson_estimation(algo='Fisher', data=self._data, explanatories=[1], response=0)
self.assertAlmostEqual(fe.estimated.predictor.alpha, -3.305, places=3)
self.assertAlmostEqual(fe.estimated.predictor.delta[0], 0.164, places=3)
# def test_get_nb_parameters(self):
# """Test discrete regression get number of parameters"""
# self.assertEqual(self._canonical_model.nb_parameters, self._nb_param)
# def test_conditional(self):
# """Test discrete regression conditional operator"""
# response_distribution = self._canonical_model(*self._x)
# self.assertAlmostEqual(response_distribution.mean, self._mu, places = self._places)
@classmethod
def tearDownClass(cls):
"""Test Poisson regression deletion"""
#del cls._canonical_model
pass
```
#### File: GLM/test/test_predictor.py
```python
from statiskit import core
from statiskit import glm
from statiskit import linalg
import unittest
from nose.plugins.attrib import attr
@attr(linux=True,
osx=True,
win=True,
level=0)
class TestPredictor(object):
@classmethod
def sample_space_init(cls):
sample_spaces = []
sample_spaces.append(core.controls.ZZ)
sample_spaces.append(core.controls.RR)
cls._vector_sample_spaces = core.VectorSampleSpace(sample_spaces)
@attr(linux=True,
osx=True,
win=True,
level=0)
class TestCompleteScalarPredictor(unittest.TestCase, TestPredictor):
_alpha = 1.
_delta = linalg.Vector([0.5, 2.])
#_x = [1, 1.6]
_len = 3
_eta = 4.7
@classmethod
def setUpClass(cls):
"""Test complete scalar predictor construction"""
cls.sample_space_init()
cls._pred = glm.CompleteScalarPredictor(cls._vector_sample_spaces)
def test_predictor(self):
"""Test complete scalar predictor operator"""
self._pred.alpha = self._alpha
self._pred.delta = self._delta
self.assertEqual(len(self._pred), self._len)
self.assertEqual(self._pred(1, 1.6), self._eta)
@classmethod
def tearDownClass(cls):
"""Test complete scalar predictor deletion"""
del cls._pred
@attr(linux=True,
osx=True,
win=True,
level=0)
class TestCompleteVectorPredictor(unittest.TestCase, TestPredictor):
_alpha = linalg.Vector([1., 2.])
_delta = linalg.Matrix([[0.5, 2.], [1., 4.]])
_len = 6
_eta = linalg.Vector([4.7, 9.4])
@classmethod
def setUpClass(cls):
"""Test complete vector predictor construction"""
cls.sample_space_init()
cls._pred = glm.CompleteVectorPredictor(cls._vector_sample_spaces, 2)
def test_predictor(self):
"""Test vector predictor operator"""
self._pred.alpha = self._alpha
self._pred.delta = self._delta
self.assertEqual(len(self._pred), self._len)
eta = self._pred(1, 1.6)
self.assertEqual(eta[0], self._eta[0])
self.assertEqual(eta[1], self._eta[1])
@classmethod
def tearDownClass(cls):
"""Test complete vector predictor deletion"""
del cls._pred
@attr(linux=True,
osx=True,
win=True,
level=0)
class TestProportionalVectorPredictor(TestCompleteVectorPredictor):
_alpha = linalg.Vector([1., 2.])
_delta = linalg.Vector([0.5, 2.])
_len = 4
_eta = linalg.Vector([4.7, 5.7])
@classmethod
def setUpClass(cls):
"""Test proportional vector predictor construction"""
cls.sample_space_init()
cls._pred = glm.ProportionalVectorPredictor(cls._vector_sample_spaces, 2)
@classmethod
def tearDownClass(cls):
"""Test proportional vector predictor deletion"""
del cls._pred
``` |
{
"source": "jpeyhardi/STL",
"score": 2
} |
#### File: statiskit/stl/set.py
```python
from . import _stl
from . import __stl
from functools import wraps
import sys
__all__ = []
__module__ = sys.modules[__name__]
for var in dir(__stl.statiskit.stl):
if var.startswith('Set'):
setattr(__module__, var, getattr(__stl.statiskit.stl, var))
__all__.append(var)
def decoration(cls):
def wrapper__init__(f):
@wraps(f)
def __init__(self, arg=None):
if arg is None:
f(self)
elif not isinstance(arg, self.__class__):
raise TypeError('\'arg\' parameter must be a \'' + self.__class__.__name__ + '\' instance')
else:
f(self, arg)
return __init__
cls.__init__ = wrapper__init__(cls.__init__)
# def __eq__(self, other):
# if isinstance(other, self.__class__):
# return all(self[index] == other[index] for index in range(len(self)))
# else:
# return False
# cls.__eq__ = __eq__
# del __eq__
def __str__(self):
return "{" + ", ".join(str(value) for value in self) + "}"
cls.__str__ = __str__
def __repr__(self):
return "{" + ", ".join(repr(value) for value in self) + "}"
cls.__repr__ = __repr__
def _repr_latex_(self):
return "$\\left\\{" + ", ".join(getattr(value, "_repr_latex_", getattr(value, "__repr__"))() for value in self) + "\\right\\}$"
cls._repr_latex_ = _repr_latex_
for cls in __stl.std._Set:
decoration(cls)
```
#### File: STL/test/test_vector.py
```python
from statiskit import stl
import unittest
from nose.plugins.attrib import attr
@attr(linux=True,
osx=True,
win=True,
level=1)
class TestVector(unittest.TestCase):
def test___initialization(self):
"""Test vector initialization"""
v = stl.VectorIndex()
self.assertEqual(len(v), 0)
def test__equal(self):
"""Test vectors equality"""
v1 = stl.VectorIndex()
v1.push_back(3)
v1.push_back(1)
v1.push_back(2)
v2 = stl.VectorIndex()
v2.push_back(3)
v2.push_back(1)
v2.push_back(2)
self.assertEqual(v1, v2)
v3 = stl.VectorString()
v3.push_back('A')
v3.push_back('B')
v3.push_back('C')
v4 = stl.VectorString()
v4.push_back('A')
v4.push_back('B')
v4.push_back('C')
self.assertEqual(v3, v4)
``` |
{
"source": "jpeyre/analogy",
"score": 2
} |
#### File: jpeyre/analogy/train.py
```python
from __future__ import division
import __init__
import tensorboard_logger
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import time
from utils import AverageMeter, Tb_logger, Parser
import argparse
import os.path as osp
import os
from datasets.BaseLoader import TrainSampler
from networks import models
import yaml
import warnings
warnings.filterwarnings("ignore")
"""
Parsing options
"""
args = argparse.ArgumentParser()
parser = Parser(args)
opt = parser.make_options()
print(opt)
"""
Train / val
"""
def train(epoch, split):
batch_time = 0
train_loss = {}
train_recall = {}
train_precision = {}
loader = loaders[split]
model.train()
start_time = time.time()
start = time.time()
for batch_idx, batch_input in enumerate(loader):
for key in batch_input.keys():
if opt.use_gpu:
batch_input[key] = Variable(batch_input[key].cuda())
else:
batch_input[key] = Variable(batch_input[key])
# Train
loss, tp_class, fp_class, num_pos_class = model.train_(batch_input)
batch_time += time.time() - start
start = time.time()
# True pos/false pos per branch
for gram in tp_class.keys():
recall = np.nanmean(tp_class[gram].numpy()/num_pos_class[gram].numpy())
precision = np.nanmean(tp_class[gram].numpy() / (tp_class[gram].numpy() + fp_class[gram].numpy()))
if gram not in train_recall.keys():
train_recall[gram] = AverageMeter()
if gram not in train_precision.keys():
train_precision[gram] = AverageMeter()
if gram not in train_loss.keys():
train_loss[gram] = AverageMeter()
train_recall[gram].update(recall, n=batch_input['pair_objects'].size(0))
train_precision[gram].update(precision, n=batch_input['pair_objects'].size(0))
train_loss[gram].update(loss[gram].data[0], n=batch_input['pair_objects'].size(0))
# Loss reg
if opt.use_analogy:
if 'reg' not in train_loss.keys():
train_loss['reg'] = AverageMeter()
train_loss['reg'].update(loss['reg'].data[0], n=batch_input['pair_objects'].size(0))
learning_rate = model.optimizer.param_groups[0]['lr']
if batch_idx % 100 ==0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tDone in: {:.2f} sec'.format(epoch, batch_idx, len(loader), 100. * batch_idx / len(loader), sum(loss.values()).data[0], (time.time()-start_time)))
start_time = time.time()
# Record logs in tensorboard
if model.ite % 500 ==0:
batch_time /= 500
total_train_loss = 0
if opt.use_analogy:
total_train_loss = train_loss['sro'].avg + opt.lambda_reg*train_loss['reg'].avg
else:
for _, val in train_loss.iteritems():
total_train_loss += val.avg
# Register in logger
tb_logger[split].log_value('epoch', epoch, model.ite)
tb_logger[split].log_value('loss', total_train_loss, model.ite)
tb_logger[split].log_value('batch_time', batch_time, model.ite)
tb_logger[split].log_value('learning_rate', learning_rate, model.ite)
tb_logger[split].log_value('weight_decay', opt.weight_decay, model.ite)
for gram in tp_class.keys():
tb_logger[split].log_value(gram+'_loss', train_loss[gram].avg, model.ite)
tb_logger[split].log_value(gram+'_mean_recall', 100.*train_recall[gram].avg, model.ite)
tb_logger[split].log_value(gram+'_mean_precision', 100.*train_precision[gram].avg, model.ite)
# Analogy loss
if opt.use_analogy:
tb_logger[split].log_value('loss_reg', train_loss['reg'].avg, model.ite)
batch_time = 0
model.ite += 1
for gram in tp_class.keys():
train_loss[gram].reset()
if opt.use_analogy:
train_loss['reg'].reset()
def evaluate(epoch, split):
model.eval()
batch_time = 0
test_loss = {}
test_recall = {}
test_precision = {}
loader = loaders[split]
start = time.time()
for batch_idx, batch_input in enumerate(loader):
for key in batch_input.keys():
if opt.use_gpu:
batch_input[key] = Variable(batch_input[key].cuda())
else:
batch_input[key] = Variable(batch_input[key])
# Eval
loss, tp_class, fp_class, num_pos_class = model.val_(batch_input)
batch_time += time.time() - start
start = time.time()
# Performance per gram
for gram in tp_class.keys():
recall = np.nanmean(tp_class[gram].numpy()/num_pos_class[gram].numpy())
precision = np.nanmean(tp_class[gram].numpy() / (tp_class[gram].numpy() + fp_class[gram].numpy()))
if gram not in test_recall.keys():
test_recall[gram] = AverageMeter()
if gram not in test_precision.keys():
test_precision[gram] = AverageMeter()
if gram not in test_loss.keys():
test_loss[gram] = AverageMeter()
test_recall[gram].update(recall, n=batch_input['pair_objects'].size(0))
test_precision[gram].update(precision, n=batch_input['pair_objects'].size(0))
test_loss[gram].update(loss[gram].data[0], n=batch_input['pair_objects'].size(0))
# Loss analogy
if opt.use_analogy:
if 'reg' not in test_loss.keys():
test_loss['reg'] = AverageMeter()
test_loss['reg'].update(loss['reg'].data[0], n=batch_input['pair_objects'].size(0))
# Save total loss on test
total_test_loss = 0
if opt.use_analogy:
total_test_loss = test_loss['sro'].avg + opt.lambda_reg*test_loss['reg'].avg
else:
for _, val in test_loss.iteritems():
total_test_loss += val.avg
tb_logger[split].log_value('epoch', epoch, model.ite)
tb_logger[split].log_value('loss', total_test_loss, model.ite)
tb_logger[split].log_value('batch_time', batch_time/len(loader), model.ite)
# Total performance per gram
recall_gram = {}
loss_gram = {}
precision_gram = {}
recall_gram = {}
for gram in tp_class.keys():
tb_logger[split].log_value(gram+'_loss', test_loss[gram].avg, model.ite)
tb_logger[split].log_value(gram+'_mean_recall', 100.*test_recall[gram].avg, model.ite)
tb_logger[split].log_value(gram+'_mean_precision', 100.*test_precision[gram].avg, model.ite)
recall_gram[gram] = test_recall[gram]
precision_gram[gram] = test_precision[gram]
loss_gram[gram] = test_loss[gram].avg
print('{} set: Average loss: {:.4f}, Recall: ({:.0f}%)'.format(split, sum(loss_gram.values()), \
100. * np.mean(map((lambda x:x.avg), test_recall.values()))))
for gram in tp_class.keys():
test_loss[gram].reset()
if opt.use_analogy:
test_loss['reg'].reset()
return loss_gram, precision_gram, recall_gram
#####################
""" Define logger """
#####################
splits = [opt.train_split, opt.test_split]
# Init logger
log = Tb_logger()
logger_path = osp.join(opt.logger_dir, opt.exp_name)
if osp.exists(logger_path):
answer = raw_input("Experiment directory %s already exists. Continue: yes/no?" %logger_path)
assert answer=='yes', 'Please specify another experiment directory with exp_name option'
tb_logger = log.init_logger(logger_path, splits)
# Write options in directory
parser.write_opts_dir(opt, logger_path)
####################
""" Data loaders """
####################
store_ram = []
store_ram.append('objectscores') if opt.use_ram and opt.use_precompobjectscore else None
store_ram.append('appearance') if opt.use_ram and opt.use_precompappearance else None
if opt.data_name in ['hico','hicoforcocoa']:
from datasets.hico_api import Hico as Dataset
elif opt.data_name=='vrd':
from datasets.vrd_api import Vrd as Dataset
elif opt.data_name=='cocoa':
from datasets.cocoa_api import Cocoa as Dataset
loaders = {}
data_path = '{}/{}'.format(opt.data_path, opt.data_name)
image_path = '{}/{}/{}'.format(opt.data_path, opt.data_name, 'images')
cand_dir = '{}/{}/{}'.format(opt.data_path, opt.data_name, 'detections')
# Train split
dset = Dataset( data_path, \
image_path, \
opt.train_split, \
cand_dir = cand_dir,\
thresh_file = opt.thresh_file, \
use_gt = opt.use_gt, \
add_gt = opt.add_gt, \
train_mode = True, \
jittering = opt.use_jittering, \
store_ram = store_ram, \
l2norm_input = opt.l2norm_input, \
neg_GT = opt.neg_GT)
dset_loader = TrainSampler( dset, sampler_name = opt.sampler, \
num_negatives = opt.num_negatives, \
use_image = opt.use_image, \
use_precompappearance = opt.use_precompappearance, \
use_precompobjectscore = opt.use_precompobjectscore)
loaders[opt.train_split] = torch.utils.data.DataLoader(dset_loader, \
batch_size = opt.batch_size, \
shuffle = True, \
num_workers = opt.num_workers, \
collate_fn = dset_loader.collate_fn)
# Test split
dset = Dataset( data_path, \
image_path, \
opt.test_split, \
cand_dir = cand_dir,\
thresh_file = opt.thresh_file, \
use_gt = opt.use_gt, \
add_gt = opt.add_gt, \
train_mode = True, \
jittering = False, \
store_ram = store_ram, \
l2norm_input = opt.l2norm_input, \
neg_GT = opt.neg_GT)
dset_loader = TrainSampler(dset, sampler_name = opt.sampler,\
num_negatives = opt.num_negatives, \
use_image = opt.use_image, \
use_precompappearance = opt.use_precompappearance, \
use_precompobjectscore = opt.use_precompobjectscore)
loaders[opt.test_split] = torch.utils.data.DataLoader(dset_loader, \
batch_size = opt.batch_size, \
shuffle = False, \
num_workers = opt.num_workers, \
collate_fn = dset_loader.collate_fn)
####################
""" Define model """
####################
# Get all options
opt = parser.get_opts_from_dset(opt, dset) # additional options from dataset
# Define model
model = models.get_model(opt)
if torch.cuda.is_available():
model.cuda()
# Load pre-trained model
if opt.pretrained_model:
assert opt.start_epoch, 'Indicate epoch you start from'
if opt.start_epoch:
checkpoint = torch.load(opt.pretrained_model, map_location=lambda storage, loc: storage)
model.load_pretrained_weights(checkpoint['model'])
################
""" Speed-up """
################
model.eval()
if opt.use_analogy:
model.precomp_language_features() # pre-compute unigram emb
model.precomp_sim_tables() # pre-compute similarity tables for speed-up
###########
""" Run """
###########
model.train()
print('Train classifier')
best_recall = 0
for epoch in range(opt.num_epochs):
epoch_effective = epoch + opt.start_epoch + 1
# Train
model.adjust_learning_rate(opt, epoch)
train(epoch, opt.train_split)
# Val
loss_test, precision_test, recall_test = evaluate(epoch, opt.test_split)
if epoch_effective%opt.save_epoch==0:
state = {
'epoch':epoch_effective,
'model':model.state_dict(),
'loss':loss_test,
'precision':precision_test,
'recall':recall_test,
}
torch.save(state, osp.join(logger_path, 'model_' + 'epoch' + str(epoch_effective) + '.pth.tar'))
if recall_test > best_recall:
state = {
'epoch':epoch_effective,
'model':model.state_dict(),
'min_loss':loss_test,
'precision':precision_test,
'recall':recall_test,
}
torch.save(state, osp.join(logger_path, 'model_best.pth.tar'))
best_recall = recall_test
``` |
{
"source": "jpeyret/lazy-regression-tests",
"score": 2
} |
#### File: lazy-regression-tests/lazy_regression_tests/core.py
```python
import pdb
import os
import sys
from pathlib import Path
undefined = NotImplemented
verbose = "-v" in sys.argv
#######################################################
# Typing
#######################################################
from typing import (
Optional,
# TYPE_CHECKING,
Any,
Union,
)
#######################################################
def cpdb(*args, **kwargs):
"disabled conditional breakpoints - does nothing until activated by set_cpdb/rpdb/breakpoint3"
rpdb = breakpoints = cpdb
try:
from timeout_decorator import timeout
import timeout_decorator
CustomTimeoutError = timeout_decorator.timeout_decorator.TimeoutError
TIMEOUT_MAXTIME_TO_ALLOW = 5
except (ImportError,) as e: # pragma: no cover
# pdb.set_trace()
timeout = None
TIMEOUT_MAXTIME_TO_ALLOW = 0
class CustomTimeoutError(Exception):
"""we'll never see this """
# pylint: disable=unused-import
from lazy_regression_tests._baseutils import ppp, Dummy
from traceback import print_exc as xp
# pylint: enable=unused-import
# pylint: disable=unused-wildcard-import,wildcard-import
from .common import *
# pylint: enable=unused-wildcard-import,wildcard-import
from .validators import ValidationManager
# aliasing the JSON response filter management to DictFilterManager as there is
# very little that is HTTP specific
from lazy_regression_tests.utils import Subber, fill_template
from .core_assist import LazyChecker, LazyTemp, MediatedEnvironDict, _Control, _LazyMeta
class LazyMixin(metaclass=_LazyMeta):
""" Intended as a Mixin to unittest classes it provides a number of services automatically
- validations via cls_validators and set_expectation
- regression testing against last seen baseline via `self.assert_exp(data, extension)`
- filtering to avoid false regression alerts from variable data (timestamps, csrf protection tokens)
- cls_filters = dict(<extension>=FilterDirectives...)
- self.set_filter(FilterDirective)
- both validations and filters are intended to be defined in mixin ancestor classes and mixed and
matched as desired
class MyTest(LazyMixin, unittest.TestCase):
cls_filters = dict(html=filter_csrftokens)
cls_validators = [validate_http, validate_html]
"""
cls_filters = {}
cls_validators = []
add_lazy_dirname = []
lazy_dirname_extras = []
lazytemp = None
lazy_basename_extras = ""
# this normally resolves to os.environ, but can be preset for testing
lazy_environ = MediatedEnvironDict()
T_FILENAME = "%(filename)s %(classname)s %(_testMethodName)s %(lazy_basename_extras)s %(suffix)s %(lazy_extension)s"
ENVIRONMENT_VARNAME_ROOT = "lzrt_"
@classmethod
def get_basename(cls, name_, file_, module_) -> str:
""" called from mytest.py this returns mytest and is the base for file naming
its use in client code is a bit anomalous as the enclosing class
hasn't been created yet.
"""
cls.lazy_pa = pa = Path(file_)
return pa.stem
def lazy_build_filters(self):
try:
res = {}
for extension, filter_ in self.__class__.cls_filters.items():
# if cpdb(): pdb.set_trace()
filter_copy = filter_.copy()
assert filter_copy, "filter_copy empty"
res[extension] = filter_copy
return res
# pragma: no cover pylint: disable=unused-variable
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def _lazy_get_t_dirname(self, exp_got, subber):
""" get the template for the directory names where to save files """
try:
env_name = dict(
exp="template_dirname_exp",
got="template_dirname_got",
report="template_dirname_report",
)[exp_got]
dirname = subber.get("dirname") or self._lazy_control.env.get(env_name)
if dirname is None:
if exp_got in ("exp", "got"):
raise InvalidConfigurationException(
"could not get output directory for %s in %s"
% (exp_got, self._lazy_control.env)
)
return None
dirname2 = os.path.join(
dirname, self.lazy_filename, subber.get("classname")
)
return dirname2
# pragma: no cover pylint: disable=unused-variable
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def _handle_dirname_extras(self, _litd):
""" allows injection of extra instance-set attributes into the directory hierarchies
The intent of `lazy_dirname_extras` is to partition tests by other attributes, like say
a site name or a test database name.
given `lazy_dirname_extras = "site"` and `self.site = 'example.com'`
.
└── <myclass>
├── example.com
│ └── <get_basename>.<myclass>.<_testMethodName>.txt
"""
try:
dirname_extras = getattr(self, "lazy_dirname_extras", "")
if not dirname_extras:
return _litd
if isinstance(dirname_extras, list):
dirname_extras = " ".join(dirname_extras)
if dirname_extras:
# expand something like "foo, bar" into [..."%(foo)s", "%(bar)s"...]
li_replace = [
"%%(%s)s" % (attrname) for attrname in dirname_extras.split()
]
_litd.extend(li_replace)
return _litd
# pragma: no cover pylint: disable=unused-variable
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def _get_fnp_save(
self,
exp_got: Union["got", "exp"],
options: "LazyChecker",
suffix: Optional[str],
):
""" get the save path """
try:
subber = Subber(
options,
{
"filename": self.lazy_filename,
"suffix": suffix,
"classname": self.__class__.__name__,
"exp_got": exp_got,
},
# the lower priority the TestCase instance the less probability
# of name clashes
self,
)
# calculating the directory path
t_dirname = self._lazy_get_t_dirname(exp_got, subber)
if t_dirname is None:
return None
_litd = t_dirname.split(os.path.sep)
_litd = self._handle_dirname_extras(_litd)
_lid = ["/"] + [fill_template(t_, subber) for t_ in _litd]
dirname = os.path.join(*_lid)
# calculating the filename
t_basename = self.T_FILENAME
_litb = t_basename.split()
_lib = [fill_template(t_, subber) for t_ in _litb]
basename = ".".join([i_ for i_ in _lib if i_])
basename = basename.replace(" ", "_")
basename = basename.replace("/", "_")
if not os.path.isdir(dirname):
os.makedirs(dirname)
return os.path.join(dirname, basename)
# pragma: no cover pylint: disable=unused-variable, broad-except
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
#######################################################
# validator related
#######################################################
_filters = _validationmgr = undefined
@property
def validationmgr(self):
""" create the validationmgr on demand """
if self._validationmgr is undefined:
self._validationmgr = ValidationManager(self, *self.cls_validators)
return self._validationmgr
@property
def filters(self):
""" build filters at the instance level """
if self._filters is undefined:
self._filters = self.lazy_build_filters()
return self._filters
def check_expectations(
self, lazy_skip=None, lazy_skip_except=None, lazy_sourced_only=True, **sources
):
""" validate active validation directives """
try:
self.validationmgr.check_expectations(
self,
lazy_skip=lazy_skip,
lazy_skip_except=lazy_skip_except,
lazy_sourced_only=lazy_sourced_only,
**sources,
)
# pragma: no cover pylint: disable=unused-variable
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def set_expectation(self, *args, **kwargs):
""" add/modify a validation """
validationmgr = self.validationmgr
name = args[0]
if breakpoints("set_expectation", {"name": name}): # pragma: no cover
pdb.set_trace()
# put this in your breakpoints.json
validationmgr.set_expectation(*args, **kwargs)
#######################################################
# diff-related
#######################################################
def assert_exp(self, got: Any, extension: str, suffix: str = "") -> LazyTemp:
"""
regression test that `got` is the same as what was last encountered and stored
in `exp` file.
"""
try:
if not isinstance(extension, str):
raise InvalidConfigurationException(
"%s.extension has to be a string (extension=%s) and one of the existing filters"
% (self, extension)
)
try:
filter_ = self.filters[extension]
# pragma: no cover pylint: disable=unused-variable
except (KeyError,) as e:
raise InvalidConfigurationException(
f"{self}. unknown extension={extension}. known extensions in filters:{self.filters.keys()}"
)
rawfiltermgr, textfiltermgr = filter_.get_raw_text_filters()
checker = LazyChecker(
extension=extension,
rawfiltermgr=rawfiltermgr,
textfiltermgr=textfiltermgr,
)
if hasattr(filter_, "to_text"):
checker.to_text = filter_.to_text
if hasattr(filter_, "prep"):
checker.prep = filter_.prep
return self._lazycheck(got, checker, suffix)
# pragma: no cover pylint: disable=unused-variable
except (AssertionError,) as e: # pragma: no cover
raise
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def _get_fnp_raw(self, options, suffix, control, tmp, got):
"""
where do we save the raw received data,
after formatting, but before filtering ?
"""
try:
if not control.save_raw:
return
fnp_raw = self._get_fnp_save("report", options, suffix)
return fnp_raw
# pragma: no cover pylint: disable=unused-variable
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
def _lazycheck(self, got: Any, options: LazyChecker, suffix: str = "") -> LazyTemp:
try:
env = self.lazy_environ
if not self.lazy_environ.acquired:
env.clear()
env.acquire(self.ENVIRONMENT_VARNAME_ROOT)
self._lazy_control = control = _Control(self, env, options)
# only create the lazy temp the first time.
tmp = self.lazytemp = self.lazytemp or LazyTemp(control, env, self)
# the environment requests that no diffing or writing take place
# typically indicated by setting environment variable `lzrt_directive=skip`
if control.skip():
return tmp
# calculate the paths of the exp/got files
tmp.fnp_got = fnp_got = self._get_fnp_save("got", options, suffix)
tmp.fnp_exp = fnp_exp = self._get_fnp_save("exp", options, suffix)
fnp_raw = self._get_fnp_raw(options, suffix, control, tmp, got)
# linefeeds have a tendency to creep in sometimes
formatted_got = options.format(tmp, got, fnp_raw).rstrip()
# at this point, we want to write the received, formatted, data regardless
with open(fnp_got, "w") as fo:
fo.write(formatted_got)
# the newly received data is to be taken as our expectations
# typically indicated by setting environment variable `lzrt_directive=baseline`
if control.baseline():
with open(fnp_exp, "w") as fo:
fo.write(formatted_got)
return tmp
try:
with open(fnp_exp) as fi:
# linefeeds have a tendency to creep in sometimes
exp = fi.read().rstrip()
# pragma: no cover pylint: disable=unused-variable
except (IOError,) as e:
# we just want to write the received data as our expectation
tmp.execinfo.ioerror_exp = fnp_exp
tmp.message = message = "no check because IOError on %s" % (fnp_exp)
with open(fnp_exp, "w") as fo:
fo.write(formatted_got)
return tmp
# the environment requests only equality is checked, without trying to show details
# typically indicated by setting environment variable `lzrt_directive=nodiff`
# this may be desired if the differences could cause timeouts with `assertEqual`
if control.nodiff():
tmp.message = message = "exp and got are not equal but diffing disabled"
if exp != formatted_got:
raise self.fail(message)
# pragma: no cover pylint: disable=unused-variable
try:
# supports a timeout mechanism, if the module is available
self.assertEqualTimed(exp, formatted_got)
except (CustomTimeoutError,) as e: # pragma: no cover
tmp.message = message = (
"exp and got are not equal but comparison timed out after %s seconds"
% (TIMEOUT_MAXTIME_TO_ALLOW)
)
self.fail(message)
except (AssertionError,) as e: # pragma: no cover
raise
except (Exception,) as e:
if cpdb():
pdb.set_trace()
raise
return self.lazytemp
except (AssertionError,) as e: # pragma: no cover
raise
except (
Exception,
) as e: # pragma: no cover pylint: disable=unused-variable, broad-except
if cpdb():
pdb.set_trace()
raise
#######################################################
# Note the conditional method definition and the fallback to
# basic assertEqual
#######################################################
if timeout:
@timeout(TIMEOUT_MAXTIME_TO_ALLOW)
def assertEqualTimed(self, exp, got, message=None):
""" comparisons will automatically times out after %s seconds""" % (
TIMEOUT_MAXTIME_TO_ALLOW
)
try:
self.assertEqual(exp, got, message)
except (AssertionError,) as e:
raise self.format_assertion_message(e)
else:
#
def assertEqualTimed(self, exp, got, message=None):
""" fallback if timeout package is not available """
try:
self.assertEqual(exp, got, message)
except (AssertionError,) as e:
raise self.format_assertion_message(e)
def format_assertion_message(self, ori):
message = f"""
❌❌❌Regressions found:
Expected contents are in file:
{self.lazytemp.fnp_exp}
Received contents are in file:
{self.lazytemp.fnp_got}
Original exception:
{ori}
❌❌❌
"""
exc = ori.__class__(message)
return exc
``` |
{
"source": "jpeyret/pynoorm",
"score": 4
} |
#### File: pynoorm/pynoorm/binder.py
```python
import re
class Binder(object):
"""query template and substitution management - generic
"""
def __init__(self, *args, **kwds):
pass
def format(self, tqry, *args):
"""
:param tqry: query with optional substitution variables
Python style i.e.
select * from orders where custid = %(custid)s
:param *args: zero or more arguments that will be checked
left-to-right, argument[<key>], getattr(argument,<key>)
"""
def __repr__(self):
msg = "%s paramstyle=%s" % (self.__class__.__name__, self.paramstyle)
if hasattr(self, "supports"):
msg += " supports: %s" % (self.supports)
return msg
def _case_sensitive(self, key):
return [key]
def _case_insensitive(self, key):
if key == key.upper():
return [key, key.lower()]
if key == key.lower():
return [key, key.upper()]
return [key]
_key_expand = _case_sensitive
def _get_from_args(self, key_in):
"""generic way to look for a key in the arg list"""
li_key = self._key_expand(key_in)
for key in li_key:
for arg in self.li_arg:
try:
got = arg[key]
return got
except (KeyError):
try:
# try getattr
got = getattr(arg, key)
return got
except AttributeError:
continue
except (AttributeError, TypeError):
# no __getitem__, try getattr
try:
got = getattr(arg, key)
return got
except AttributeError:
continue
try:
raise KeyError(key_in)
except Exception as e:
raise
@classmethod
def factory(cls, paramstyle, case_insensitive=False):
"""
return a Binder subclass instance appropriate
to the underlying db library paramstyle bind variable
:param paramstyle: parameter style string as per PEP-249
:case_insensitive: %(custid)s will match {"custid":1} or {"CUSTID":2}, with priority
going to the initial case. mixed-case keys (custId) will only match {"custId":3}
"""
try:
inst = cls._di_paramstyle[paramstyle]()
if case_insensitive:
inst._key_expand = inst._case_insensitive
return inst
except KeyError:
msg = """got:%s,
but expecting one of %s.
See
https://www.python.org/dev/peps/pep-0249/#paramstyle
for details""" % (
paramstyle,
"/".join(list(cls._di_paramstyle.keys())),
)
raise ValueError(msg)
except NotImplementedError:
msg = "%s is not implemented yet" % (paramstyle)
raise NotImplementedError(msg)
_di_paramstyle = {}
# the regular expression pattern that looks for list type binds
re_pattern_listsubstition = re.compile(r"%\([a-zA-Z0-9_]+\)l")
# leading '__' variable name makes name clashes more unlikely
T_LIST_KEYNAME = "%<KEY>"
# def _pre_process(self):
# """do nothing for now - intended to support list substitutions"""
# pass
def _pre_process(self):
li_listsubstition = self.re_pattern_listsubstition.findall(self.tqry)
if li_listsubstition:
self.preprocess_listsubstitution(li_listsubstition)
def preprocess_listsubstitution(self, li_hit):
""" this will transform %(xxx)l into %(__xxx_000)s, %(__xxx_001)s """
di_list_sub = {}
self.li_arg.insert(0, di_list_sub)
for hit in li_hit:
key = hit[2:-2]
got = self._get_from_args(key)
if not isinstance(got, (list, set)):
raise ValueError(
"list substitutions require an iterable parameter: `%s` was of type `%s`"
% (key, type(got))
)
#
# self.tqry = self.tqry.replace(hit, hit[:-1] + "s")
else:
li = []
if not got:
# empty list or set
self.tqry = self.tqry.replace(hit, "NULL")
continue
for ix, val in enumerate(got):
ikeyname = self.T_LIST_KEYNAME % (key, ix)
ikeyname_sub = "%%(%s)s" % (ikeyname)
di_list_sub[ikeyname] = val
li.append(ikeyname_sub)
# replace the original bind %(xxx)l with
# %(__xxx_000)s, %(__xxx_001)s, ...
repval = ", ".join(li)
self.tqry = self.tqry.replace(hit, repval)
class Binder_pyformat(Binder):
"""support Postgresql
query template and substitution management for postgresql
query is unchanged because postgresql is happy
with %(somevar)s as a bind
"""
paramstyle = "pyformat"
supports = "Postgresql"
def _pre_process(self):
li_listsubstition = self.re_pattern_listsubstition.findall(self.tqry)
if li_listsubstition:
self.preprocess_listsubstitution(li_listsubstition)
def format(self, tqry, *args):
"""
looks up substitutions and sets them up in dictionary self.sub
postgresql accepts Python named variable so keeping the query as is
select * from foo where bar = %(somebar)s"
=>
select * from foo where bar = %(somebar)s
{"somebar" : value-found-for-somebar}
"""
self.sub = {}
self.li_arg = list(args)
self.tqry = tqry
self._pre_process()
try:
self.tqry % (self)
except (Exception,) as e:
raise
# Postgresql query format stays as %(foo)s
# so we just return the original query
# (which _pre_process may have altered)
return self.tqry, self.sub
__call__ = format
def __getitem__(self, key):
if key in self.sub:
return None
got = self._get_from_args(key)
self.sub[key] = got
return None
PARAMSTYLE_QMARK = PARAMSTYLE_SQLITE = PARAMSTYLE_SQLSERVER = "qmark"
class BinderQmark(Binder):
""" supports: sqlite3, SQL Server
query template and substitution management for sqlite3
query changes from %(somevar)s to ?
select * from foo where bar = %(somebar)s
=>
select * from foo where bar = ?,
(value-found-for-somebar,)
"""
paramstyle = PARAMSTYLE_QMARK
supports = "sqlite3, mssql"
qry_replace = "?"
def format(self, tqry, *args):
"""
looks up substitutions and sets them up in self.sub
Note:
Assuming both will be happy with a tuple.
Might be one SQL Server needs a list instead.
"""
self.tqry = tqry
self._di_sub = {}
self.sub = []
self.li_arg = list(args)
self._pre_process()
try:
qry = self.tqry % (self)
except (Exception,) as e:
raise
return qry, tuple(self.sub)
__call__ = format
def __getitem__(self, key):
"""
finds a substitution and append it to the bind list
but also transforms the variable in the query to ?
"""
qry_replace = self.qry_replace
try:
got = self._di_sub[key]
except KeyError:
got = self._di_sub[key] = self._get_from_args(key)
self.sub.append(got)
return qry_replace
class BinderFormat(BinderQmark):
"""supports: MySQL
query template and substitution management for MySQL
query changes from %(somevar)s to %s format
parameters are (<var1>,<var2>,)
Note: pretty much identical to BinderQmark/sqlite3
except for the placeholder being %s
"""
paramstyle = "format"
supports = "MySQL"
qry_replace = "%s"
class BinderNamed(Binder):
"""supports: Oracle
query template and substitution management for Oracle
query changes from %(somevar)s to :somevar format
list-based substitutions:
%(somelist)l :__somelist_000, :__somelist_001...
"""
paramstyle = "named"
supports = "Oracle"
t_qry_replace = ":%s"
def format(self, tqry, *args):
"""
looks up substitutions and sets them up in self.sub
but also transforms the query to Oracle named
format
"select * from foo where bar = %(somebar)s"
=>
"select * from foo where bar = :somebar "
{"somebar" : value-found-for-somebar}
"""
self.sub = {}
self.li_arg = list(args)
self.tqry = tqry
self._pre_process()
try:
qry = self.tqry % (self)
except (Exception,) as e:
raise
return qry, self.sub
__call__ = format
def __getitem__(self, key):
"""
finds a substitution
but also transforms the variable in the query to Oracle named
format :foo
"""
# already seen so already in the substition dict
# replace the query's %(foo)s with :foo
if key in self.sub:
return self.t_qry_replace % (key)
got = self._get_from_args(key)
self.sub[key] = got
return self.t_qry_replace % (key)
"""
https://www.python.org/dev/peps/pep-0249/#paramstyle
paramstyle Meaning
qmark Question mark style, e.g. ...WHERE name=? sequence
numeric Numeric, positional style, e.g. ...WHERE name=:1
named Named style, e.g. ...WHERE name=:name
format ANSI C printf format codes, e.g. ...WHERE name=%s
pyformat Python extended format codes, e.g. ...WHERE name=%(name)s
"""
ExperimentalBinderNamed = BinderNamed
class Binder_NotImplementedError(Binder):
"""not implemented yet"""
paramstyle = "not implemented"
def __init__(self, *args, **kwds):
raise NotImplementedError()
# This is what decides how the Binder
# will process incoming template substitutions
Binder._di_paramstyle["pyformat"] = Binder_pyformat
Binder._di_paramstyle["named"] = BinderNamed
Binder._di_paramstyle[PARAMSTYLE_QMARK] = BinderQmark
Binder._di_paramstyle["format"] = BinderFormat
Binder._di_paramstyle["experimentalnamed"] = ExperimentalBinderNamed
# and these are not done yet
Binder._di_paramstyle["numeric"] = Binder_NotImplementedError
``` |
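A minimal usage sketch for the factory above, assuming the package is importable as `pynoorm` (the import path is an assumption based on the file location): the same Python-style template is rewritten for each target paramstyle, and the matching bind parameters are collected from the positional arguments.
```python
from pynoorm.binder import Binder  # assumed import path

# named style, e.g. for cx_Oracle
binder = Binder.factory("named")
qry, sub = binder.format(
    "select * from orders where custid = %(custid)s",
    {"custid": 101},
)
# qry -> "select * from orders where custid = :custid"
# sub -> {"custid": 101}

# qmark style, e.g. for sqlite3
binder = Binder.factory("qmark")
qry, params = binder.format(
    "select * from orders where custid = %(custid)s",
    {"custid": 101},
)
# qry    -> "select * from orders where custid = ?"
# params -> (101,)
```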
{
"source": "jpfairbanks/NewsGraphing",
"score": 3
} |
#### File: src/newsgraphing/content_model_plots.py
```python
from sklearn import metrics
import matplotlib.pyplot as plt
import csv
import os
import sys
from collections import Counter
CLF_ARGS = ['logreg','rf','svm']
LABEL_ARGS = ['bias','cred']
PATH = '../results/'
# files should have format: CLFNAME_LABELNAME_OPTION_results.csv
files = ['logreg_bias_results.csv','rf_bias_results.csv','logreg_cred_results.csv','logreg_bias_paragraph_vectors_results.csv']
def makeROC(data,args):
"""Plot and save ROC curves from given files"""
roc_data = {}
for i,pair in enumerate(data):
CLFNAME = args[i][0]
LABELNAME = args[i][1]
if len(args[i])>2:
OPTION = '('+'-'.join(args[i][2:])+')'
else:
OPTION = '(tf-idf)'
if LABELNAME not in roc_data:
roc_data[LABELNAME] = []
# legend info
roc_label = CLFNAME + ' ' + OPTION
# compute AUC
predictions = [p[0] for p in pair]
truth = [p[1] for p in pair]
fpr, tpr, roc_thresholds = metrics.roc_curve(truth,predictions)
auc = metrics.auc(fpr,tpr)
print("AUC score for "+CLFNAME+" "+LABELNAME+": ", auc)
# plot type and title
if LABELNAME == 'bias':
title = 'Bias Classification Receiver Operating Characteristic'
elif LABELNAME == 'cred':
title = 'Credibility Classification Receiver Operating Characteristic'
# save ROC data
roc_data[LABELNAME].append([[fpr,tpr],roc_label,title,auc])
rocs = []
for label in roc_data:
# new plot
plt.figure()
# plot random line
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
for dataset in roc_data[label]:
# extract data
fpr = dataset[0][0]
tpr = dataset[0][1]
roc_label = dataset[1]
title = dataset[2]
auc = dataset[3]
# plot ROC Curve for each dataset with label
plt.plot(fpr,tpr,lw=2, label=roc_label + '(area = %0.3f)' % auc)
plt.title(title)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
plt.savefig(PATH+'content_model_'+label+'_roc.png',bbox_inches='tight')
return rocs
def distribution_by_label(predictions,truth):
""" Plots histograms showing distributions of predicted labels """
plt.hist([x for i,x in enumerate(predictions) if truth[i]==0], label='label 0')
plt.hist([x for i,x in enumerate(predictions) if truth[i]==1], label='label 1')
plt.xlabel('Probability')
plt.ylabel('Frequency')
plt.title('Prediction Distribution by Label')
plt.grid(True)
plt.legend(loc="upper right")
plt.savefig(PATH+'content_model_predictions_distribution.png',bbox_inches='tight')
plt.show()
return plt
# main
if __name__ == '__main__':
args = []
data = []
# extract parameters from file names
for file in files:
if not os.path.exists(PATH+file):
sys.exit("One or more input files do not exist: " + str(files))
args.append(file.split('_')[:-1])
file_data = []
# read in prediction/truth data
with open(PATH+file,'r',encoding='utf-8') as f:
reader = csv.reader(f)
next(reader) #skip header
for row in reader:
file_data.append([float(i) for i in row])
data.append(file_data)
# create and save ROC curve
figs = makeROC(data,args)
## create and save histogram of predicted labels
#plot = distribution_by_label()
```
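For reference, a self-contained sketch of the ROC/AUC computation that `makeROC` applies to each (prediction, truth) results file; the toy numbers below are made up purely for illustration.
```python
from sklearn import metrics

predictions = [0.9, 0.2, 0.7, 0.4]   # predicted probabilities, as read from a results CSV
truth = [1, 0, 1, 0]                 # corresponding binary labels
fpr, tpr, _ = metrics.roc_curve(truth, predictions)
print(metrics.auc(fpr, tpr))         # 1.0 for this perfectly separable toy case
```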
#### File: jpfairbanks/NewsGraphing/vectorize.py
```python
import numpy as np
from gensim.models.doc2vec import Doc2Vec
from nltk import word_tokenize
from sklearn import feature_extraction
def remove_all_stopwords(tokens):
return [w for w in tokens if w not in feature_extraction.text.ENGLISH_STOP_WORDS]
def text_to_vector(model, text):
text_words = remove_all_stopwords(word_tokenize(text))
model.random.seed(0)
text_vector = model.infer_vector(text_words)
return text_vector
doc_model = Doc2Vec.load('doc2vec.bin')
example = 'This is a string of text.'
vec = text_to_vector(doc_model, example)
print(vec)
``` |
{
"source": "jpfairchild/DangerDiscordBot",
"score": 3
} |
#### File: jpfairchild/DangerDiscordBot/Dangerbot.py
```python
import discord
# import safygiphy
import asyncio
from discord.ext.commands import Bot
from discord.ext import commands
import platform
# Here you can modify the bot's prefix and description and wether it sends help in direct messages or not.
client = Bot(description="DangerBot", command_prefix="$", pm_help=False)
# ever = dir(safygiphy)
# print(ever)
@client.event
async def on_ready():
print('Logged in as ' + client.user.name + ' (ID:' + client.user.id + ') | Connected to ' + str(len(client.servers)) + ' servers | Connected to ' + str(len(set(client.get_all_members()))) + ' users')
print('--------')
print('Current Discord.py Version: {} | Current Python Version: {}'.format(discord.__version__, platform.python_version()))
print('--------')
print('Use this link to invite {}:'.format(client.user.name))
print('https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=8'.format(client.user.id))
return await client.change_presence(game=discord.Game(name='Tensor Flow 2.3.4')) # This is buggy, let us know if it doesn't work.
@client.event
async def on_message(message):
if message.content.startswith('$spencer'):
imageURL = "http://1.bp.blogspot.com/-jjrcg9oIe4M/UhMzgSuHdrI/AAAAAAAABSY/nrEQA4v3OwI/s1600/Dota+2+Mekanism.bmp"
embed = discord.Embed()
embed.set_image(url=imageURL)
await client.send_message(message.channel, 'SPENCER MEK NOW', embed=embed)
msg = await client.wait_for_message(timeout=5, content='lol')
await client.send_message(message.channel, 'Yea, spencer let me die in Dota so many times')
if message.content.startswith('$notice me senpai'):
author = message.author
authorid = message.author.id
print("@{} user sent a message. (id: {})".format(author, authorid))
if message.content == "$notice me senpai":
print('I noticed you @{}!'.format(authorid))
await client.send_message(message.channel, 'I noticed you @{} !'.format(author))
imageURL = "https://media2.giphy.com/media/zZOakyWLMzDws/giphy.gif"
embed = discord.Embed()
embed.set_image(url=imageURL)
await client.send_message(message.channel, embed=embed)
# if message.content.startswith('$cat'):
# await client.send_message(message.channel, 'Meow', embed='http://thecatapi.com/api/images/get?format=src&type=gif')
# imageURL = "http://thecatapi.com/api/images/get?format=src&type=gif"
# embed = discord.Embed()
# embed.set_image(url=imageURL)
# await client.send_message(message.channel, 'Meow', embed=embed)
client.run('NDA3MzA5NDQwMzEyOTM0NDEw.DbRUGw.lL3KVWP_8PDd7Ljou1FTQo-biQU')
``` |
{
"source": "jpfeiffe/MPFileReader",
"score": 3
} |
#### File: jpfeiffe/MPFileReader/mpfilereader.py
```python
import argparse
from multiprocessing.pool import ThreadPool
import numpy as np
import os
import time
import logging
def GetChunk(dataloc, filename, start, end):
logging.debug(f'Beginning {start} to {end}')
with open(filename, 'rb') as fin:
fin.seek(start)
dataloc[start:end] = np.fromfile(fin, dtype=np.int8, count=end-start)
logging.debug(f'Endings {start} to {end}')
return start,end
def MPFileReader(filename, processes, chunksize, cap=None):
# Threadpool to do the work
pool = ThreadPool(processes)
# Size of the dataset to read
datasize = os.path.getsize(filename)
if cap is not None:
datasize = cap
# Allocate our datasets
dataloc = np.empty((datasize,), dtype=np.int8)
# Location offsets to use
starts = list(range(0, datasize, chunksize))
ends = starts[1:] + [datasize]
arguments = zip([dataloc]*len(starts), [filename]*len(starts), starts, ends)
for i, (start,end) in enumerate(pool.starmap(GetChunk, arguments)):
logging.debug(f'Finished {start} to {end}')
return dataloc
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('-d', '--datafile', required=True, help='File to test')
PARSER.add_argument('-p', '--processes', default=8, type=int, help='Number of processes to use')
PARSER.add_argument('-c', '--chunksize', default=1000000, type=int, help='Size of chunks to read')
PARSER.add_argument('-a', '--cap', type=int, default=None, help='Cap the filesize (test)')
PARSER.add_argument('-v', '--validate', action='store_true', help='validate that the files are identical (only use on small data; will reload file!)')
PARSER.set_defaults(validate=False)
ARGS = PARSER.parse_args()
logging.basicConfig(level=logging.DEBUG)
start = time.time()
dloc = MPFileReader(ARGS.datafile, ARGS.processes, ARGS.chunksize, ARGS.cap)
logging.info(f'Time was {time.time() - start}')
if ARGS.validate:
logging.info(f'Starting Validation')
dloc2 = open(ARGS.datafile, 'rb').read(ARGS.cap)
logging.info(f'Validation File Loaded...')
logging.info(f'Validation: {all(a == b for a, b in zip(dloc.tobytes(), dloc2))}')
``` |
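Besides the CLI entry point, the reader can be used programmatically; a small sketch follows (the module name and the file path are assumptions):
```python
import logging
from mpfilereader import MPFileReader  # assumes this module is on the path

logging.basicConfig(level=logging.INFO)
# read 'data.bin' (placeholder path) with 8 worker threads and 1 MB chunks
data = MPFileReader('data.bin', processes=8, chunksize=1_000_000)
print(data.dtype, data.shape)  # int8, one entry per byte of the file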
{
"source": "jpfeiffe/slackespn",
"score": 3
} |
#### File: jpfeiffe/slackespn/SlackESPN.py
```python
from slackclient import SlackClient
from espnff import League
import argparse, os, time
def handle_command(ARGS, CLIENT, command, channel):
"""
Receives commands directed at the bot and determines if they
are valid commands. If so, then acts on the commands. If not,
returns back what it needs for clarification.
"""
message = '''Commands I know:
list teams
scores <optional week number>
does Brandon suck
'''
message = ""
attachments = ""
if command == "list teams":
message = '\n'.join(map(lambda x: x.team_name, ARGS.league.teams))
elif command == "does brandon suck":
message = 'yes'
elif 'scores' in command:
pieces = command.split(' ')
if len(pieces) == 1:
message = 'Current Scoreboard'
matchups = ARGS.league.scoreboard(projections=True)
else:
message = 'Scoreboard for week ' + pieces[1]
matchups = ARGS.league.scoreboard(pieces[1], projections=True)
attachments = [{
'fallback': 'A textual representation of your table data',
'fields': [
{
'title': 'Home',
'value': '\n'.join(map(lambda x: x.home_team.team_abbrev + " " + str(x.home_score) + " (" + str(x.home_projection) + ")", matchups)),
'short': True
},
{
'title': 'Away',
'value': '\n'.join(map(lambda x: x.away_team.team_abbrev + " " + str(x.away_score) + " (" + str(x.away_projection) + ")", matchups)),
'short': True
}
]
}]
CLIENT.api_call("chat.postMessage", channel=channel, text=message, attachments=attachments, as_user=True)
# CLIENT.api_call("chat.postMessage", channel=channel, text=message, as_user=True)
def parse_slack_output(ARGS, slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and ARGS.atbot in output['text']:
# return text after the @ mention, whitespace removed
return output['text'].split(ARGS.atbot)[1].strip().lower(), \
output['channel']
return None, None
def startloop(ARGS, client):
if client.rtm_connect():
print(ARGS.botname + " connected and running!")
while True:
command, channel = parse_slack_output(ARGS, client.rtm_read())
if command and channel:
handle_command(ARGS, CLIENT, command.strip(), channel)
time.sleep(ARGS.websocketdelay)
else:
print("Connection failed. Invalid Slack token or bot ID?")
"""
"""
def getfootballbot(ARGS, client):
api_call = client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == ARGS.botname:
print("Bot ID for '" + user['name'] + "' is " + user.get('id'))
return user.get('id')
else:
raise Exception("could not find bot user with the name " + ARGS.botname)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser()
PARSER.add_argument('-slacktoken', default='SLACK_FOOTBALL_TOKEN')
PARSER.add_argument('-espnleague', default='ESPN_LEAGUE')
PARSER.add_argument('-botname', default='footballbot')
PARSER.add_argument('-espns2', default='ESPNS2')
PARSER.add_argument('-swid', default='SWID')
PARSER.add_argument('-websocketdelay', type=int, default=1)
ARGS = PARSER.parse_args()
ARGS.league = League(int(os.environ.get(ARGS.espnleague)), 2017, espn_s2=os.environ.get(ARGS.espns2), swid=os.environ.get(ARGS.swid))
# sc = ARGS.league.scoreboard(projections=True)
# home_names = '\n'.join(map(lambda x: x.home_team.team_abbrev, sc))
# home_scores = '\n'.join(map(lambda x: x.home_score, sc))
# home_proj = '\n'.join(map(lambda x: x.home_projection, sc))
# print(home_scores)
# exit()
CLIENT = SlackClient(os.environ.get(ARGS.slacktoken))
BOTID = getfootballbot(ARGS, CLIENT)
ARGS.atbot = "<@" + BOTID + ">"
startloop(ARGS, CLIENT)
``` |
{
"source": "jpfeiffe/yargparse",
"score": 3
} |
#### File: yargparse/tests/test_parser.py
```python
import yargparse
import unittest
class TestYArgumentParser(unittest.TestCase):
def test_multiple_configs(self):
parser = yargparse.YArgumentParser()
args = parser.parse_args('-c config.yaml config2.yaml'.split(' '))
self.assertEqual(len(args.class_weights), 1)
def test_base_config(self):
parser = yargparse.YArgumentParser()
args = parser.parse_args()
self.assertEqual(args.config, ['config.yaml'])
self.assertEqual(args.train_param.deltas, [1, 2, 3])
self.assertEqual(args.train_param.lr, [.1, .2, .3])
self.assertEqual(args.features[0].dim, 100)
self.assertEqual(args.features[0].type, "sparse")
self.assertEqual(args.features[1].dim, 200)
self.assertEqual(args.features[1].type, "dense")
def test_update_vals(self):
parser = yargparse.YArgumentParser()
args = parser.parse_args(['--train_param.deltas=[.5, .6, 7]'])
self.assertEqual(args.config, ['config.yaml'])
self.assertEqual(args.train_param.deltas, [.5, .6, 7])
def test_splittypes(self):
parser = yargparse.YArgumentParser()
args = parser.parse_args(['--train_param.deltas=[.5, .6, 7]'])
self.assertEqual(args.train_param.deltas, [.5, .6, 7])
parser = yargparse.YArgumentParser()
args = parser.parse_args(['--train_param.deltas [.5, .6, 7]'])
self.assertEqual(args.train_param.deltas, [.5, .6, 7])
parser = yargparse.YArgumentParser()
args = parser.parse_args(['--train_param.deltas:[.5, .6, 7]'])
self.assertEqual(args.train_param.deltas, [.5, .6, 7])
def test_list_assign(self):
parser = yargparse.YArgumentParser()
args = parser.parse_args(['--matrix[1][0] 18'])
self.assertEqual(args.matrix[1][0], 18)
def test_nonevals(self):
parser = yargparse.YArgumentParser()
parser.add_argument('--train_epochs', default=None)
args = parser.parse_args(['--matrix[1][0] 18'])
self.assertEqual(args.train_epochs, 10)
def test_lists(self):
parser = yargparse.YArgumentParser()
args = parser.parse_args()
self.assertEqual(len(args.class_weights), 1)
if __name__ == '__main__':
unittest.main()
``` |
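These tests assume a `config.yaml` next to the test file that is not shown in this snapshot; reconstructed from the assertions above, its structure would be roughly the following (hypothetical values, shown as a YAML string parsed with PyYAML purely to illustrate the nesting that yargparse exposes as dotted attributes such as `train_param.deltas` and `features[0].dim`):
```python
import yaml

# Hypothetical config.yaml contents consistent with the assertions above.
config_text = """
train_param:
  deltas: [1, 2, 3]
  lr: [0.1, 0.2, 0.3]
features:
  - {dim: 100, type: sparse}
  - {dim: 200, type: dense}
matrix: [[0, 0], [0, 0]]
train_epochs: 10
class_weights: [1.0]
"""
print(yaml.safe_load(config_text))
```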
{
"source": "jpfeif/keyloggermasterawss3",
"score": 2
} |
#### File: linux/keylogger/s3dump.py
```python
import socket
import boto
import boto.s3
import sys
from boto.s3.key import Key
name = (socket.gethostname())
AWS_ACCESS_KEY_ID = 'Stuff'
AWS_SECRET_ACCESS_KEY = '<KEY>'
bucket_name = AWS_ACCESS_KEY_ID.lower() + '-dump'
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY)
bucket = conn.create_bucket(dumper,
location=boto.s3.connection.Location.DEFAULT)
#gives the logger file name
logger = name + "s logger file"
def percent_cb(complete, total):
sys.stdout.write('.')
sys.stdout.flush()
k = Key(bucket)
k.key = logger
k.set_contents_from_filename(logger,
cb=percent_cb, num_cb=10)
``` |
{
"source": "jpfeuffer/pyopenms-extra",
"score": 2
} |
#### File: docs/pandoc_filters/admonitionfilter.py
```python
from pandocfilters import toJSONFilter, RawBlock, Div, stringify
# from: https://docutils.sourceforge.io/docs/ref/rst/directives.html#admonitions
# admonition is a special case with arbitrary header
admonition_types = ["attention", "caution", "danger", "error", "hint",
"important", "note", "tip", "warning", "admonition"]
# keywords in arbitrary admonition header
admonition_subtypes = ["goal"]
# colors and icons for admonition_types (without "admonition") and subtypes
admonition_colors = {"attention": "#FFA07A",
"caution": "#FFA07A",
"danger": "#CD5C5C",
"error": "#CD5C5C",
"hint": "#F0F8FF",
"important": "#FFA500",
"note": "#BDE5F8",
"tip": "#F0E68C",
"warning": "#FFA07A",
"goal": "#98FB98"}
admonition_icons = {"attention": "fas fa-exclamation",
"caution": "fas fa-exclamation-triangle",
"danger": "fas fa-exclamation-triangle",
"error": "fas fa-bomb",
"hint": "far fa-lightbulb",
"important": "fas fa-exclamation",
"note": "far fa-sticky-note",
"tip": "far fa-lightbulb",
"warning": "fas fa-exclamation-triangle",
"goal": "far fa-check-square"}
def html(x):
return RawBlock('html', x)
def admonitions(key, value, fmt, meta):
if key == 'Div':
[[ident, classes, kvs], contents] = value
if any(item in classes for item in admonition_types) and fmt == "ipynb":
header = stringify(contents[0])
admonition_subtype = "notfound"
if "admonition" not in classes:
admonition_subtype = header.lower()
else:
for subtype in admonition_subtypes:
if subtype in header.lower():
admonition_subtype = subtype
break
newcontents = [html('<div style="background-color: '
+ admonition_colors[admonition_subtype]
+ '; margin: 10px 0px; padding:12px;"><p style="font-size: x-large"><i class="'
+ admonition_icons[admonition_subtype] + '"></i> <b>'
+ header + '</b></p>')] + contents[1:] + [html('</div>')]
return Div([ident, classes, kvs], newcontents)
if __name__ == "__main__":
toJSONFilter(admonitions)
```
#### File: src/view/ControllerWidget.py
```python
import json
import re
from collections import namedtuple
import numpy as np
import pyopenms
from ErrorWidget import ErrorWidget
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QHBoxLayout, QWidget, QSplitter
from ScanTableWidget import ScanTableWidget
from SequenceIonsWidget import SequenceIonsWidget
from SpectrumWidget import SpectrumWidget
from TICWidget import TICWidget
PeakAnnoStruct = namedtuple(
"PeakAnnoStruct",
"mz intensity text_label \
symbol symbol_color",
)
LadderAnnoStruct = namedtuple(
"LadderAnnoStruct",
"mz_list \
text_label_list color",
)
class ControllerWidget(QWidget):
"""
Used to merge spectrum, table, TIC,
error plot and sequenceIons widgets together.
"""
def __init__(self, *args, **kwargs):
QWidget.__init__(self, *args, **kwargs)
self.mainlayout = QHBoxLayout(self)
self.isAnnoOn = True
self.clickedRT = None
self.seleTableRT = None
self.mzs = np.array([])
self.ppm = np.array([])
self.colors = np.array([])
self.scanIDDict = {}
self.curr_table_index = None
self.filteredIonFragments = []
self.peakAnnoData = None
def clearLayout(self, layout):
for i in reversed(range(layout.count())):
layout.itemAt(i).widget().setParent(None)
def loadFileMzML(self, file_path):
self.isAnnoOn = False
self.msexperimentWidget = QSplitter(Qt.Vertical)
# data processing
scans = self.readMS(file_path)
# set Widgets
self.spectrum_widget = SpectrumWidget()
self.scan_widget = ScanTableWidget(scans)
self.seqIons_widget = SequenceIonsWidget()
self.error_widget = ErrorWidget()
self.tic_widget = TICWidget()
self.drawTic(scans)
# connected signals
self.scan_widget.sigScanClicked.connect(self.updateWidgetDataFromRow)
self.tic_widget.sigRTClicked.connect(self.ticToTable)
self.msexperimentWidget.addWidget(self.tic_widget)
self.msexperimentWidget.addWidget(self.seqIons_widget)
self.msexperimentWidget.addWidget(self.spectrum_widget)
self.msexperimentWidget.addWidget(self.error_widget)
self.msexperimentWidget.addWidget(self.scan_widget)
self.mainlayout.addWidget(self.msexperimentWidget)
# set widget sizes, where error plot is set smaller
widget_height = self.msexperimentWidget.sizeHint().height()
size_list = [
widget_height,
widget_height,
widget_height,
widget_height * 0.5,
widget_height
]
self.msexperimentWidget.setSizes(size_list)
# default : first row selected.
self.scan_widget.table_view.selectRow(0)
def loadFileIdXML(self, file_path):
prot_ids = []
pep_ids = []
pyopenms.IdXMLFile().load(file_path, prot_ids, pep_ids)
Ions = {}
# extract ID data from file
for peptide_id in pep_ids:
pep_mz = peptide_id.getMZ()
pep_rt = peptide_id.getRT()
for hit in peptide_id.getHits():
pep_seq = str(hit.getSequence().toString())
if "." in pep_seq:
pep_seq = pep_seq[3:-1]
else:
pep_seq = pep_seq[2:-1]
for anno in hit.getPeakAnnotations():
ion_charge = anno.charge
ion_mz = anno.mz
ion_label = anno.annotation
Ions[ion_label] = [ion_mz, ion_charge]
self.scanIDDict[round(pep_rt, 3)] = {
"m/z": pep_mz,
"PepSeq": pep_seq,
"PepIons": Ions,
}
Ions = {}
self.saveIdData()
def saveIdData(self):
# save ID data in table (correct rows) for later usage
rows = self.scan_widget.table_model.rowCount(self.scan_widget)
for row in range(0, rows - 1):
tableRT = round(
self.scan_widget.table_model.index(row, 2).data(), 3)
if tableRT in self.scanIDDict:
index_seq = self.scan_widget.table_model.index(row, 6)
self.scan_widget.table_model.setData(
index_seq, self.scanIDDict[tableRT]["PepSeq"],
Qt.DisplayRole
)
index_ions = self.scan_widget.table_model.index(row, 7)
# data needs to be a string, but reversible ->
# using json.dumps()
self.scan_widget.table_model.setData(
index_ions,
json.dumps(self.scanIDDict[tableRT]["PepIons"]),
Qt.DisplayRole,
)
def readMS(self, file_path):
# read MzML files
exp = pyopenms.MSExperiment()
pyopenms.MzMLFile().load(file_path, exp)
return exp
def drawTic(self, scans):
self.tic_widget.setTIC(scans.calculateTIC())
def ticToTable(self, rt):
# connect Tic info to table, and select specific row
self.clickedRT = round(rt * 60, 3)
if self.clickedRT != self.seleTableRT:
self.scan_widget.table_view.selectRow(self.findClickedRT())
def findClickedRT(self): # find clicked RT in the scan table
rows = self.scan_widget.table_model.rowCount(self.scan_widget)
for row in range(0, rows - 1):
if self.clickedRT == round(
self.scan_widget.table_model.index(row, 2).data(), 3
):
index = self.scan_widget.table_model.index(row, 2)
try:
self.curr_table_index \
= self.scan_widget.proxy.mapFromSource(index)
# use proxy to get from filtered model index
return self.curr_table_index.row()
except ValueError:
print("could not found ModelIndex of row")
# for the future calculate ppm and add it to the table
def errorData(self, ions_data):
if ions_data not in "-":
ions_data_dict = json.loads(ions_data)
if ions_data_dict != {}:
self.colors, self.mzs = self.filterColorsMZIons(ions_data_dict)
mzs_size = len(self.mzs)
self.ppm = np.random.randint(0, 3, size=mzs_size)
self.error_widget.setMassErrors(
self.mzs, self.ppm, self.colors
) # works for a static np.array
else:
self.error_widget.clear()
else:
self.error_widget.clear()
def filterColorsMZIons(
self, ions_data_dict
): # create color/mz array by distinguishing between prefix & suffix ions
self.peakAnnoData = (
{}
) # key is ion annotation (e.g. b2):
# [mz, color distinguishing prefix, suffix]
colors = []
mzs = []
col_red = (255, 0, 0) # suffix
col_blue = (0, 0, 255) # prefix
for fragData in self.filteredIonFragments:
anno = fragData[0]
if anno[0] in "abc":
colors.append(col_blue)
mzs.append(ions_data_dict[anno][0])
self.peakAnnoData[fragData[1]] = [
ions_data_dict[anno][0], col_blue]
elif anno[0] in "xyz":
colors.append(col_red)
mzs.append(ions_data_dict[anno][0])
self.peakAnnoData[fragData[1]] = [
ions_data_dict[anno][0], col_red]
return np.array(colors), np.array(mzs)
def updateWidgetDataFromRow(
self, index
): # after clicking on a new row, update spectrum, error plot, peptideSeq
# current row RT value
self.seleTableRT = round(index.siblingAtColumn(2).data(), 3)
# set new spectrum with setting that all peaks should be displayed
self.spectrum_widget.setSpectrum(
self.scan_widget.curr_spec, zoomToFullRange=True
)
# only draw sequence with given ions for MS2 and error plot
if index.siblingAtColumn(0).data() == "MS2":
self.drawSeqIons(
index.siblingAtColumn(
6).data(), index.siblingAtColumn(7).data()
)
self.errorData(index.siblingAtColumn(7).data())
if (
self.peakAnnoData is not None
): # peakAnnoData created with existing ions in errorData
# (bc of coloring)
self.spectrum_widget.setPeakAnnotations(
self.createPeakAnnotation())
self.spectrum_widget.redrawPlot()
else:
self.spectrum_widget._clear_peak_annotations()
self.spectrum_widget.redrawPlot()
# otherwise delete old data
elif index.siblingAtColumn(0).data() == "MS1":
self.seqIons_widget.clear()
self.error_widget.clear()
self.peakAnnoData = None
self.spectrum_widget._clear_peak_annotations()
self.spectrum_widget.redrawPlot()
def createPeakAnnotation(self):
pStructList = []
# for the future ->
# check clashes like in the TIC widget and then add labels
# (should be done in SpectrumWidget)
for anno, data in self.peakAnnoData.items():
mz, anno_color = data[0], data[1]
index = self.find_nearest_Index(self.spectrum_widget._mzs, mz)
pStructList.append(
PeakAnnoStruct(
mz=self.spectrum_widget._mzs[index],
intensity=self.spectrum_widget._ints[index],
text_label=anno,
symbol=None,
symbol_color=anno_color,
)
)
return pStructList
def find_nearest_Index(self, array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def drawSeqIons(self, seq, ions): # generate provided peptide sequence
seq = re.sub(
r"\([^)]*\)", "", seq
) # remove content in brackets -> easier usage
# only draw sequence for M2 with peptide and ion data
if seq not in "-" and ions not in "-":
self.seqIons_widget.setPeptide(seq)
# transform string data back to a dict
ions_dict = json.loads(ions)
if ions_dict != {}:
self.suffix, self.prefix = self.filterIonsPrefixSuffixData(
ions_dict)
self.seqIons_widget.setPrefix(self.prefix)
self.seqIons_widget.setSuffix(self.suffix)
else: # no ions data
self.prefix, self.suffix = {}, {}
self.seqIons_widget.setPrefix(self.prefix)
self.seqIons_widget.setSuffix(self.suffix)
self.peakAnnoData = None
else:
self.seqIons_widget.clear()
self.peakAnnoData = None
def filterIonsPrefixSuffixData(
self, ions
): # filter raw ion data and return suffix and prefix dicts
suffix = {}
prefix = {}
ions_anno = list(ions.keys())
# annotation(s) of raw ion data (used as key(s))
self.filteredIonFragments = []
for anno in ions_anno:
if anno[1].isdigit() and anno[0] in "abcyxz":
index, anno_short = self.filterAnnotationIon(anno)
if (
(index in suffix) and
(anno[0] in "yxz") and
(anno_short not in suffix[index])
): # avoid double annos e.g. y14
suffix[index].append(anno_short)
elif (
(index in prefix) and
(anno[0] in "abc") and
(anno_short not in prefix[index])
):
prefix[index].append(anno_short)
elif anno[0] in "yxz": # non existing keys
suffix[index] = [anno_short]
elif anno[0] in "abc": # non existing keys
prefix[index] = [anno_short]
return suffix, prefix
def filterAnnotationIon(self, fragment_anno):
# filter from raw ion data annotation index
# and filtered annotation name (e.g. y2)
index = [s for s in re.findall(r"-?\d+\.?\d*", fragment_anno)][0]
ion_anno = fragment_anno.split(index)[0] + index
self.filteredIonFragments.append((fragment_anno, ion_anno))
return int(index), ion_anno
```
#### File: src/view/TICWidget.py
```python
import numpy as np
import pyqtgraph as pg
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QShortcut
from pyqtgraph import PlotWidget
pg.setConfigOption("background", "w") # white background
pg.setConfigOption("foreground", "k") # black peaks
class TICWidget(PlotWidget):
"""
Used for creating a TIC plot
with dynamic zooming to avoid label collisions.
=============================== =========================================
**Signals:**
sigRTClicked Emitted when the user has clicked on TIC
plot and returns the clicked RT value.
sigSeleRTRegionChangeFinished Emitted while the user is double clicking
on a region in TIC plot and creates a
region by dragging a horizontal line.
The signal returns the start and end
RT values within the region.
=============================== =========================================
"""
sigRTClicked = pyqtSignal(float, name="sigRTClicked")
sigSeleRTRegionChangeFinished = pyqtSignal(
float, float, name="sigRTRegionChangeFinished"
)
def __init__(self, parent=None, dpi=100):
PlotWidget.__init__(self)
self.setLimits(yMin=0, xMin=0)
self.setMouseEnabled(y=False)
self.setLabel("bottom", "RT (min)")
self.setLabel("left", "relative intensity (%)")
self._peak_labels = {}
self._existTIC = True
# numpy arrays for fast look-up
self._rts = np.array([])
self._ints = np.array([])
self._peak_indices = np.array([])
self._currentIntensitiesInRange = np.array([])
self._region = None
self.getViewBox().sigXRangeChanged.connect(self._autoscaleYAxis)
self.scene().sigMouseClicked.connect(self._clicked) # emits rt_clicked
# shortcut to init region
self.shortcut1 = QShortcut(QKeySequence("Ctrl+r"), self)
self.shortcut1.activated.connect(self._rgn_shortcut)
# in cases only MS2 spectra are given
def checkExistTIC(self):
if self._rts.size == 0:
self._existTIC = False
def setTIC(self, chromatogram):
"""
Used to set new TIC and with given Information (rts, ints)
:param chromatogram: data from the MSExperiment
"""
if self._peak_labels != {}:
self._clear_labels()
self._peak_labels = {}
self._chrom = chromatogram
self._rts, self._ints = self._chrom.get_peaks()
self.checkExistTIC()
if self._existTIC:
self._rts_in_min()
self._relative_ints()
self._peak_indices = self._find_Peak()
self._autoscaleYAxis()
self.redrawPlot()
def _rts_in_min(self):
self._rts = np.array([x / 60 for x in self._rts])
def _relative_ints(self):
maxInt = np.amax(self._ints)
self._ints = np.array([((x / maxInt) * 100) for x in self._ints])
def redrawPlot(self):
self.plot(clear=True)
self._plot_tic()
self._draw_peak_label()
def _autoscaleYAxis(self):
"""
Used to adjust y axis with the maximal y value
from the current RT values. Also, redraws peak labels
depending on the current displayed RT values.
"""
x_range = self.getAxis("bottom").range
if x_range == [0, 1]: # workaround for axis sometimes not being set
x_range = [np.amin(self._rts), np.amax(self._rts)]
self.currMaxY = self._getMaxIntensityInRange(x_range)
if self.currMaxY:
self.setYRange(0, self.currMaxY, update=False)
self._redrawLabels()
def _getMaxIntensityInRange(self, xrange):
"""
:param xrange: A list of [min, max] bounding RT values.
:return: A float value representing the maximal
intensity in the current x range.
"""
left = np.searchsorted(self._rts, xrange[0], side="left")
right = np.searchsorted(self._rts, xrange[1], side="right")
self._currentIntensitiesInRange = self._ints[left:right]
return np.amax(self._ints[left:right], initial=1)
def _plot_tic(self):
plotgraph = pg.PlotDataItem(self._rts, self._ints)
self.addItem(plotgraph)
def _find_Peak(self):
"""
Calculates all indices from the intensity values to locate peaks.
This function operates on the principle that it compares peak values
against each other until it finds a maximal turning point.
:return: A numpy array containing all peak indices,
sorted descending (max first -> min last).
"""
data = self._ints
maxIndices = np.zeros_like(data)
peakValue = -np.inf
for indx in range(0, len(data), 1):
if peakValue < data[indx]:
peakValue = data[indx]
for j in range(indx, len(data)):
if peakValue < data[j]:
break
elif peakValue == data[j]:
continue
elif peakValue > data[j]:
peakIndex = indx + np.floor(abs(indx - j) / 2)
# marking found index
maxIndices[peakIndex.astype(int)] = 1
indx = j
break
peakValue = data[indx]
maxIndices = np.where(maxIndices)[0]
# sort indices of high points from largest intensity to smallest
maxIndices = sorted(maxIndices, key=lambda x: data[x], reverse=True)
return maxIndices
def _add_label(self, label_id, label_text, pos_x, pos_y):
label = pg.TextItem(anchor=(0.5, 1))
label.setText(text="{0:.2f}".format(label_text), color=(0, 0, 0))
label.setPos(pos_x, pos_y)
self._peak_labels[label_id] = {"label": label}
self.addItem(label, ignoreBounds=True)
if self._label_clashes(label_id):
self._remove_label(label_id)
def _remove_label(self, label_id):
self.removeItem(self._peak_labels[label_id]["label"])
del self._peak_labels[label_id]
def _clear_labels(self):
for label_id in self._peak_labels.keys():
self.removeItem(self._peak_labels[label_id]["label"])
self._peak_labels = {}
def _label_clashes(self, label_id):
"""
Calculates possible clash of new added label to other existing labels.
The clash is measured by the
collision of the label boundingRects,
which are representing displayed scene positions.
:param label_id: Represents index of peak position in peak_indices.
:return: A boolean indicating if there is a clash or not.
"""
new_label = label_id
clash = False
# scaling the distance with the correct pixel size
pixel_width = self.getViewBox().viewPixelSize()[0]
limit_distance = 20.0 * pixel_width
if self._peak_labels == {}:
return False
for exist_label in list(self._peak_labels):
if exist_label != new_label:
new_label_rect =\
self._peak_labels[new_label]["label"].mapRectToDevice(
self._peak_labels[new_label]["label"].boundingRect()
)
exist_label_rect = self._peak_labels[exist_label][
"label"
].mapRectToDevice(
self._peak_labels[exist_label]["label"].boundingRect()
)
if not new_label_rect.intersects(exist_label_rect):
exist_label_X = self._peak_labels[exist_label]["label"].x()
new_label_X = self._peak_labels[new_label]["label"].x()
distance = abs(new_label_X - exist_label_X)
if distance < limit_distance:
clash = True
break
else:
clash = False
elif new_label_rect.intersects(exist_label_rect):
clash = True
break
else:
if len(self._peak_labels) == 1 and exist_label == new_label:
clash = False
return clash
def _draw_peak_label(self):
"""
Function draws peak labels,
starting from the maximal peak and ending with the minimal peak.
With each addition, possible label clashes are calculated;
if a clash is found, the new label is removed.
"""
if self._peak_labels == {}:
for index in self._peak_indices:
if self._ints[index] in self._currentIntensitiesInRange:
self._add_label(
index, self._rts[index],
self._rts[index],
self._ints[index]
)
def _redrawLabels(self):
self._clear_labels()
self._draw_peak_label()
def _clicked(self, event):
if self._existTIC:
pos = event.scenePos()
if self.sceneBoundingRect().contains(pos):
mouse_point = self.getViewBox().mapSceneToView(pos)
closest_datapoint_idx = self._calculate_closest_datapoint(
mouse_point.x()
)
self.sigRTClicked.emit(
self._rts[closest_datapoint_idx]
) # notify observers
# check the selected rt region and return the bounds
if self._region is not None:
self._region.sigRegionChangeFinished.connect(
self._rtRegionBounds)
def mouseDoubleClickEvent(self, event):
super(TICWidget, self).mouseDoubleClickEvent(event)
try:
mouse_point = self.getViewBox().mapSceneToView(event.pos())
closest_datapoint_idx = self._calculate_closest_datapoint(
mouse_point.x())
rgn_start = self._rts[closest_datapoint_idx]
if self._region is None:
region = pg.LinearRegionItem()
region.setRegion((rgn_start, rgn_start))
self._region = region
self.addItem(region, ignoreBounds=True)
# delete the region when hovering over the region per doubleClk
self._delete_region()
except ValueError:
print("No TIC values to click on")
def _calculate_closest_datapoint(self, point_x):
"""
:param point_x: mouse clicked position
:return: closest data point near a peak
"""
larger_idx = np.searchsorted(self._rts, point_x, side="left")
smaller_idx = 0
if larger_idx >= self._rts.size: # to avoid array out of bounds
larger_idx -= 1
if larger_idx > 0:
smaller_idx = larger_idx - 1
if abs(self._rts[larger_idx] - point_x) < \
abs(self._rts[smaller_idx] - point_x):
closest_datapoint_idx = larger_idx
else:
closest_datapoint_idx = smaller_idx
return closest_datapoint_idx
def _rtRegionBounds(self):
region_bounds = self._region.getRegion()
start_rg = region_bounds[0]
stop_rg_idx = self._calculate_closest_datapoint(region_bounds[1])
stop_rg = self._rts[stop_rg_idx]
# set the new region of interest
self._region.setRegion((start_rg, stop_rg))
self.sigSeleRTRegionChangeFinished.emit(
start_rg, stop_rg) # notify observers
def _delete_region(self):
if self._region.mouseHovering:
self.removeItem(self._region)
self._region = None
def _rgn_shortcut(self):
# click region, with following shortcut -> create region
rgn_start = self.getViewBox().mapSceneToView(self.lastMousePos)
if self._region is None:
region = pg.LinearRegionItem()
region.setRegion((rgn_start, rgn_start))
self._region = region
self.addItem(region, ignoreBounds=True)
``` |
{
"source": "jpfeuffer/quantms",
"score": 3
} |
#### File: quantms/bin/scrape_software_versions.py
```python
from __future__ import print_function
import os
import re
results = {}
version_files = [x for x in os.listdir(".") if x.endswith(".version.txt")]
# TODO https://github.com/nf-core/proteomicslfq/pull/165
def get_versions(software, version_file):
semver_regex = r"((?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?)"
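# Illustrative example (assumption, not part of the pipeline): re.search(semver_regex, "tool 2.6.0").group(1)
# returns "2.6.0"; the named groups also capture optional pre-release and build metadata.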
regexes = {
'nf-core/quantms': r"(\S+)",
'nextflow': r"(\S+)",
'sdrf-pipelines': semver_regex,
'thermorawfileparser': r"(\S+)",
'fileconverter': semver_regex,
'decoydatabase': semver_regex,
'isobaricanalyzer': semver_regex,
'msgfplusadapter': semver_regex,
'msgfplus': r"\(([^v)]+)\)",
'cometadapter': semver_regex,
'comet': r"\"(.*)\"",
'indexpeptides': semver_regex,
'extractpsmfeature': semver_regex,
'percolatoradapter': semver_regex,
'percolator': r"([0-9].[0-9]{2}.[0-9])",
'idfilter': semver_regex,
'idscoreswitcher': semver_regex,
'falsediscoveryrate': semver_regex,
'IDPosteriorErrorProbability': semver_regex,
'consensusid': semver_regex,
'filemerge': semver_regex,
'pmultiqc': semver_regex,
'idmapper': semver_regex,
'epifany': semver_regex,
'proteininference': semver_regex,
'idconflictresolver': semver_regex,
'proteomicslfq': semver_regex,
'proteinquantifier': semver_regex,
'msstatsconverter': semver_regex,
'msstats': r"(\S+)",
'idfileconverter': semver_regex
}
match = re.search(regexes[software], version_file).group(1)
return match
for version_file in version_files:
software = version_file.replace(".version.txt", "")
if software == "pipeline":
software = "nf-core/quantms"
with open(version_file) as fin:
version = get_versions(software, fin.read().strip())
results[software] = version
# Dump to YAML
print(
"""
id: 'software_versions'
section_name: 'nf-core/quantms Software Versions'
section_href: 'https://github.com/nf-core/quantms'
plot_type: 'html'
description: 'are collected at run time from the software output.'
data: |
<dl class="dl-horizontal">
"""
)
for k, v in sorted(results.items()):
print(" <dt>{}</dt><dd><samp>{}</samp></dd>".format(k, v))
print(" </dl>")
# Write out as tsv file:
with open("software_versions.tsv", "w") as f:
for k, v in sorted(results.items()):
f.write("{}\t{}\n".format(k, v))
``` |
{
"source": "JPFigueredo/Hardware-Monitoring-System_Incomplete-Version",
"score": 3
} |
#### File: JPFigueredo/Hardware-Monitoring-System_Incomplete-Version/TP07 PB.py
```python
import pygame
import psutil
import cpuinfo
import socket
import time
import nmap
from cpuinfo import get_cpu_info
red = (200,0,0)
white = (210,214,217)
blue = (0,0,200)
grey = (105,105,105)
black = (0,0,0)
largura_tela, altura_tela = 1024,760
pygame.init()
pygame.font.init()
font = pygame.font.Font(None, 32)
uso = psutil.cpu_percent(interval=1, percpu=True)
tela = pygame.display.set_mode((largura_tela, altura_tela))
ip = socket.gethostbyname(socket.gethostname())
info = get_cpu_info()
address = psutil.net_if_addrs()
p = psutil.Process()
processos = psutil.pids()
menu = ""
menu1 = True
menu2 = True
menu3 = True
p_lista = []
pos = pygame.mouse.get_pos()
buttons = 30
pygame.display.set_caption("TP07 - Monitoramento do PC")
pygame.display.init()
clock = pygame.time.Clock()
def pc_infos():
font = pygame.font.Font(None, 36)
s1 = pygame.surface.Surface((largura_tela, altura_tela/3))
texto_barra = "Detalhes do Processador"
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 10))
font = pygame.font.Font(None, 28)
texto_barra = ('Nome: {}'.format(info['brand_raw']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 50))
texto_barra = ('Arquitetura: {}'.format(info['arch_string_raw']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 90))
texto_barra = ('Palavra (bits): {}'.format(info['bits']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 120))
texto_barra = ('Frequência (MHz): {}'.format(round(psutil.cpu_freq().current, 2)))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 150))
texto_barra = ('Núcleos (Físicos): {} ({})'.format(psutil.cpu_count(), psutil.cpu_count(logical=False)))
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, 180))
y = 60
for chave in address:
IP = address[chave][1]
addrs = IP[:3]
y+= 30
texto_barra = ('{:12.10}: {} - netmask: {}'.format(chave, addrs[1], addrs[2]))
text = font.render(texto_barra, 1, white)
s1.blit(text, (350, y))
tela.blit(s1, (0, 0))
def cpu_graph():
s2 = pygame.surface.Surface((largura_tela, altura_tela/5))
uso = psutil.cpu_percent(interval=1)
larg = largura_tela - 2*40
pygame.draw.rect(s2, blue, (20, 30, larg, 10))
larg = larg*uso/100
pygame.draw.rect(s2, red, (20, 30, larg, 10))
texto_barra = 'Uso de CPU: {}%'.format(uso)
text = font.render(texto_barra, 1, white)
s2.blit(text, (20, 0))
tela.blit(s2, (0, 250))
def m_graph():
s3 = pygame.surface.Surface((largura_tela, altura_tela/5))
m = psutil.virtual_memory()
larg = largura_tela - 2*40
pygame.draw.rect(s3, blue, (20, 30, larg, 10))
larg = larg*m.percent/100
pygame.draw.rect(s3, red, (20, 30, larg, 10))
total = round(m.total/(1024*1024*1024),2)
texto_barra = 'Uso de Memória: {}% (Total: {} GB)'.format(m.percent, total)
text = font.render(texto_barra, 1, white)
s3.blit(text, (20, 0))
tela.blit(s3, (0, 350))
def disk_graph():
s4 = pygame.surface.Surface((largura_tela, altura_tela/5))
disk = psutil.disk_usage('.')
larg = largura_tela - 2*40
pygame.draw.rect(s4, blue, (20, 30, larg, 10))
larg = larg*disk.percent/100
pygame.draw.rect(s4, red, (20, 30, larg, 10))
total = round(disk.total/(1024*1024*1024), 2)
texto_barra = 'Uso de Disco: {}% (Total: {} GB):'.format(disk.percent,total)
text = font.render(texto_barra, 1, white)
s4.blit(text, (20, 0))
tela.blit(s4, (0, 450))
def threads_graph():
s5 = pygame.surface.Surface((largura_tela, altura_tela))
y = 10
num_cpu = len(uso)
desl = 9
d = y + desl
for i in range(num_cpu):
alt = s5.get_height() - 2*y
larg = (alt - (num_cpu+1)*desl)/num_cpu
pygame.draw.rect(s5, red, (d, y, larg, alt))
pygame.draw.rect(s5, blue, (d, y, larg, (alt*uso[i]/100)))
d = d + larg + desl
tela.blit(s5, (0, 550))
def threads_text():
s5 = pygame.surface.Surface((largura_tela, altura_tela))
texto_barra = 'Uso de Threads:'.format()
text = font.render(texto_barra, 1, white)
s5.blit(text, (20, 0))
tela.blit(s5, (0, 530))
def infos():
s1 = pygame.surface.Surface((largura_tela, altura_tela))
font = pygame.font.Font(None, 36)
texto_barra = "Monitoramento de Uso"
text = font.render(texto_barra, 1, white)
s1.blit(text, (350, 10))
font = pygame.font.Font(None, 28)
texto_barra = ('Nome: {}'.format(info['brand_raw']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 60))
texto_barra = ('Arquitetura: {}'.format(info['arch_string_raw']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 90))
texto_barra = ('Palavra (bits): {}'.format(info['bits']))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 120))
texto_barra = ('Frequência (MHz): {}'.format(round(psutil.cpu_freq().current, 2)))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 150))
texto_barra = ('Núcleos (físicos): {} ({})'.format(str(psutil.cpu_count()), str(psutil.cpu_count(logical=False))))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 180))
texto_barra = ('IP Address: {}'.format(ip))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 210))
font = pygame.font.Font(None, 38)
#CPU
uso = psutil.cpu_percent(interval=0)
texto_barra = ('Uso de CPU: {}% Usado'.format(uso))
text = font.render(texto_barra, 1, white)
s1.blit(text, (230, 275))
#MEMORIA
m = psutil.virtual_memory()
total = round(m.total/(1024*1024*1024), 2)
texto_barra = ('Uso de Memória: {}% (Total: {} GB)'.format(m.percent, total))
text = font.render(texto_barra, 1, white)
s1.blit(text, (230, 325))
#HD
disco = psutil.disk_usage('.')
total = round(disco.total/(1024*1024*1024), 2)
texto_barra = ('Uso de Disco: {}% (Total: {})'.format(disco.percent, total))
text = font.render(texto_barra, 1, white)
s1.blit(text, (230, 375))
tela.blit(s1, (0, 0))
#THREADS
uso2 = psutil.cpu_percent(interval=1, percpu=True)
y = 0
x = 0
for i in range(len(uso2)):
texto_barra = ('Uso de Thread {} : {}% Usado'.format(i + 1, uso2[i]))
text = font.render(texto_barra, 1, white)
s1.blit(text, (20+x, 450+y))
tela.blit(s1, (0, 0))
y += 30
if i == 7:
x += 500
y -= 240
def dir_header():
s1 = pygame.surface.Surface((largura_tela, altura_tela/10))
font = pygame.font.Font(None, 36)
texto = '{}'.format("Detalhes de Arquivos/Diretórios")
text = font.render(texto, 1, white)
s1.blit(text, (650, 10))
tela.blit(s1, (0, 0))
def process_header():
s6 = pygame.surface.Surface((largura_tela, altura_tela/8))
font = pygame.font.Font(None, 16)
texto_barra = '{:<6}'.format("PID") + " "
texto_barra = texto_barra + '{:10}'.format("Threads") + " "
texto_barra = texto_barra + '{:30}'.format("Data de Criação") + " "
texto_barra = texto_barra + '{:25}'.format("CPU - UT")
# UT - User Time
# ST - System Time
texto_barra = texto_barra + '{:26}'.format("CPU - ST")
texto_barra = texto_barra + '{:25}'.format("Memory(%)") + " "
texto_barra = texto_barra + '{:10}'.format("RSS") + " "
# Vss = virtual set size
# Rss = resident set size
texto_barra = texto_barra + '{:25}'.format("VMS") + " "
texto_barra = texto_barra + '{:20}'.format("Executável")
text = font.render(texto_barra, 1, white)
s6.blit(text, (20, 80))
tela.blit(s6, (0, 0))
def arq_dir():
s1 = pygame.surface.Surface((largura_tela, altura_tela))
p = psutil.Process()
font = pygame.font.Font(None, 14)
y = 100
for i in processos:
texto_barra = '{:<6}'.format(i) + " "
texto_barra = texto_barra + '{:^12}'.format(p.num_threads()) + " "
texto_barra = texto_barra + '{:26}'.format(time.ctime(p.create_time()))
texto_barra = texto_barra + '{:20.2f}'.format(p.cpu_times().user)
texto_barra = texto_barra + '{:30.2f}'.format(p.cpu_times().system)
texto_barra = texto_barra + '{:30.2f}'.format(p.memory_percent()) + " MB"
rss = p.memory_info().rss/1024/1024
texto_barra = texto_barra + '{:30.2f}'.format(rss) + " MB"
# Vss = virtual set size
# Rss = resident set size
vms = p.memory_info().vms/1024/1024
texto_barra = texto_barra + '{:15.2f}'.format(vms) + " MB" + " "
texto_barra = texto_barra + '{:15}'.format(p.exe())
text = font.render(texto_barra, 1, white)
s1.blit(text, (30, y))
tela.blit(s1, (0, 0))
y+= 15
if y >= 600:
break
# if (i % 3 == 0) and (i % 5 == 0):
# break
def arq_dir_button():
s1 = pygame.surface.Surface((largura_tela, altura_tela))
font = pygame.font.Font(None, 32)
pygame.draw.rect(s1, grey, (20, 30, 125, 30))
texto_barra = "Próximo"
text = font.render(texto_barra, 1, white)
s1.blit(text, (38, 35))
tela.blit(s1, (670, 670))
def menu_init():
s0 = pygame.surface.Surface((largura_tela, altura_tela))
s0.fill(white)
font = pygame.font.Font(None, 50)
texto_barra = ("OPÇOES DE TELA")
text = font.render(texto_barra, 1, black)
s0.blit(text, (350, 20))
tela.blit(s0, (0, 0))
texto_barra = ("Botão esquerdo do mouse - Gráfico de Uso")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 140))
tela.blit(s0, (0, 0))
texto_barra = ("Botão direito do mouse - Monitoramento de Uso Geral")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 260))
tela.blit(s0, (0, 0))
texto_barra = ("ESPAÇO - Detalhes de Arquivos/Diretórios")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 380))
tela.blit(s0, (0, 0))
texto_barra = ("SHIFT - ESCANEAMENTO DE IP")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 500))
tela.blit(s0, (0, 0))
texto_barra = ("TAB - Voltar a Tela Inicial")
text = font.render(texto_barra, 1, black)
s0.blit(text, (70, 620))
tela.blit(s0, (0, 0))
def ping_ip(host):
s1 = pygame.surface.Surface((largura_tela, altura_tela))
font = pygame.font.Font(None, 32)
nmp = nmap.PortScanner()
nmp.scan(host)
y = 0
for proto in nmp[host].all_protocols():
texto_barra = 'Protocolo : {}'.format(proto)
text = font.render(texto_barra, 1, white)
s1.blit(text, (20, 20))
tela.blit(s1, (0, 0))
lport = nmp[host][proto].keys()
for port in lport:
texto_barra = 'Porta: {:<15} Estado: {:>10}'.format(port, nmp[host][proto][port]['state'])
text = font.render(texto_barra, 1, white)
s1.blit(text, (70, 120+y))
tela.blit(s1, (0, 0))
y+= 30
menu_init()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
pos_x, pos_y = pygame.mouse.get_pos()
if pos_x >= 691 and pos_x <= 815 and pos_y >= 700 and pos_y <= 730:
buttons += 30
else:
menu = "menu1"
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:
menu = "menu2"
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
menu = "menu3"
if event.type == pygame.KEYDOWN and event.key == pygame.K_TAB:
menu = ""
menu_init()
if event.type == pygame.KEYDOWN and event.key == pygame.K_LSHIFT:
ping_ip(ip)
if menu == "menu1":
pc_infos()
cpu_graph()
m_graph()
disk_graph()
threads_text()
threads_graph()
if menu != "menu1":
break
if menu == "menu2":
infos()
if menu != "menu2":
break
if menu == "menu3":
arq_dir()
process_header()
dir_header()
arq_dir_button()
time.sleep(0.1)
if menu != "menu3":
break
pygame.display.update()
clock.tick(50)
pygame.display.quit()
``` |
{
"source": "jpfilhooo/speprice",
"score": 3
} |
#### File: jpfilhooo/speprice/speprice.py
```python
from urllib.request import urlopen
from bs4 import BeautifulSoup
import requests
import time
import colorama
from colorama import Fore
from colorama import Style
from twilio.rest import Client
colorama.init()
dev = " R$0.05"
global ok
global confere
def request():
global content
html = requests.get("https://coinmarketcap.com/pt-br/currencies/space-crypto-spe/").content
'''html_dec = html.read().decode(encoding="iso-8859-1")'''
bs = BeautifulSoup(html, 'html.parser')
#Finds the div element that shows the token price on the static page
aim = bs.find("div", class_="priceValue")
content = aim.string
request()
if content >= 'R$0.20':
print(Fore.LIGHTGREEN_EX + Style.BRIGHT + '"{}"'.format(content) + Style.RESET_ALL + ' Preço atual do token')
elif content >= 'R$0.15' and content < 'R$0.20':
print(Fore.LIGHTYELLOW_EX + Style.BRIGHT + '"{}"'.format(content) + Style.RESET_ALL + ' Preço atual do token')
elif content <= 'R$0.10':
print(Fore.LIGHTRED_EX + Style.BRIGHT + '"{}"'.format(content) + Style.RESET_ALL + ' Preço atual do token')
def _main_():
global ok
global confere
while True:
request()
count = []
print('--------------------\n')
print('Atualizando:')
print('...')
time.sleep(1)
print('..')
time.sleep(1)
print('.')
time.sleep(1)
print('..')
time.sleep(1)
print('...\n')
time.sleep(0.5)
print('--------------------\n')
time.sleep(0.2)
if content >= 'R$0.20':
time.sleep(0.5)
print(Fore.LIGHTGREEN_EX + Style.BRIGHT + '"{}"'.format(content) + Style.RESET_ALL + ' Novo preço do token\n')
time.sleep(0.5)
elif content >= 'R$0.15' and content < 'R$0.20':
time.sleep(0.5)
print(Fore.LIGHTYELLOW_EX + Style.BRIGHT + '"{}"'.format(content) + Style.RESET_ALL + ' Novo preço do token\n')
time.sleep(0.5)
elif content <= 'R$0.14' and content >= 'R$0.10':
time.sleep(1)
print(Fore.LIGHTRED_EX + Style.BRIGHT + '"{}"'.format(content) + Style.RESET_ALL + ' Novo Preço do token\n')
time.sleep(0.5)
elif content <= 'R$0.06':
account_sid = 'ACa4ae46a23ee183f2c9ffabdc3125b8de'
auth_token = '<PASSWORD>'
client = Client(account_sid, auth_token)
bd = str('\n\nPreço dessa jabirosca chamada SPE tá zerando CUIDA!!\n\nJá tá'+content+'\n\njpfilhooo btw')
client.messages.create(from_="+17408411652", body=bd, to="+5588988562749")
print('Mensagem enviada')
ok = True
break
def recall():
global ok
if ok == True:
_main_()
_main_()
recall()
#print(content)
#print('"{}"'.format(bs.title.string)+" é o nome da tag")
``` |
{
"source": "jpfleischer/pi",
"score": 3
} |
#### File: iot/images/temperature.py
```python
from grovepi import *
from grove_rgb_lcd import *
import time
import smbus
import RPi.GPIO as GPIO
from grove_i2c_barometic_sensor_BMP180 import BMP085
class WeatherStation(object):
def __init__(self, port=7):
self.dht_sensor_port = port
setRGB(0, 255, 0)
def get(self):
try:
temp, hum = dht(self.dht_sensor_port, 0)
# Get the temperature and Humidity from the DHT sensor
t = str(temp)
h = str(hum)
print("Temp:" + t + "C " + "Humidity :" + h + "%")
setText("Temp:" + t + "C " + "Humidity :" + h + "%")
return t, h
except (IOError, TypeError) as e:
print("Error")
class Barometer(object):
def __init__(self, mode=1):
print("a")
# Initialise the BMP085 and use STANDARD mode (default value)
# bmp = BMP085(0x77, debug=True)
self.bmp = BMP085(0x77, mode)
# To specify a different operating mode, uncomment one of the following:
# bmp = BMP085(0x77, 0) # ULTRALOWPOWER Mode
# bmp = BMP085(0x77, 1) # STANDARD Mode
# bmp = BMP085(0x77, 2) # HIRES Mode
# bmp = BMP085(0x77, 3) # ULTRAHIRES Mode
rev = GPIO.RPI_REVISION
if rev == 2 or rev == 3:
bus = smbus.SMBus(1)
else:
bus = smbus.SMBus(0)
def get(self):
try:
print("a")
temp = self.bmp.readTemperature()
print("b")
# Read the current barometric pressure level
pressure = self.bmp.readPressure() / 100.0
# To calculate altitude based on an estimated mean sea level pressure
# (1013.25 hPa) call the function as follows, but this won't be very accurate
# altitude = bmp.readAltitude()
# To specify a more accurate altitude, enter the correct mean sea level
# pressure level. For example, if the current pressure level is 1023.50 hPa
# enter 102350 since we include two decimal places in the integer value
print("c")
altitude = self.bmp.readAltitude(101560)
print("Temperature: %.2f C" % temp)
print("Pressure: %.2f hPa" % pressure)
print("Altitude: %.2f m" % altitude)
return temp, pressure, altitude
except Exception as e:
pass
barometer = Barometer()
# station= WeatherStation()
while True:
time.sleep(2)
# print(station.get())
print(barometer.get())
``` |
{
"source": "jpflueger/MistyOpenApi",
"score": 2
} |
#### File: homerobot/models/drive_velocity_request.py
```python
import pprint
import re # noqa: F401
import six
class DriveVelocityRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'angular_velocity': 'float',
'linear_velocity': 'float'
}
attribute_map = {
'angular_velocity': 'angularVelocity',
'linear_velocity': 'linearVelocity'
}
def __init__(self, angular_velocity=None, linear_velocity=None): # noqa: E501
"""DriveVelocityRequest - a model defined in OpenAPI""" # noqa: E501
self._angular_velocity = None
self._linear_velocity = None
self.discriminator = None
self.angular_velocity = angular_velocity
self.linear_velocity = linear_velocity
@property
def angular_velocity(self):
"""Gets the angular_velocity of this DriveVelocityRequest. # noqa: E501
The angular velocity from -100.0 to 100.0 in percentage of maximum possible velocity # noqa: E501
:return: The angular_velocity of this DriveVelocityRequest. # noqa: E501
:rtype: float
"""
return self._angular_velocity
@angular_velocity.setter
def angular_velocity(self, angular_velocity):
"""Sets the angular_velocity of this DriveVelocityRequest.
The angular velocity from -100.0 to 100.0 in percentage of maximum possible velocity # noqa: E501
:param angular_velocity: The angular_velocity of this DriveVelocityRequest. # noqa: E501
:type: float
"""
if angular_velocity is None:
raise ValueError("Invalid value for `angular_velocity`, must not be `None`") # noqa: E501
self._angular_velocity = angular_velocity
@property
def linear_velocity(self):
"""Gets the linear_velocity of this DriveVelocityRequest. # noqa: E501
The linear velocity from -100.0 to 100.0 in percentage of maximum possible velocity # noqa: E501
:return: The linear_velocity of this DriveVelocityRequest. # noqa: E501
:rtype: float
"""
return self._linear_velocity
@linear_velocity.setter
def linear_velocity(self, linear_velocity):
"""Sets the linear_velocity of this DriveVelocityRequest.
The linear velocity from -100.0 to 100.0 in percentage of maximum possible velocity # noqa: E501
:param linear_velocity: The linear_velocity of this DriveVelocityRequest. # noqa: E501
:type: float
"""
if linear_velocity is None:
raise ValueError("Invalid value for `linear_velocity`, must not be `None`") # noqa: E501
self._linear_velocity = linear_velocity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DriveVelocityRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: PythonApi/test/test_locomotion_api.py
```python
from __future__ import absolute_import
import unittest
import homerobot
from homerobot.api.locomotion_api import LocomotionApi # noqa: E501
from homerobot.rest import ApiException
class TestLocomotionApi(unittest.TestCase):
"""LocomotionApi unit test stubs"""
def setUp(self):
self.api = homerobot.api.locomotion_api.LocomotionApi() # noqa: E501
def tearDown(self):
pass
def test_drive_velocity(self):
"""Test case for drive_velocity
Command the robot to drive using velocity parameters # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpfluger/radiucal",
"score": 3
} |
#### File: radiucal/tools/netconf.py
```python
import argparse
import os
import users
import users.__config__
import importlib
import csv
# file indicators
IND_DELIM = "_"
USER_INDICATOR = "user" + IND_DELIM
VLAN_INDICATOR = "vlan" + IND_DELIM
AUTH_PHASE_ONE = "PEAP"
AUTH_PHASE_TWO = "MSCHAPV2"
class ConfigMeta(object):
"""configuration meta information."""
def __init__(self):
"""init the instance."""
self.passwords = []
self.macs = []
self.vlans = []
self.all_vlans = []
self.user_name = []
self.vlan_users = []
self.vlan_initiate = []
self.extras = []
def password(self, password):
"""password group validation(s)."""
if password in self.passwords:
print("password duplicated")
exit(-1)
self.passwords.append(password)
def extra(self, macs):
"""Limited macs."""
for mac in macs:
if mac in self.extras:
print("mac already known as extra: " + mac)
exit(-1)
self.extras.append(mac)
def user_macs(self, macs):
"""user+mac combos."""
self.macs = self.macs + macs
self.macs = list(set(self.macs))
def verify(self):
"""verify meta data."""
for mac in self.macs:
if mac in self.extras:
print("mac is flagged extra: " + mac)
exit(-1)
for mac in self.extras:
if mac in self.macs:
print("mac is user assigned: " + mac)
exit(-1)
used_vlans = set(self.vlans + self.vlan_initiate)
if len(used_vlans) != len(set(self.all_vlans)):
print("unused vlans detected")
exit(-1)
for ref in used_vlans:
if ref not in self.all_vlans:
print("reference to unknown vlan: " + ref)
exit(-1)
def vlan_user(self, vlan, user):
"""indicate a vlan was used."""
self.vlans.append(vlan)
self.vlan_users.append(vlan + "." + user)
self.user_name.append(user)
def vlan_to_vlan(self, vlan_to):
"""VLAN to VLAN mappings."""
self.vlan_initiate.append(vlan_to)
def _get_mod(name):
"""import the module dynamically."""
return importlib.import_module("users." + name)
def _load_objs(name, typed):
mod = _get_mod(name)
for key in dir(mod):
obj = getattr(mod, key)
if not isinstance(obj, typed):
continue
yield obj
def _get_by_indicator(indicator):
"""get by a file type indicator."""
return [x for x in sorted(users.__all__) if x.startswith(indicator)]
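# Illustrative example (assumption): with users.__all__ == ["user_alice", "vlan_office"],
# _get_by_indicator(USER_INDICATOR) returns ["user_alice"].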
def _common_call(common, method, entity):
"""make a common mod call."""
obj = entity
if common is not None and method in dir(common):
call = getattr(common, method)
if call is not None:
obj = call(obj)
return obj
def check_object(obj):
"""Check an object."""
return obj.check()
def _process(output):
"""process the composition of users."""
common_mod = None
try:
common_mod = _get_mod("common")
print("loaded common definitions...")
except Exception as e:
print("defaults only...")
vlans = None
meta = ConfigMeta()
for v_name in _get_by_indicator(VLAN_INDICATOR):
print("loading vlan..." + v_name)
for obj in _load_objs(v_name, users.__config__.VLAN):
if vlans is None:
vlans = {}
if not check_object(obj):
exit(-1)
num_str = str(obj.num)
for vk in vlans.keys():
if num_str == vlans[vk]:
print("vlan number defined multiple times...")
exit(-1)
vlans[obj.name] = num_str
if obj.initiate is not None and len(obj.initiate) > 0:
for init_to in obj.initiate:
meta.vlan_to_vlan(init_to)
if vlans is None:
raise Exception("missing required config settings...")
meta.all_vlans = vlans.keys()
store = Store()
for f_name in _get_by_indicator(USER_INDICATOR):
print("composing..." + f_name)
for obj in _load_objs(f_name, users.__config__.Assignment):
obj = _common_call(common_mod, 'ready', obj)
key = f_name.replace(USER_INDICATOR, "")
if not key.isalnum():
print("does not meet naming requirements...")
exit(-1)
vlan = obj.vlan
if vlan not in vlans:
raise Exception("no vlan defined for " + key)
store.add_vlan(vlan, vlans[vlan])
meta.vlan_user(vlan, key)
fqdn = vlan + "." + key
if not check_object(obj):
print("did not pass check...")
exit(-1)
if obj.disabled:
print("account is disabled")
continue
macs = sorted(obj.macs)
password = <PASSWORD>
bypassed = sorted(obj.bypassed())
owned = sorted(obj.owns)
# meta checks
meta.user_macs(macs)
if not obj.inherits:
meta.password(password)
meta.extra(bypassed)
meta.extra(owned)
store.add_user(fqdn, macs, password)
if obj.mab_only:
store.set_mab(fqdn)
if len(bypassed) > 0:
for m in bypassed:
store.add_mab(m, obj.bypass_vlan(m))
user_all = []
for l in [obj.macs, obj.owns, bypassed]:
user_all += list(l)
store.add_audit(fqdn, sorted(set(user_all)))
meta.verify()
# audit outputs
with open(output + "audit.csv", 'w') as f:
csv_writer = csv.writer(f, lineterminator=os.linesep)
for a in sorted(store.get_tag(store.audit)):
p = a[0].split(".")
for m in a[1]:
csv_writer.writerow([p[1], p[0], m])
# eap_users and preauth
manifest = []
with open(output + "eap_users", 'w') as f:
for u in store.get_eap_user():
f.write('"{}" {}\n\n'.format(u[0], AUTH_PHASE_ONE))
f.write('"{}" {} hash:{} [2]\n'.format(u[0], AUTH_PHASE_TWO, u[1]))
write_vlan(f, u[2])
for u in store.get_eap_mab():
up = u[0].upper()
f.write('"{}" MD5 "{}"\n'.format(up, up))
write_vlan(f, u[1])
manifest.append((u[0], u[0]))
for u in store.get_tag(store.umac):
manifest.append((u[0], u[1]))
with open(output + "manifest", 'w') as f:
for m in sorted(manifest):
f.write("{}.{}\n".format(m[0], m[1]).lower())
def write_vlan(f, vlan_id):
"""Write vlan assignment for login."""
f.write('radius_accept_attr=64:d:13\n')
f.write('radius_accept_attr=65:d:6\n')
f.write('radius_accept_attr=81:s:{}\n\n'.format(vlan_id))
class Store(object):
"""Storage object."""
def __init__(self):
"""Init the instance."""
self._data = []
self.umac = "UMAC"
self.pwd = "<PASSWORD>"
self.mac = "MAC"
self.audit = "AUDIT"
self._users = []
self._mab = []
self._macs = []
self._vlans = {}
def set_mab(self, username):
"""Set a user as MAB-only, no login set."""
self._mab.append(username)
def get_tag(self, tag):
"""Get tagged items."""
for item in self._data:
if item[0] == tag:
yield item[1:]
def add_vlan(self, vlan_name, vlan_id):
"""Add a vlan item."""
self._vlans[vlan_name] = vlan_id
def _add(self, tag, key, value):
"""Backing tagged add."""
self._data.append([tag, key, value])
def add_user(self, username, macs, password):
"""Add a user definition."""
if username in self._users:
raise Exception("{} already defined".format(username))
self._users.append(username)
for m in macs:
self._add(self.umac, username, m)
self._add(self.pwd, username, password)
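# Illustrative usage (assumption, hypothetical values): add_user("office.alice", ["aa:bb:cc:dd:ee:ff"], "<hash>")
# stores one UMAC entry per MAC plus one password entry, all keyed by the fully qualified "vlan.user" name.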
def add_mab(self, mac, vlan):
"""Add a MAB."""
if mac in self._macs:
raise Exception("{} already defined".format(mac))
self._macs.append(mac)
self._add(self.mac, mac, vlan)
def add_audit(self, user, objs):
"""Add an audit entry."""
self._add(self.audit, user, objs)
def get_eap_mab(self):
"""Get eap entries for MAB."""
for m in self.get_tag(self.mac):
v = m[1]
if not isinstance(v, int):
v = self._get_vlan(v)
yield [m[0], v]
def get_eap_user(self):
"""Get eap users."""
for u in self.get_tag(self.pwd):
if u[0] in self._mab:
continue
vlan = u[0].split(".")[0]
yield [u[0], u[1], self._get_vlan(vlan)]
def _get_vlan(self, name):
"""Get vlans."""
return self._vlans[name]
def main():
"""main entry."""
success = False
try:
parser = argparse.ArgumentParser()
parser.add_argument("--output", type=str, required=True)
args = parser.parse_args()
_process(args.output)
success = True
except Exception as e:
print('unable to compose')
print(str(e))
if success:
print("success")
exit(0)
else:
print("failure")
exit(1)
if __name__ == "__main__":
main()
``` |
{
"source": "jpf/okta-jwks-to-pem",
"score": 3
} |
#### File: jpf/okta-jwks-to-pem/jwks_to_pem.py
```python
import argparse
import base64
import six
import struct
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import requests
arg_parser = argparse.ArgumentParser(
description='JWK to PEM conversion tool')
arg_parser.add_argument('--org',
dest='org',
help='Domain for Okta org',
required=True)
args = arg_parser.parse_args()
def intarr2long(arr):
return int(''.join(["%02x" % byte for byte in arr]), 16)
def base64_to_long(data):
if isinstance(data, six.text_type):
data = data.encode("ascii")
# urlsafe_b64decode will happily convert b64encoded data
_d = base64.urlsafe_b64decode(bytes(data) + b'==')
return intarr2long(struct.unpack('%sB' % len(_d), _d))
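# Example (for illustration): the common RSA exponent "AQAB" decodes to bytes 0x01 0x00 0x01,
# so base64_to_long("AQAB") == 65537.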
print("Fetching JWKS from {}".format(args.org))
r = requests.get("https://{}/oauth2/v1/keys".format(args.org))
jwks = r.json()
for jwk in jwks['keys']:
exponent = base64_to_long(jwk['e'])
modulus = base64_to_long(jwk['n'])
numbers = RSAPublicNumbers(exponent, modulus)
public_key = numbers.public_key(backend=default_backend())
pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
print "PEM for KID '{}'".format(jwk['kid'])
print pem
``` |
{
"source": "Jpfonseca/Adaptive_Live_Streaming",
"score": 3
} |
#### File: src/scripts/start_stream_gopro.py
```python
import sys
import json
import socket
import requests
from time import sleep
#This script allows you to maintain a connection to the Go Pro over Wifi
#Generally the Go Pro can't sustain a wifi connection for more than 20 mins,
#and while streaming that capacity drops to only 2 mins.
#In order to use the script make sure you are connected to the GoPro's wifi
def get_command_msg(id):
return "_GPHD_:%u:%u:%d:%1lf\n" % (0, 0, 2, 0)
if __name__ == "__main__":
#Go Pro Livestream start link
#http://10.5.5.9/gp/gpExec?p1=gpStreamA9&c1=restart
uri_status='http://10.5.5.9:8080/gp/gpControl/execute?p1=gpStream&c1=restart'
req=requests.get(uri_status)
#Try to send the signal to start the stream
if req.status_code != 200:
print"Cannot connect to Go Pro" + "\n" +"Check connection-manager"
sys.exit()
else:
# Keep stream Alive python script from:
#https://gist.github.com/3v1n0/38bcd4f7f0cb3c279bad#file-hero4-udp-keep-alive-send-py
#as on 20 July 2017 22:48
UDP_IP = "10.5.5.9"
UDP_PORT = 8554 #port where the Go Pro Hero4 video streams
KEEP_ALIVE_PERIOD = 2500
KEEP_ALIVE_CMD = 2
MESSAGE = get_command_msg(KEEP_ALIVE_CMD)
print("UDP target IP:", UDP_IP)
print("UDP target port:", UDP_PORT)
print("message:", MESSAGE)
if sys.version_info.major >= 3:
MESSAGE = bytes(MESSAGE, "utf-8")
while True:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(MESSAGE, (UDP_IP, UDP_PORT))
sleep(KEEP_ALIVE_PERIOD/1000)
``` |
{
"source": "Jpfonseca/Blockchain_auction_management",
"score": 3
} |
#### File: Blockchain_auction_management/src/blockchain.py
```python
import json
import os
import re
from ast import literal_eval
from logging import DEBUG, ERROR, INFO
from log import LoggyLogglyMcface
class Block:
def __init__(self, key=None, cert=None, serial=None, hash=None, hash_prev=None, amount=None, name=None, id=None, timestamp=None):
self.mylogger = LoggyLogglyMcface(name=Block.__name__)
self.mylogger.log(INFO, "Entering Block interface")
# parameters of a bid (block of the linked list - blockchain)
self.key = key
self.cert = cert
self.serial = serial
self.hash = hash
self.hash_prev = hash_prev
self.amount = amount
self.name = name
self.id = id
self.timestamp = timestamp
self.next = None
self.previous = None
self.block_to_file = {'key': key, 'cert': cert, 'serial': serial, 'hash': hash, 'hash_prev': hash_prev,
'amount': amount, 'name': name, 'id': id, 'timestamp': timestamp}
self.block_to_user = {'serial': serial, 'hash': hash, 'hash_prev': hash_prev,
'amount': amount, 'name': name, 'id': id, 'timestamp': timestamp}
def info(self):
"""
Get information on a block/bid (1)
"""
return str(self.block_to_file)
def info_user(self):
"""
Get information on a block/bid (2)
"""
return str(self.block_to_user)
class Blockchain:
def __init__(self, key=None, cert=None, serial=None, id=None, timestamp=None, name=None, time_limit=None, description=None, type=None,
state=None, winner=None, winner_amount=None):
self.mylogger = LoggyLogglyMcface(name=Blockchain.__name__)
self.mylogger.log(INFO, "Entering Blockchain interface")
# parameters of a blockchain (linked list)
self.head_block = None
self.tail_block = None
self.key = key
self.cert = cert
self.serial = serial
self.id = id
self.timestamp = timestamp
self.name = name
self.time_limit = time_limit
self.description = description
self.type = type
self.state = state
self.winner = winner
self.winner_amount = winner_amount
self.blockchain_to_file = {'key': key, 'cert': cert, 'serial': serial, 'id': id, 'timestamp': timestamp, 'name': name,
'time-limit': time_limit, 'description': description, 'type': type, 'state': state,
'winner': winner, 'winner_amount': winner_amount}
self.blockchain_to_user = {'serial': serial, 'id': id, 'timestamp': timestamp, 'name': name,
'time-limit': time_limit, 'description': description, 'type': type, 'state': state,
'winner': winner, 'winner_amount': winner_amount}
def info(self):
"""
Get information on a blockchain (1)
"""
self.mylogger.log(INFO, "The Blockchain with the serial {} current state is :\n {}".format(self.serial, str(
self.blockchain_to_file)))
return str(self.blockchain_to_file)
def info_user(self):
"""
Get information on a blockchain (2)
"""
self.mylogger.log(INFO, "The Blockchain with the serial {} current state is :\n {}".format(self.serial, str(
self.blockchain_to_user)))
return str(self.blockchain_to_user)
def chain_length(self):
"""
Return the number of blocks in the blockchain
"""
counter = 0
current_block = self.head_block
while current_block is not None:
counter = counter + 1
current_block = current_block.next
self.mylogger.log(INFO, "The Blockchain with the serial {} has : {} blocks".format(self.serial, counter))
return counter
def bids_client(self, id):
"""
Get all bids (blocks) of a client
"""
result = []
current_block = self.head_block
self.mylogger.log(INFO, "The client with the id {} has these Bids:".format(id))
while current_block is not None:
if current_block.id == str(id):
result.append(current_block.info_user())
current_block = current_block.next
return result
def bids_auction(self, serial):
"""
Get all bids (blocks) of an auction
"""
result = []
current_block = self.head_block
self.mylogger.log(INFO, "The Blockchain with the serial {} has these Bids:".format(serial))
while current_block is not None:
if current_block.serial == str(serial) or current_block.serial == int(serial):
result.append(current_block.info_user())
current_block = current_block.next
return result
def bid_info(self, hash):
"""
Get information on a bid
"""
self.mylogger.log(INFO, "Getting information on the bid: {}\n".format(hash))
current_block = self.head_block
if current_block is not None:
while current_block is not None:
if current_block.hash == hash:
return current_block.block_to_file
current_block = current_block.next
return ""
def save_to_file(self, file):
"""
Write the blockchain into a file
"""
self.mylogger.log(INFO,"\nThe Blockchain will be saved into the file: {}\n".format(file))
current_path = os.getcwd()
path = "{}/auctions/{}".format(current_path, file)
text_file = open(path, "w")
text_file.write("%s\n" % self.info())
current_block = self.head_block
if current_block is not None:
while current_block is not None:
text_file.write("%s\n" % current_block.info())
current_block = current_block.next
text_file.close()
self.mylogger.log(INFO, "The Blockchain was saved into the file: {}\n".format(file))
def add_block(self, block):
"""
Add block to the linked list (blockchain)
"""
self.mylogger.log(INFO,
"Adding block into the blockchain: \n Hash: {}, Amount: {}, Identity: {}, Timestamp: {}"
"".format(block.hash, block.amount, block.id, block.timestamp))
if isinstance(block, Block):
if self.head_block is None:
self.head_block = block
block.previous = None
block.next = None
self.tail_block = block
else:
self.tail_block.next = block
block.previous = self.tail_block
self.tail_block = block
return
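# Sketch of expected behaviour (illustrative only): after add_block(b1) followed by add_block(b2),
# head_block is b1, tail_block is b2, b1.next is b2 and b2.previous is b1.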
def remove_blocks(self):
"""
Remove all blocks/bids of the blockchain
"""
counter = self.chain_length()
self.mylogger.log(INFO,
"Removing all blocks from the Blockchain: {:d} blocks inside \n".format(counter))
while counter > 0:
current_block = self.tail_block
self.tail_block = current_block.previous
current_block.next = None
current_block = None
counter -= 1
if counter == 0:
self.head_block = None
self.mylogger.log(INFO,"Removed all blocks from the Blockchain\n")
return
```
#### File: Blockchain_auction_management/src/client.py
```python
import copy, hashlib, json, random, string, sys, base64, datetime
import os
from ast import literal_eval
from socket import *
from pathlib import Path
from logging import DEBUG, ERROR, INFO
from log import LoggyLogglyMcface
from cc_interface import PortugueseCitizenCard
from security import CertificateOperations, CryptoUtils
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.backends import default_backend
from cryptography.fernet import Fernet
HOST = "127.0.0.1"
# ports the client uses to communicate with the manager and repository servers
PORT_MAN = 8080
PORT_REPO = 8081
MAX_BUFFER_SIZE = 10000
class Client:
def __init__(self, host, port_man, port_repo):
self.mylogger = LoggyLogglyMcface(name=Client.__name__)
self.mylogger.log(INFO, "Entering Client interface")
self.host = host
self.port_man = port_man
self.port_repo = port_repo
# public keys and certificates
self.client_cert = None
self.client_pubk = None
self.man_pubkey = None
self.repo_pubkey = None
# symmetric key associated with hash of the bid in an auction
self.bid_keys = {}
# my bids
self.bids = []
# addresses of the servers
self.repo_address = None
self.man_address = None
# socket to be used
self.sock = socket(AF_INET, SOCK_DGRAM)
# portuguese citizen card and CryptoUtils instance
self.cc = PortugueseCitizenCard()
self.crypto = CryptoUtils()
self.slot = -1
# id and name of the client
self.id = None
self.name = None
def start(self):
"""
Servers and Client exchange public keys
"""
try:
# ask user which slot to use
fullnames = self.cc.getSmartcardsNames()
slot = -1
if len(self.cc.sessions) > 0:
temp = ''.join('Slot{:3d}-> Fullname: {:10s}\n'.format(i, fullnames[i]) for i in range(0, len(fullnames)))
while slot < 0 or slot > len(self.cc.sessions):
slot = input("Available Slots: \n{:40s} \n\nWhich Slot do you wish to use? ".format(temp))
if slot.isdigit():
slot = int(slot)
else:
slot = -1
self.slot = slot
for i in range(0, len(self.cc.sessions)):
if slot != i:
self.cc.sessions[i].closeSession()
cert = self.cc.PTEID_GetCertificate(self.slot)
self.client_cert = cert
self.name = self.cc.GetNameFromCERT(cert)
digest = hashes.Hash(hashes.MD5(), backend=default_backend())
digest.update(self.cc.PTEID_GetBI(slot).encode())
self.id = base64.b64encode(digest.finalize()).decode()
self.mylogger.log(INFO, "Client ID: {}".format(self.id))
# calculate md5 digest of the citizen card number (id of the user)
certop = CertificateOperations()
certop.getCertfromPem(cert)
self.client_pubk = certop.getPubKey().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
msg = json.dumps({'c_pubk': self.client_pubk.decode(), 'id': self.id})
self.mylogger.log(INFO, "Exchanging pubkey's with the Repo")
bytes = self.sock.sendto(msg.encode(), (self.host, self.port_repo))
data1, address = self.sock.recvfrom(MAX_BUFFER_SIZE)
print("> repository pubkey received")
self.mylogger.log(INFO, "Repo Pubkey received")
self.mylogger.log(INFO, "Exchanging pubkey with the Manager")
bytes = self.sock.sendto(msg.encode(), (self.host, self.port_man))
data2, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
print("> manager pubkey received")
self.mylogger.log(INFO, "Manager Pubkey received")
data1 = json.loads(data1)
data2 = json.loads(data2)
self.repo_pubkey = data1['repo_pubk']
self.man_pubkey = data2['man_pubk']
if 'repo_pubk' in data1:
self.repo_address = address
if 'man_pubk' in data2:
self.man_address = server
self.mylogger.log(INFO, "Repo Pubkey : \n{}\nManager Pubkey : \n{}".format(self.repo_pubkey, self.man_pubkey))
self.loop()
except:
self.mylogger.log(INFO, "Cannot start client")
raise
# menu of the client
def loop(self):
"""
The main loop of the client. It displays the menu and calls
functions according to the option selected by the user
"""
try:
self.mylogger.log(INFO, "Entered Client Menu ")
while (True):
print("\n----Menu----\n1) Create auction\n2) Place bid\n3) List active auctions\n"
"4) List closed auctions\n5) Display my bids\n6) Display bids of an auction\n"
"7) Display bids of a client\n8) Check receipt\n9) Display my information\n"
"10) Display ids of all clients\n11) Close")
option = input(">")
if option == '1':
self.create_auction()
elif option == '2':
self.place_bid()
elif option == '3':
self.list_active_auctions()
elif option == '4':
self.list_closed_auctions()
elif option == '5':
self.display_bids()
elif option == '6':
self.bids_auction()
elif option == '7':
self.bids_client()
elif option == '8':
self.check_receipt()
elif option == '9':
self.display_client()
elif option == '10':
self.display_ids()
elif option == '11':
self.exit(0)
else:
print("Not a valid option!\n")
except:
self.mylogger.log(INFO, "Exception on client's loop")
raise
def create_auction(self):
"""
Send new auction parameters to the manager server and wait for
an ok or not ok answer
"""
try:
self.mylogger.log(INFO, "Creating auction ")
file_exists = False
name = input("name: ")
time_limit = input("time limit: ") # format: _h_m_s
description = input("description: ")
            type_auction = input("(e)nglish or (b)lind: ")
file = input("dynamic code to be uploaded:")
while not file_exists:
current_path = os.getcwd()
path = "{}/dynamicCode/{}".format(current_path,file)
my_file = Path(path)
if my_file.is_file():
file_exists = True
with open(path) as f:
dynamic_code = f.read()
break
else:
print("Nonexistent file")
file = input("dynamic code to be uploaded:")
date_time = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
key = Fernet.generate_key()
f = Fernet(key)
# cert = base64.b64encode(self.client_cert).decode()
# certs and symmetric keys are saved in base64 format
encryptedSymCert = base64.b64encode(f.encrypt(self.client_cert)).decode()
encryptedSymKey = base64.b64encode(
self.crypto.RSAEncryptData(self.crypto.loadPubk(self.man_pubkey), key)).decode()
msg = {'payload': {'auction': {'key': encryptedSymKey, 'cert': encryptedSymCert, 'serial': None,
'id': self.id, 'timestamp': date_time, 'name': name,
'time-limit': time_limit,
'description': description, 'type': type_auction}, 'dynamic_code': dynamic_code}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), (self.host, self.port_man))
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
ack = json.dumps(data['payload'])
if self.valid_signature(self.man_pubkey, ack, signature):
if data['payload']['ack'] == 'ok':
print("\nNew auction created!")
else:
print("The auction was NOT created. Error: {}".format(data['payload']['info']))
self.exit(1)
else:
print("Manager pubkey not verified")
self.exit(1)
except:
# print("Cannot create auction")
self.mylogger.log(INFO, "Cannot create auction")
raise
# request a bid, calculate proof-of-work, send parameters to repository
def place_bid(self):
"""
Send a bid request to the repository server, which answers with a proof-of-work.
The client computes the proof-of-work, sends the answer to the repository and if
it is accepted, he/she may send the bid parameters. The repository acknowledges
the bid by sending a receipt signed by the 3 entities
"""
try:
self.mylogger.log(INFO, "Placing bid ")
serial = input("Serial number of auction:")
amount = input("Amount: ")
msg = {'payload': {'command': 'bid_request', 'id': self.id, 'serial': serial}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
payload = json.dumps(data['payload'])
if self.valid_signature(self.repo_pubkey, payload, signature):
if 'ack' in data['payload']:
if data['payload']['ack'] == 'nok':
if 'info' in data['payload']:
print(data['payload']['info'])
else:
string, digest = self.hash_cash(data['payload']['r_string'], int(data['payload']['numZeros']))
print("Digest: " + digest)
msg = {'payload': {'string': string, 'digest': digest, 'id': self.id}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
payload = json.dumps(data['payload'])
if self.valid_signature(self.repo_pubkey, payload, signature):
if data['payload']['ack'] == 'ok':
print("Cryptopuzzle result accepted by the server")
bid_key = Fernet.generate_key()
f = Fernet(bid_key)
encrypted_cert = base64.b64encode(f.encrypt(self.client_cert)).decode()
encrypted_key = base64.b64encode(
self.crypto.RSAEncryptData(self.crypto.loadPubk(self.man_pubkey), bid_key)).decode()
type = data['payload']['type']
date_time = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
hash_str = str(encrypted_key) + str(encrypted_cert) + str(serial) + \
str(data['payload']['hash_prev']) + str(amount) + str(self.id) + \
str(date_time)
hash = hashlib.md5(hash_str.encode()).hexdigest()
self.bids.append(json.dumps({'serial': str(serial), 'hash': str(hash),
'hash_prev': str(data['payload']['hash_prev']),
'amount': str(amount), 'name': str(self.name),
'id': str(self.id), 'timestamp': str(date_time)}))
if type == 'e':
msg = {'payload': {'bid': {'key': encrypted_key, 'cert': encrypted_cert, 'serial': serial,
'hash': hash, 'hash_prev': data['payload']['hash_prev'],
'amount': amount, 'name': "", 'id': self.id, 'timestamp': date_time}}}
elif type == 'b':
encrypted_amount = base64.b64encode(f.encrypt(amount.encode())).decode()
msg = {'payload': {'bid': {'key': encrypted_key, 'cert': encrypted_cert, 'serial': serial,
'hash': hash, 'hash_prev': data['payload']['hash_prev'],
'amount': encrypted_amount, 'name': "", 'id': self.id,
'timestamp': date_time}}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['payload']['sig_c'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
payload = json.dumps(data['payload'])
if self.valid_signature(self.repo_pubkey, payload, signature):
if data['payload']['ack'] == 'ok':
repo_valid = False
manager_valid = False
client_valid = False
print("\nReceipt validation:")
data_v = copy.deepcopy(data['payload']['receipt'])
signature = base64.b64decode(data_v.pop('sig_r'))
if self.valid_signature(self.repo_pubkey, json.dumps(data_v), signature):
repo_valid = True
print("Repository's signature -> valid")
signature = base64.b64decode(data_v.pop('sig_m'))
if self.valid_signature(self.man_pubkey, json.dumps(data_v), signature):
manager_valid = True
print("Manager's signature -> valid")
signature = base64.b64decode(data_v.pop('sig_c'))
if self.crypto.verifySignatureCC(self.client_pubk, json.dumps(data_v), signature):
client_valid = True
print("Client's signature -> valid")
if repo_valid and manager_valid and client_valid:
if serial not in self.bid_keys:
self.bid_keys[serial] = {str(hash): bid_key}
else:
self.bid_keys[serial][str(hash)] = bid_key
current_path = os.getcwd()
file = "auction_{}_bid_{}.txt".format(serial, hash)
path = "{}/receipts/{}".format(current_path, file)
text_file = open(path, "w")
text_file.write("%s\n" % json.dumps(data['payload']['receipt']))
print("\nBid created successfully")
else:
print("Receipt signatures are not valid. Exiting compromised system...")
sys.exit(-1)
else:
print("\nBid not created")
self.mylogger.log(INFO, "Bid was not created")
if 'info' in data['payload']:
print("info: " + data['payload']['info'])
else:
print("valid bid: " + str(data['payload']['valid']))
else:
print("\n Bid not created, wrong result of proof-of-work")
self.mylogger.log(INFO, "Bid was not created")
self.exit(1)
except:
#print("Bid was not created")
self.mylogger.log(INFO, "Bid was not created")
raise
def display_bids(self):
"""
Display the bids performed by the current user
"""
try:
self.mylogger.log(INFO, "Displaying bids of the current client")
for bid in self.bids:
print(bid + "\n")
except:
#print("Cannot list current client's bids")
self.mylogger.log(INFO, "Cannot list bids of current client")
raise
def check_receipt(self):
"""
Verify if the information stored on the repository server is the
same as in the receipt previously received. The hash of the bid is
calculated both with the receipt information and the information received.
If the hash is equal, the information stored in the server is correct.
"""
try:
self.mylogger.log(INFO, "Checking Receipt ")
file_exists = False
serial = input("Auction:")
hash = input("Bid: ")
file = "auction_{}_bid_{}.txt".format(serial, hash)
while not file_exists:
current_path = os.getcwd()
path = "{}/receipts/{}".format(current_path, file)
my_file = Path(path)
if my_file.is_file():
file_exists = True
with open(path) as f:
lines = f.readlines()
break
else:
print("Nonexistent file")
serial = input("Auction:")
hash = input("Bid: ")
file = "auction_{}_bid_{}.txt".format(serial, hash)
receipt_dict = literal_eval(lines[0])
hash_str = receipt_dict['bid']['key'] + receipt_dict['bid']['cert'] + receipt_dict['bid']['serial'] +\
receipt_dict['bid']['hash_prev'] + receipt_dict['bid']['amount'] + receipt_dict['bid']['name'] +\
receipt_dict['bid']['id'] + receipt_dict['bid']['timestamp']
digest = hashlib.md5(hash_str.encode()).hexdigest()
msg = {'payload': {'command': 'check_receipt', 'id': self.id, 'serial': serial, 'hash': hash}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
payload = json.dumps(data['payload'])
if self.valid_signature(self.repo_pubkey, payload, signature):
if 'info' not in data['payload']:
data = data['payload']
bid = data['bid']
repo_info = bid['key'] + bid['cert'] + bid['serial'] + bid['hash_prev'] + \
bid['amount'] + bid['id'] + bid['timestamp']
digest_repo = hashlib.md5(repo_info.encode()).hexdigest()
print("Hash computed from receipt: " + digest)
print("Hash computed from repository information: " + digest_repo)
if digest == digest_repo:
print("\nThe receipt's information is identical to the information stored on the server")
else:
print("\nThe receipt's information is NOT identical to the information stored on the server")
self.exit(0)
else:
print("info: " + data['payload']['info'])
except:
#print("Cannot check the receipt")
self.mylogger.log(INFO, "Cannot check the receipt")
raise
def display_ids(self):
"""
Display the IDs of the clients currently active in the system
"""
try:
self.mylogger.log(INFO, "Listing ids of active clients")
msg = {'payload': {'command': 'list_ids', 'id': self.id}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
payload = json.dumps(data['payload'])
if self.valid_signature(self.repo_pubkey, payload, signature):
if 'ack' in data['payload']:
if data['payload']['ack'] == 'nok':
print("\nNo active clients at the moment")
else:
for id in data['payload']['ids']:
print("\n" + id + "\n")
except:
print("Can't list ids of active clients")
self.mylogger.log(INFO, "Cannot list ids")
raise
def list_active_auctions(self):
"""
List the currently active auctions on the repository server
"""
try:
self.mylogger.log(INFO, "Listing active auctions ")
msg = {'payload': {'command': 'list_open', 'id': self.id}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
payload = json.dumps(data['payload'])
if self.valid_signature(self.repo_pubkey, payload, signature):
if 'ack' in data['payload']:
if data['payload']['ack'] == 'nok':
print("\nNo active auctions at the moment")
else:
print(data['payload'])
except:
#print("Can't list active auctions")
self.mylogger.log(INFO, "Cannot list active auctions")
raise
def list_closed_auctions(self):
"""
List the closed auctions on the repository server
"""
try:
self.mylogger.log(INFO, "Listing closed auctions ")
msg = {'payload': {'command': 'list_closed', 'id': self.id}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
payload = json.dumps(data['payload'])
if self.valid_signature(self.repo_pubkey, payload, signature):
if 'ack' in data['payload']:
if data['payload']['ack'] == 'nok':
print("\nNo closed auctions at the moment")
else:
print(data['payload'])
except:
print("Cannot list closed auctions")
self.mylogger.log(INFO, "Cannot list closed auctions ")
raise
def bids_auction(self):
"""
List all bids of an auction, given its serial number
"""
try:
serial = input("Serial number of auction:")
self.mylogger.log(INFO, "Listing bids of an auction {}".format(serial))
msg = {'payload': {'command': 'bid_auction', 'serial': serial, 'id': self.id}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
payload = json.dumps(data['payload'])
if self.valid_signature(self.repo_pubkey, payload, signature):
if 'ack' in data['payload']:
if data['payload']['ack'] == 'nok':
print("Auction has no bids")
else:
print("\nBids of auction {}:".format(serial))
for key in data['payload'].keys():
print(data['payload'][key] + "\n")
except:
#print("Cannot list bids of an auction")
self.mylogger.log(INFO, "Cannot list bids of an auction")
raise
def bids_client(self):
"""
List all bids of a client, given his/her ID
"""
try:
id = input("Id of the client:")
self.mylogger.log(INFO, "Listing bids of client {}".format(id))
msg = {'payload': {'command': 'bid_client', 'c_id': id, 'id': self.id}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
data, server = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
payload = json.dumps(data['payload'])
if self.valid_signature(self.repo_pubkey, payload, signature):
if 'ack' in data['payload']:
if data['payload']['ack'] == 'nok':
print("Client has no bids")
else:
print("\nBids of client {}:".format(id))
for key in data['payload'].keys():
print(data['payload'][key] + "\n")
except:
#print("Cannot show bids of auction")
self.mylogger.log(INFO, "Cannot show bids of auction")
raise
def display_client(self):
"""
Display client's information (ID and name)
"""
try:
self.mylogger.log(INFO, "Displaying client's information")
print("Name: {}, Id: {}".format(self.name, self.id))
except:
print("Cannot display client's information")
self.mylogger.log(INFO, "Cannot display client's information")
raise
def hash_cash(self, r_string, numZeros):
"""
        Proof-of-work function that receives a random string from the repository
        and a required number of leading zeros.
        The repository's string is joined with an incrementing counter and the
        function computes successive SHA256 digests of that string; when a digest
        starts with numZeros zeros, the solution has been found and the string
        and its digest are returned.
"""
try:
self.mylogger.log(INFO, "Calculating proof-of-work: digest with {} zeros".format(numZeros))
print("\n...calculating hash using hash-cash system")
loop = True
ctr = 0
while (loop):
ctr += 1
solution = False
_string = r_string + ":" + str(ctr)
hash_object = hashlib.sha256(_string.encode('utf-8'))
digest = hash_object.hexdigest()
for i in range(0, int(numZeros)):
if digest[i] == "0":
solution = True
else:
solution = False
break
if solution:
loop = False
return _string, digest
except:
self.mylogger.log(INFO, "Exception on hash cash")
raise
def valid_signature(self, pubk, message, signature):
"""
Validate an entity's signature on a message
"""
try:
pubk = self.crypto.loadPubk(pubk)
if not self.crypto.verifySignatureServers(pubk, message, signature):
return False
return True
except:
print("Cannot validate signature")
self.mylogger.log(INFO, "Cannot validate signature")
raise
def exit(self, type):
"""
Shutdown the client
"""
try:
self.mylogger.log(INFO, "Exiting client")
msg = {'payload': {'exit': 'client exit', 'id': self.id}}
signature = base64.b64encode(self.cc.sign_data(self.slot, json.dumps(msg['payload']))).decode()
msg['signature'] = signature
sent = self.sock.sendto(json.dumps(msg).encode(), self.man_address)
sent = self.sock.sendto(json.dumps(msg).encode(), self.repo_address)
self.mylogger.log(INFO, "Exiting Client")
print("Exiting...")
self.sock.close()
sys.exit(type)
except:
self.mylogger.log(INFO, "Cannot exit client")
raise
if __name__ == "__main__":
c = Client(HOST, PORT_MAN, PORT_REPO)
try:
c.start()
except KeyboardInterrupt:
        c.exit(0)
```
#### File: src/dynamicCode/limit_bids.py
```python
def foo(id_client, num_bids):
limit_bids = {'WVzMbdOi9f+xgWZ5+jJ7TQ==': 2, 'IzHRqoS1SirYWHLmtinmvw==': 1, 'WuIaYf+KjvlGyJdCkGP7fA==': 2}
if id_client in limit_bids.keys():
if num_bids <= limit_bids[id_client]:
return True
return False
valid = foo(id_client, num_bids)
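# Contract assumed by the server that exec()s this dynamic code: 'id_client' and 'num_bids'
# are provided in the execution namespace and the boolean outcome must be left in 'valid'.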
```
#### File: Blockchain_auction_management/src/repo.py
```python
import hashlib
import os, datetime, sys, json, base64, re, copy
import random
import string
from os import listdir
from ast import literal_eval
from socket import *
from blockchain import *
from logging import DEBUG, ERROR, INFO
from log import LoggyLogglyMcface
from security import *
from cc_interface import PortugueseCitizenCard
HOST = "127.0.0.1"
PORT_MAN = 8080
PORT_REPO = 8081
MAX_BUFFER_SIZE = 10000
class Repository():
def __init__(self, host, port):
LOG = "./log.txt"
for filename in listdir("./"):
if filename == "log.txt":
os.remove(LOG)
self.mylogger = LoggyLogglyMcface(name=Repository.__name__)
self.mylogger.log(INFO, "Entering Repository interface")
# repository information
self.name = Repository.__name__
self.privKname = "privK" + self.name
self.password = "<PASSWORD>"
self.repo_pubkey = None
self.man_pubkey = None
self.host = host
self.port = port
self.loggedInClient = 0
# client public keys
self.clients_pubkey = set()
# Addresses of clients and manager
self.address_client = []
self.manager_address = None
# list of active and closed auctions
self.active_auctions = []
self.closed_auctions = []
self.all_auctions = []
self.sock = socket(AF_INET, SOCK_DGRAM)
self.sock.bind((self.host, self.port))
# incremental serial number of the auctions
self.serial = 0
# hash of the previous block (auction serial, previous hash)
self.hash_prev = {}
# generate public and private key
self.certgen = GenerateCertificates()
self.certops = CertificateOperations()
self.crypto = CryptoUtils()
# dictionary of id of the client and public key
self.pubkey_dict = {}
# client is waiting for message (after sending proof-of-work result)
self.current_client = None
self.client_waiting = False
def start(self):
"""
Servers and Client exchange public keys
"""
try:
# verify if repository private key already exists. load if true
if self.certgen.checkExistence(self.name):
self.certgen.loadPrivateKeyFromFile(self.privKname, password=self.password)
else:
self.certgen.writePrivateKeyToFile(self.privKname, password=self.password)
self.repo_pubkey = self.certgen.publicKeyToBytes()
print("Listening...")
self.mylogger.log(INFO, "Exchanging public key with the manager")
data1, self.manager_address = self.sock.recvfrom(MAX_BUFFER_SIZE)
print("> manager pubkey received")
msg = json.dumps({'repo_pubk': self.repo_pubkey.decode()})
bytes = self.sock.sendto(msg.encode(), self.manager_address)
self.mylogger.log(INFO, "Manager public key received")
data1 = json.loads(data1)
if 'man_pubk' in data1:
self.man_pubkey = data1['man_pubk']
self.mylogger.log(INFO, "Man Pubkey : \n{}".format(self.man_pubkey))
self.mylogger.log(INFO, "Exchanging public key with the client")
data2, client_addr = self.sock.recvfrom(MAX_BUFFER_SIZE)
print("> client pubkey received")
bytes = self.sock.sendto(msg.encode(), client_addr)
self.mylogger.log(INFO, "Client public key received")
data2 = json.loads(data2)
self.client_login(data2, client_addr)
self.loop()
except:
self.mylogger.log(INFO, "Cannot start repository")
raise
def loop(self):
"""
The main loop of the repository. It waits for messages of clients
(both system clients or servers) and calls functions according
to the received messages
"""
try:
while (True):
date_time = datetime.datetime.now()
for auction in self.active_auctions:
timestamp_auction = datetime.datetime.strptime(auction.timestamp, '%m/%d/%Y, %H:%M:%S')
delta = date_time - timestamp_auction
seconds = delta.days * 24 * 3600 + delta.seconds
                    time_limit = re.findall(r'\d+', auction.time_limit)
time_limit = (int(time_limit[0]) * 3600) + (int(time_limit[1]) * 60) + int(time_limit[2])
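                    # e.g. a time limit entered as "1h30m0s" yields ['1', '30', '0'] -> 5400 seconds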
print("info: {} seconds have passed on auction {}".format(seconds, auction.serial))
if seconds > time_limit:
print("> auction {} has ended".format(auction.serial))
self.closed_auctions.append(auction)
self.active_auctions.remove(auction)
file = "auction{}.txt".format(auction.serial)
current_path = os.getcwd()
path = "{}/auctions/{}".format(current_path, file)
msg = {'payload': {'end': path}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.manager_address)
data, addr = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
signature = base64.b64decode(data['signature'])
if self.valid_signature(self.man_pubkey, json.dumps(data['payload']), signature):
if data['payload']['ack'] == 'ok':
with open(path) as f:
lines = f.readlines()
lines = [x.strip("\n") for x in lines]
blockchain = None
for i in range(len(lines)):
lines_dict = literal_eval(lines[i])
if i == 0:
current_serial = lines_dict['serial']
blockchain = Blockchain(lines_dict['key'], lines_dict['cert'], lines_dict['serial'],
lines_dict['id'], lines_dict['timestamp'],
lines_dict['name'], lines_dict['time-limit'],
lines_dict['description'], lines_dict['type'],
lines_dict['state'], lines_dict['winner'],
lines_dict['winner_amount'])
else:
block = Block(lines_dict['key'], lines_dict['cert'], lines_dict['serial'],
lines_dict['hash'], lines_dict['hash_prev'], lines_dict['amount'],
lines_dict['name'], lines_dict['id'], lines_dict['timestamp'])
blockchain.add_block(block)
for a in range(len(self.closed_auctions)):
if auction.serial == self.closed_auctions[a].serial:
self.closed_auctions[a] = blockchain
for a in range(len(self.all_auctions)):
if auction.serial == self.all_auctions[a].serial:
self.all_auctions[a] = blockchain
if self.client_waiting:
msg = {'payload': {'ack': 'nok', 'info': 'busy: bid no created'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.current_client)
else:
print("> no bids on ended auction {} -> no possible winner".format(auction.serial))
else:
print("> couldn't find the winner")
data, addr = self.sock.recvfrom(MAX_BUFFER_SIZE)
data = json.loads(data)
if (addr not in self.address_client) and (addr != self.manager_address):
print("> client pubkey received")
msg = json.dumps({'repo_pubk': self.repo_pubkey.decode()})
bytes = self.sock.sendto(msg.encode(), addr)
self.client_login(data, addr)
else:
self.client_waiting = False
if 'auction' in data['payload']:
signature = base64.b64decode(data['signature'])
if data['payload']['valid']:
if self.valid_signature(self.man_pubkey, json.dumps(data['payload']), signature):
data2 = data['payload']
self.create_auction(addr, data2['auction']['key'], data2['auction']['cert'],
self.serial + 1, data2['auction']['id'], data2['auction']['timestamp'],
data2['auction']['name'], data2['auction']['time-limit'],
data2['auction']['description'], data2['auction']['type'])
elif 'bid' in data['payload']:
data2 = copy.deepcopy(data)
signature = base64.b64decode(data2['payload'].pop('sig_c'))
if self.crypto.verifySignatureCC(self.pubkey_dict[data['payload']['bid']['id']], json.dumps(data2['payload']), signature):
self.place_bid(addr, data['payload'])
elif 'command' in data['payload']:
signature = base64.b64decode(data['signature'])
data2 = data['payload']
payload = json.dumps(data2)
if 'bid_request' in data2['command']:
if self.crypto.verifySignatureCC(self.pubkey_dict[data2['id']], payload, signature):
self.send_pow(addr, data2)
elif 'list_open' in data2['command']:
if self.crypto.verifySignatureCC(self.pubkey_dict[data2['id']], payload, signature):
self.list_open(addr)
elif 'list_closed' in data2['command']:
if self.crypto.verifySignatureCC(self.pubkey_dict[data2['id']], payload, signature):
self.list_closed(addr)
elif 'bid_auction' in data2['command']:
if self.crypto.verifySignatureCC(self.pubkey_dict[data2['id']], payload, signature):
self.bids_auction(addr, data2['serial'])
elif 'bid_client' in data2['command']:
if self.crypto.verifySignatureCC(self.pubkey_dict[data2['id']], payload, signature):
self.bids_client(addr, data2['c_id'])
elif 'check_receipt' in data2['command']:
if self.crypto.verifySignatureCC(self.pubkey_dict[data2['id']], payload, signature):
self.check_receipt(addr, data2['serial'], data2['hash'])
elif 'list_ids' in data2['command']:
if self.crypto.verifySignatureCC(self.pubkey_dict[data2['id']], payload, signature):
self.list_ids(addr)
if 'exit' in data['payload']:
msg = json.dumps({'payload': {'exit': 'client exit'}})
signature = base64.b64decode(data['signature'])
if self.crypto.verifySignatureCC(self.pubkey_dict[data['payload']['id']], json.dumps(data['payload']), signature):
self.loggedInClient -= 1
if self.loggedInClient <= 0:
self.mylogger.log(INFO, "Exiting Repository")
self.exit(0)
for auction in self.active_auctions:
file = "auction{}.txt".format(auction.serial)
auction.save_to_file(file)
except:
self.mylogger.log(INFO, "Exception on repository server's loop ")
raise
def create_auction(self, addr, key, cert, serial, id, timestamp, name, timelimit, description, type):
"""
Create an auction (new blockchain) and store it in a file
after receiving its parameters from the manager server
"""
try:
self.mylogger.log(INFO, "Create auction ")
blockchain = Blockchain(key, cert, serial, id, timestamp, name, timelimit, description, type, state='active')
self.serial = self.serial + 1
print("> auction creation: OK")
self.active_auctions.append(blockchain)
self.all_auctions.append(blockchain)
self.hash_prev[str(serial)] = '0'
msg = {'payload': {'ack': 'ok', 'info': 'auction', 'id': id, 'serial': str(serial)}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), addr)
except:
self.mylogger.log(INFO, "Auction cannot be created ")
print("> auction creation: NOT OK\n")
msg = {'payload': {'ack': 'nok', 'info': 'auction', 'id': id}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), addr)
# send the proof-of-work to client. The cryptopuzzle is a hash-cash
def send_pow(self, address_client, data):
"""
Send proof-of-work to the client (random string and number of zeros required).
A response with a string and a digest is received and the function calculates
the SHA256 digest of the string and compares it with the digest, also sent by the client.
If equal, the client may send the bid parameters.
"""
try:
self.mylogger.log(INFO, "Sending proof-of-work to client ")
type = ""
auction_exists = False
for auction in self.active_auctions:
if str(auction.serial) == data['serial']:
type = auction.type
auction_exists = True
if auction_exists is False:
msg = {'payload': {'ack': 'nok', 'info': 'auction does not exist'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
else:
r_string = ''.join(
random.choice(string.digits + string.ascii_lowercase + string.ascii_uppercase) for c in range(6))
msg = {'payload': {'ack': 'ok', 'r_string': r_string, 'numZeros': '5', 'type': type,
'hash_prev': self.hash_prev[data['serial']]}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
data2, addr = self.sock.recvfrom(MAX_BUFFER_SIZE)
data2 = json.loads(data2)
signature = base64.b64decode(data2['signature'])
if self.crypto.verifySignatureCC(self.pubkey_dict[data2['payload']['id']],
json.dumps(data2['payload']), signature):
if 'digest' in data2['payload']:
print("> proof-of-work result of client: " + json.dumps(data2['payload']['digest']))
hash_object = hashlib.sha256(data2['payload']['string'].encode('utf-8'))
digest = hash_object.hexdigest()
if data2['payload']['digest'] == digest:
msg2 = {'payload': {'ack': 'ok', 'type': type, 'hash_prev': self.hash_prev[data['serial']]}}
self.current_client = addr
self.client_waiting = True
else:
msg2 = {'payload': {'ack': 'nok'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg2['payload']))).decode()
msg2['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg2).encode(), address_client)
else:
msg2 = {'payload': {'ack': 'nok', 'info': 'busy: could not send proof-of-work'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg2['payload']))).decode()
msg2['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg2).encode(), address_client)
except:
print("Cannot send proof-of-work to client")
self.mylogger.log(INFO, "Cannot send proof-of-work to client ")
raise
def place_bid(self, addr, data):
"""
Receives the new bid parameters, creates a new block and
inserts it in the blockchain of the respective auction
"""
try:
self.mylogger.log(INFO, "Place a bid ")
client_address = addr
for auction in self.active_auctions:
if data['bid']['serial'] == str(auction.serial):
block = Block(data['bid']['key'], data['bid']['cert'], data['bid']['serial'], data['bid']['hash'],
data['bid']['hash_prev'], data['bid']['amount'], data['bid']['name'],
data['bid']['id'], data['bid']['timestamp'])
self.hash_prev[data['bid']['serial']] = data['bid']['hash']
msg = {'payload': {'bid_valid': data}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), self.manager_address)
data2, addr = self.sock.recvfrom(MAX_BUFFER_SIZE)
data2 = json.loads(data2)
signature = base64.b64decode(data2['signature'])
payload = json.dumps(data2['payload'])
if self.valid_signature(self.man_pubkey, payload, signature):
if data2['payload']['valid'] is True:
auction.add_block(block)
print("> bid creation in auction {}: OK".format(auction.serial))
signature = base64.b64encode(self.certgen.signData(json.dumps(data2['payload']['receipt']))).decode()
data2['payload']['receipt']['sig_r'] = signature
msg = {'payload': {'ack': 'ok', 'receipt': data2['payload']['receipt']}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), client_address)
break
else:
print("> bid creation in auction {}: NOK".format(auction.serial))
if 'info' in data2['payload']:
msg = {'payload': {'ack': 'nok', 'info': data2['payload']['info']}}
else:
msg = {'payload': {'ack': 'nok', 'valid': data2['payload']['valid']}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), client_address)
break
else:
print("> bid creation in auction {}: NOK".format(auction.serial))
msg = {'payload': {'ack': 'nok', 'info': 'non active'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), client_address)
except:
print("Cannot create bid")
self.mylogger.log(INFO, "Cannot create bid ")
raise
def list_ids(self, address_client):
"""
Send list of the IDs of the clients of the system
"""
try:
self.mylogger.log(INFO, "Listing active auctions")
if self.pubkey_dict:
msg = {'payload': {'ack': 'ok', 'ids': list(self.pubkey_dict.keys())}}
else:
                msg = {'payload': {'ack': 'nok'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
except:
self.mylogger.log(INFO, "Cannot list ids of clients")
raise
def list_open(self, address_client):
"""
Send list of the currently active auctions
"""
try:
self.mylogger.log(INFO, "Listing active auctions")
auctions = ""
for auction in self.active_auctions:
auctions = auctions + str(auction.info_user()) + "\n"
if auctions != "":
msg = {'payload': auctions}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
print("> sending list of active auctions")
else:
msg = {'payload': {'ack': 'nok'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
except:
print("Cannot send active auctions")
self.mylogger.log(INFO, "Cannot send active auctions ")
raise
def list_closed(self, address_client):
"""
Send list of the closed auctions
"""
try:
self.mylogger.log(INFO, "Listing closed auctions ")
auctions = ""
for auction in self.closed_auctions:
auctions = auctions + str(auction.info_user()) + "\n"
if auctions != "":
msg = {'payload': auctions}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
print("> sending list of closed auctions")
else:
msg = {'payload': {'ack': 'nok'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
except:
print("Can't send active auctions")
self.mylogger.log(INFO, "Cannot send active auctions ")
raise
def bids_auction(self, address_client, serial):
"""
Send list of all the bids of an auction
"""
try:
self.mylogger.log(INFO, "Listing bids of auction {} ".format(serial))
msg = {}
i = 0
result = None
auctions_exists = False
for auction in self.all_auctions:
if auction.serial == int(serial):
auctions_exists = True
result = auction.bids_auction(serial)
if auctions_exists:
for bid in result:
bid_number = "bid_{}".format(i)
msg[bid_number] = bid
i = i + 1
msg = {'payload': msg}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
print("\n> sent list of bids of auction {}".format(serial))
else:
msg = {'payload': {'ack': 'nok'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
except:
print("> cannot send list of bids of auction {}".format(serial))
self.mylogger.log(INFO, "Cannot list bids of auction {}".format(serial))
raise
def bids_client(self, address_client, id):
"""
Send list of all the bids of a client
"""
try:
self.mylogger.log(INFO, "Listing bids of client {} ".format(id))
msg = {}
i = 0
            result = []
            client_exists = False
            for auction in self.all_auctions:
                bids = auction.bids_client(id)
                if bids:
                    client_exists = True
                    result.extend(bids)
if client_exists:
for bid in result:
bid_number = "bid_{}".format(i)
msg[bid_number] = bid
i = i + 1
msg = {'payload': msg}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
print("\n> sent list of bids of client {}".format(id))
else:
msg = {'payload': {'ack': 'nok'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
except:
print("> can't send list of bids of client {}".format(id))
self.mylogger.log(INFO, "Listing bids of client {} ".format(id))
raise
def check_receipt(self, address_client, serial, hash):
"""
Send bid information to a client requesting it, for
validation against a receipt.
"""
try:
self.mylogger.log(INFO, "Sending bid information for receipt validation ")
print("> sending bid information for receipt validation")
closed_auctions = False
for auction in self.closed_auctions:
if str(auction.serial) == serial:
closed_auctions = True
info = auction.bid_info(hash)
if info != "":
msg = {'payload': {'bid': info}}
else:
msg = {'payload': {'ack': 'nok', 'info': 'no info about bid'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
else:
msg = {'payload': {'ack': 'nok', 'info': 'the auction is not closed'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
if not closed_auctions:
msg = {'payload': {'ack': 'nok', 'info': 'no closed auctions'}}
signature = base64.b64encode(self.certgen.signData(json.dumps(msg['payload']))).decode()
msg['signature'] = signature
bytes = self.sock.sendto(json.dumps(msg).encode(), address_client)
except:
print("> cannot send bid information for receipt validation")
self.mylogger.log(INFO, "Cannot send bid information for receipt validation ")
def client_login(self, message, client_addr):
"""
Storing information on a new client of the system
"""
try:
self.mylogger.log(INFO, "Adding new client ")
cert = None
if 'c_pubk' in message:
self.mylogger.log(INFO, "Client Pubkey : \n{}".format(message['c_pubk']))
self.loggedInClient += 1
self.pubkey_dict[message['id']] = message['c_pubk']
self.address_client.append(client_addr)
except:
print("Cannot sign up new client")
self.mylogger.log(INFO, "Cannot signup new client ")
raise
def valid_signature(self, pubk, message, signature):
"""
Validate an entity's signature on a message
"""
try:
pubk = self.crypto.loadPubk(pubk)
if not self.crypto.verifySignatureServers(pubk, message, signature):
return False
return True
except:
print("Cannot validate signature")
self.mylogger.log(INFO, "Cannot validate signature ")
raise
def exit(self, type):
"""
Shutdown the repository
"""
try:
self.mylogger.log(INFO, "Exiting repository ")
print("Exiting...")
self.sock.close()
sys.exit(type)
except:
self.mylogger.log(INFO, "Cannot exit repository ")
raise
if __name__ == "__main__":
r = Repository(HOST, PORT_REPO)
try:
r.start()
except KeyboardInterrupt:
print("Exiting...")
``` |
{
"source": "jp-fosterson/pandocblog",
"score": 3
} |
#### File: pandocblog/bin/utils.py
```python
import yaml
def parse_metadata(doc,join='\n'):
"""
Parse the metadata from a document and parse it
as a YAML dict and return it.
"""
doc = doc.strip()
if doc.startswith('---\n') and ('...' in doc or '---' in doc[4:]):
# found starting yaml block
yblock = doc[4:].split('...')[0].split('---')[0]
meta = yaml.load(yblock, Loader=yaml.SafeLoader)
for k in meta.keys():
val = meta[k]
if isinstance(val,list):
meta[k] = join.join(val)
meta['metadata_yaml_length'] = len(yblock) + 7
if 'description' not in meta:
body = doc[meta['metadata_yaml_length']:]
first_para = body.strip().split('\n')[0]
meta['description'] = first_para
return meta
else:
# No yaml
return {}
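# Illustrative use (hypothetical document):
#   parse_metadata("---\ntitle: Hello\ntags:\n- a\n- b\n...\nFirst paragraph.")
#   -> {'title': 'Hello', 'tags': 'a\nb', 'metadata_yaml_length': <offset of the body>,
#       'description': 'First paragraph.'}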
``` |
{
"source": "jpfranci/StockChecker",
"score": 3
} |
#### File: StockChecker/model/stock_options.py
```python
import math
from typing import List, Iterable
class StockOptions:
def __init__(self, price_threshold: float, official_sites_only: bool, size_requirement: List[str]):
self.price_threshold = price_threshold
self.official_sites_only = official_sites_only
self.size_requirement = size_requirement
def set_size_requirement(self, size_requirement: Iterable[str]):
self.size_requirement = list(size_requirement)
@classmethod
def from_json(cls, data):
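        # e.g. StockOptions.from_json({'price_threshold': 500.0, 'official_sites_only': True, 'size_requirement': ['US 9']})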
return cls(**data)
@staticmethod
def create_default():
return StockOptions(
math.inf,
True,
[])
```
#### File: services/tasks/chrono_task.py
```python
import time
class ChronoTask:
# interval is in minutes
def __init__(self, interval):
self.interval = interval
def execute(self):
raise Exception("Execute for task not implemented")
@staticmethod
def format_time():
return time.strftime('%Y%m%d-%H%M')
```
#### File: StockChecker/stock_checkers/cc_stock_checker.py
```python
import logging
import re
import traceback
import aiohttp
import lxml.html
from conversions.conversions import get_float_from_price_str
from model.user_exception import UserException
from stock_checkers.abstract_request_stock_checker import AbstractRequestStockChecker
from stock_checkers.stock_check_result import StockCheckResult
base_headers = {
'DNT': "1",
"Cache-Control": "max-age=0",
"Upgrade-Insecure-Requests": "1",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Sec-Fetch-Site': 'none',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-User': '?1',
'Sec-Fetch-Dest': 'document',
'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8'
}
class CanadaComputersStockChecker(AbstractRequestStockChecker):
def to_stock_check_url(self, url: str, domain: str, suffix: str):
try:
return f'https://www.canadacomputers.com/product_info.php?cPath=13&item_id={CanadaComputersStockChecker.get_sku(url)}'
except:
raise UserException("The url was invalid.")
def get_base_headers(self) -> dict:
return base_headers
def get_item_name(self, doc) -> str:
item_name = super().get_item_name(doc)
if item_name in "Welcome - Canada Computers & Electronics":
return ""
else:
return item_name
@staticmethod
def get_sku(url):
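        # e.g. (hypothetical url) "...product_info.php?cPath=13&item_id=123456" -> "123456"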
return re.search(r"item_id=\w*", url).group().split("item_id=")[1]
@staticmethod
def assert_is_item(doc):
# checking if id codes exist
if not doc.xpath("//p[contains(@class, 'm-0 text-small')]"):
raise Exception("The item was not valid")
@staticmethod
def get_in_stock_by_store_name(doc, attribute_name: str, store_name: str):
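        # The stock table shows a count such as "5+" per location; it is parsed to an int
        # (e.g. "5+" -> 5) and any positive count means that store has stock.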
try:
element = doc.xpath(f"//{attribute_name}[text() = '{store_name}']/../../..//span[@class = 'stocknumber']")[0]
if element.text:
stock_str = element.text.strip()
else:
stock_str = element.xpath(".//strong")[0].text.strip()
stock_count = int(stock_str.split('+')[0])
return stock_count > 0
except:
return False
@staticmethod
def get_price(doc):
price_str = doc.xpath("//div[contains(@class, 'order-md-1')]//strong")[-1].text.strip()
price = get_float_from_price_str(price_str)
return price
async def check_stock(self, item_url: str) -> StockCheckResult:
stores_to_check = [{'store_location': 'Burnaby', 'attribute_name': 'a'},
{'store_location': 'Coquitlam', 'attribute_name': 'a'},
{'store_location': 'Grandview', 'attribute_name': 'a'},
{'store_location': 'Richmond', 'attribute_name': 'a'},
{'store_location': 'Vancouver Broadway', 'attribute_name': 'a'},
{'store_location': 'Online Store', 'attribute_name': 'p'}]
stock_check_result = StockCheckResult.create_default(item_url)
session = aiohttp.ClientSession()
try:
request_response = await self.fetch(session, item_url)
doc = lxml.html.fromstring(request_response)
self.assert_is_item(doc)
stock_check_result.item_name = self.get_item_name(doc)
stock_check_result.is_item_available = True
price = CanadaComputersStockChecker.get_price(doc)
stock_check_result.set_all_prices(price)
for store in stores_to_check:
in_stock = CanadaComputersStockChecker.get_in_stock_by_store_name(doc, store['attribute_name'], store['store_location'])
if in_stock:
stock_check_result.is_in_stock = True
stock_check_result.in_stock_stores.append(store['store_location'])
except Exception as e:
logging.error(traceback.format_exc())
logging.error(e)
finally:
await session.close()
return stock_check_result
```
#### File: jpfranci/StockChecker/stock_check_result_reporter.py
```python
import asyncio
import logging
import math
import traceback
from typing import List, Set
from datetime import datetime
import tldextract
from services import discord_user_service
import sql_item_persistence
from conversions.message_formatters import chunk_messages
from conversions.size_formatters import get_size_requirement_str
from model.item import Item
from model.notification_user import NotificationUser
from model.website import Website, bannable_websites
from settings.settings import ADMINISTRATOR_ID
from stock_checkers.stock_check_result import StockCheckResult, MAX_FAILURES, NO_IN_STOCK_SIZES
def handle_stock_check_result(conn, stock_check_result: StockCheckResult, item: Item, main_thread_loop, stock_check_time: datetime, bot):
item.item_name = stock_check_result.item_name if stock_check_result.item_name is not None and stock_check_result.item_name != "" else item.item_name
item.last_stock_check = stock_check_time
stock_check_result.item_name = item.item_name
stock_check_result.fail_count = 0 if item.last_stock_check_result is None else item.last_stock_check_result.fail_count
if not stock_check_result.is_item_available:
stock_check_result.fail_count += 1
else:
stock_check_result.fail_count = 0
if item.last_stock_check_result != stock_check_result:
sql_item_persistence.sqlite_item_persistence.insert_price_history(conn, stock_check_result, int(item.last_stock_check.timestamp()))
item.stock_status = stock_check_result.is_in_stock
item.last_stock_check_result = stock_check_result
sql_item_persistence.sqlite_item_persistence.upsert_item(conn, item)
subscribed_users = sql_item_persistence.sqlite_item_persistence.get_subscribed_users_for_item(conn, item)
for subscribed_user in subscribed_users:
asyncio.run_coroutine_threadsafe(notify_valid_subscribers(subscribed_user, item, stock_check_result, bot), main_thread_loop)
async def send_message(user_id, message, bot):
logging.info(f"Sending message: {message}")
for total_fail_count in range(0, 5):
try:
user = bot.get_user(int(user_id))
if user is None:
user = await bot.fetch_user(int(user_id))
await user.send(message)
break
except Exception as e:
logging.error(e)
logging.error(traceback.format_exc(limit=5))
async def notify_valid_subscribers(subscribed_user: NotificationUser, item: Item, stock_result: StockCheckResult, bot):
is_unsubscribed = False
if item.item_name != subscribed_user.item_name:
subscribed_user.item_name = item.item_name
if stock_result.is_item_available:
is_unsubscribed, messages = handle_available_stock_check_result(subscribed_user, item, stock_result)
message_chunks = chunk_messages(messages, 2)
send_message_tasks = list(map(lambda message: send_message(subscribed_user.id, message, bot), message_chunks))
await asyncio.gather(*send_message_tasks)
else:
logging.error(stock_result.format_log())
website = Website(tldextract.extract(stock_result.item_url).domain)
if stock_result.fail_count >= MAX_FAILURES:
if website not in bannable_websites:
is_unsubscribed = True
await discord_user_service.unsubscribe(stock_result.item_url, subscribed_user)
await send_message(subscribed_user.id, f'You have been unsubscribed from tracking {item.item_name} for {stock_result.item_url} because it may no longer be available or the bot got banned.', bot)
elif stock_result.fail_count == MAX_FAILURES:
await send_message(ADMINISTRATOR_ID, f'The bot has been banned from {website.value} and url {stock_result.item_url}', bot)
if not is_unsubscribed:
await discord_user_service.upsert_user(subscribed_user)
else:
await discord_user_service.unsubscribe(stock_result.item_url, subscribed_user)
def handle_available_stock_check_result(subscribed_user: NotificationUser, item: Item, stock_check_result: StockCheckResult):
messages = []
item_name = item.item_name
in_stock_sizes_for_user_set = set(subscribed_user.stock_options.size_requirement).intersection(set(stock_check_result.in_stock_sizes))
is_unsubscribed = False
price_to_use, price_str_to_use, previous_price_to_use = get_prices_to_use(subscribed_user, stock_check_result, item)
if should_check_size_requirement(subscribed_user.stock_options.size_requirement, stock_check_result.available_sizes):
are_all_sizes_valid, message_portion = check_validity_of_subscribed_sizes(subscribed_user, stock_check_result)
is_unsubscribed = not are_all_sizes_valid
messages.append(message_portion)
if is_stock_check_result_different_for_user(stock_check_result, subscribed_user, in_stock_sizes_for_user_set):
if (not verify_price(subscribed_user.stock_options.price_threshold, price_to_use) or not stock_check_result.is_in_stock) and subscribed_user.last_stock_status:
subscribed_user.last_stock_status = False
messages.append(f'{item_name} at {item.url} just went out of stock')
elif stock_check_result.is_in_stock:
messages.extend(handle_valid_in_stock_result(subscribed_user, stock_check_result, item, in_stock_sizes_for_user_set))
return is_unsubscribed, messages
def handle_valid_in_stock_result(subscribed_user: NotificationUser, stock_check_result: StockCheckResult, item: Item, in_stock_sizes_for_user_set):
messages = []
price_to_use, price_str_to_use, previous_price_to_use = get_prices_to_use(subscribed_user, stock_check_result, item)
item_name = item.item_name
if verify_price(subscribed_user.stock_options.price_threshold, price_to_use):
messages.extend(handle_valid_price(subscribed_user, stock_check_result, in_stock_sizes_for_user_set, price_str_to_use))
subscribed_user.set_last_in_stock_stores_for_user(stock_check_result.in_stock_stores)
subscribed_user.set_last_in_stock_sizes_for_user(in_stock_sizes_for_user_set)
elif is_price_over_threshold_for_first_time(subscribed_user, previous_price_to_use):
official_sources_str = f"when sold by {item.website.value} directly" if subscribed_user.stock_options.official_sites_only else f"any seller on {item.website.value}"
        messages.append(f'The price for {item_name} ({price_str_to_use}) {official_sources_str} at {stock_check_result.item_url} has exceeded your limit of ${subscribed_user.stock_options.price_threshold}')
subscribed_user.set_last_in_stock_sizes_for_user([])
subscribed_user.set_last_in_stock_stores_for_user([])
subscribed_user.last_stock_status = False
return messages
def handle_valid_price(
subscribed_user: NotificationUser,
stock_check_result: StockCheckResult,
in_stock_sizes_for_user_set: Set[str],
price_str_to_use: str) -> List[str]:
messages = []
newly_in_stock_stores_for_user = set(stock_check_result.in_stock_stores).difference(set(subscribed_user.last_in_stock_stores_for_user))
no_longer_in_stock_stores_for_user = set(subscribed_user.last_in_stock_stores_for_user).difference(set(stock_check_result.in_stock_stores))
no_longer_in_stock_sizes = set(subscribed_user.last_in_stock_sizes_for_user).difference(in_stock_sizes_for_user_set)
newly_in_stock_sizes_for_user = in_stock_sizes_for_user_set.difference(set(subscribed_user.last_in_stock_sizes_for_user))
formatted_in_stock_sizes_for_user = f"In total, tracked size(s), {get_size_requirement_str(in_stock_sizes_for_user_set)}, are in stock."
if found_new_stock(subscribed_user, newly_in_stock_sizes_for_user, newly_in_stock_stores_for_user):
newly_in_stock_sizes_str = get_size_requirement_str(newly_in_stock_sizes_for_user)
available_store_str = "" if not stock_check_result.in_stock_stores else f"at location(s) {', '.join(newly_in_stock_stores_for_user)}"
if newly_in_stock_sizes_for_user:
messages.append(f'Size(s) {newly_in_stock_sizes_str} for {subscribed_user.item_name} just went in stock for {price_str_to_use} at ' + \
f'{stock_check_result.item_url} {available_store_str}.\n{formatted_in_stock_sizes_for_user}')
else:
messages.append(f'Found stock for {stock_check_result.item_name} for {price_str_to_use} at {stock_check_result.item_url} {available_store_str}')
subscribed_user.last_stock_status = True
if no_longer_in_stock_sizes:
if not in_stock_sizes_for_user_set:
subscribed_user.last_stock_status = False
messages.append(f'All size(s) being tracked for {stock_check_result.item_name} at {stock_check_result.item_url} are out of stock')
else:
no_longer_in_stock_sizes_str = get_size_requirement_str(no_longer_in_stock_sizes)
size_requirement_msg = f"with sizes {no_longer_in_stock_sizes_str}"
messages.append(f'Size(s) {size_requirement_msg} just went out of stock for {stock_check_result.item_name} at {stock_check_result.item_url}.\n{formatted_in_stock_sizes_for_user}')
if no_longer_in_stock_stores_for_user:
if not stock_check_result.in_stock_stores:
subscribed_user.last_stock_status = False
messages.append(f'{stock_check_result.item_name} ({stock_check_result.item_url}) just went out of stock for all stores')
else:
formatted_in_stock_stores = f"It is still in stock at locations {', '.join(stock_check_result.in_stock_stores)}"
no_longer_in_stock_stores_str = f"at location(s) {', '.join(no_longer_in_stock_stores_for_user)}"
messages.append(f'{stock_check_result.item_name} ({stock_check_result.item_url}) just went out of stock {no_longer_in_stock_stores_str}.\n{formatted_in_stock_stores}')
return messages
def check_validity_of_subscribed_sizes(subscribed_user: NotificationUser, stock_check_result: StockCheckResult) -> (bool, str):
message = ""
size_requirement_set = set(subscribed_user.stock_options.size_requirement)
available_sizes_for_user_set = size_requirement_set.intersection(set(stock_check_result.available_sizes))
unavailable_sizes_for_user_set = size_requirement_set.difference(set(stock_check_result.available_sizes))
if not available_sizes_for_user_set:
size_requirement_str = get_size_requirement_str(subscribed_user.stock_options.size_requirement)
message = f'You have been unsubscribed from {subscribed_user.item_name} at {subscribed_user.item_url} because all of ' + \
f'the size(s), {size_requirement_str}, you specified are not valid for the item. If a size is more than one word, please wrap it in quotation marks (ex. "US 0"). ' + \
f'Please subscribe again with correct size(s).'
return False, message
elif unavailable_sizes_for_user_set:
unavailable_sizes_str = get_size_requirement_str(unavailable_sizes_for_user_set)
available_sizes_str = get_size_requirement_str(available_sizes_for_user_set)
subscribed_user.stock_options.set_size_requirement(available_sizes_for_user_set)
message = f'Size(s) {unavailable_sizes_str} for item {subscribed_user.item_name} at {subscribed_user.item_url} do not exist. ' + \
f'You are still subscribed for size(s) {available_sizes_str}'
return True, message
def found_new_stock(subscribed_user: NotificationUser, newly_in_stock_sizes: Set[str], newly_in_stock_stores_for_user: Set[str]) -> bool:
return (not subscribed_user.last_stock_status and not subscribed_user.stock_options.size_requirement) or \
newly_in_stock_sizes or \
newly_in_stock_stores_for_user
def is_price_over_threshold_for_first_time(subscribed_user: NotificationUser, previous_price_to_use: float) -> bool:
return verify_price(subscribed_user.stock_options.price_threshold, previous_price_to_use)
def is_stock_check_result_different_for_user(stock_check_result: StockCheckResult, subscribed_user: NotificationUser, in_stock_sizes_for_user_set: Set[str]) -> bool:
return stock_check_result.is_in_stock != subscribed_user.last_stock_status or \
in_stock_sizes_for_user_set != subscribed_user.last_in_stock_sizes_for_user or \
set(stock_check_result.in_stock_stores) != set(subscribed_user.last_in_stock_stores_for_user)
def get_prices_to_use(subscribed_user: NotificationUser, stock_check_result: StockCheckResult, item: Item) -> (float, str, float):
if subscribed_user.stock_options.official_sites_only:
price_to_use = stock_check_result.stock_price.min_official_price
price_str_to_use = stock_check_result.stock_price.min_official_price_str
previous_price_to_use = math.inf if item.last_stock_check_result is None else item.last_stock_check_result.stock_price.min_official_price
else:
price_to_use = stock_check_result.stock_price.min_price
price_str_to_use = stock_check_result.stock_price.min_price_str
previous_price_to_use = math.inf if item.last_stock_check_result is None else item.last_stock_check_result.stock_price.min_price
return price_to_use, price_str_to_use, previous_price_to_use
def verify_price(price_threshold: float, price: float) -> bool:
return (price_threshold == math.inf and not price == math.inf) or (price != math.inf and price_threshold != math.inf and math.ceil(price) <= math.ceil(price_threshold))
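# Added illustration (not part of the original module): a price_threshold of
# math.inf means "no limit", while a price of math.inf means "no price found",
# so, assuming only the standard math module:
#   verify_price(math.inf, 49.99)    -> True   (no limit set, price known)
#   verify_price(math.inf, math.inf) -> False  (no price could be determined)
#   verify_price(50.0, 49.99)        -> True   (ceil(49.99) == 50 <= 50)
#   verify_price(50.0, 50.50)        -> False  (ceil(50.50) == 51 > 50)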
def should_check_size_requirement(size_requirement: List[str], available_sizes: List[str]) -> bool:
return len(size_requirement) > 0 and available_sizes != NO_IN_STOCK_SIZES
``` |
{
"source": "JPFrancoia/aws-data-wrangler",
"score": 2
} |
#### File: testing/test_awswrangler/test_metadata.py
```python
import awswrangler as wr
def test_metadata():
assert wr.__version__ == "1.0.3"
assert wr.__title__ == "awswrangler"
assert wr.__description__ == "Pandas on AWS."
assert wr.__license__ == "Apache License 2.0"
``` |
{
"source": "JPFrancoia/micropython",
"score": 3
} |
#### File: tests/multi_bluetooth/ble_gap_device_name.py
```python
from micropython import const
import time, machine, bluetooth
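# Added note (not part of the original test): the `multitest` object used below and
# the BDADDR global consumed by instance1() are injected by MicroPython's multi-test
# harness, which runs instance0() and instance1() on two separate targets. A hedged
# example invocation (the device paths and flags are assumptions for illustration):
#   ./run-multitests.py -i pyb:/dev/ttyACM0 -i pyb:/dev/ttyACM1 multi_bluetooth/ble_gap_device_name.py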
TIMEOUT_MS = 5000
_IRQ_CENTRAL_CONNECT = const(1)
_IRQ_CENTRAL_DISCONNECT = const(2)
_IRQ_PERIPHERAL_CONNECT = const(7)
_IRQ_PERIPHERAL_DISCONNECT = const(8)
_IRQ_GATTC_CHARACTERISTIC_RESULT = const(11)
_IRQ_GATTC_CHARACTERISTIC_DONE = const(12)
_IRQ_GATTC_READ_RESULT = const(15)
_IRQ_GATTC_READ_DONE = const(16)
GAP_DEVICE_NAME_UUID = bluetooth.UUID(0x2A00)
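# Added note: 0x2A00 is the Bluetooth SIG-assigned UUID of the "Device Name"
# characteristic in the Generic Access service, which is what the central
# instance discovers and reads below.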
waiting_events = {}
def irq(event, data):
if event == _IRQ_CENTRAL_CONNECT:
print("_IRQ_CENTRAL_CONNECT")
waiting_events[event] = data[0]
elif event == _IRQ_CENTRAL_DISCONNECT:
print("_IRQ_CENTRAL_DISCONNECT")
elif event == _IRQ_PERIPHERAL_CONNECT:
print("_IRQ_PERIPHERAL_CONNECT")
waiting_events[event] = data[0]
elif event == _IRQ_PERIPHERAL_DISCONNECT:
print("_IRQ_PERIPHERAL_DISCONNECT")
elif event == _IRQ_GATTC_CHARACTERISTIC_RESULT:
if data[-1] == GAP_DEVICE_NAME_UUID:
print("_IRQ_GATTC_CHARACTERISTIC_RESULT", data[-1])
waiting_events[event] = data[2]
else:
return
elif event == _IRQ_GATTC_CHARACTERISTIC_DONE:
print("_IRQ_GATTC_CHARACTERISTIC_DONE")
elif event == _IRQ_GATTC_READ_RESULT:
print("_IRQ_GATTC_READ_RESULT", bytes(data[-1]))
if event not in waiting_events:
waiting_events[event] = None
def wait_for_event(event, timeout_ms):
t0 = time.ticks_ms()
while time.ticks_diff(time.ticks_ms(), t0) < timeout_ms:
if event in waiting_events:
result = waiting_events[event]
del waiting_events[event]
return result
machine.idle()
raise ValueError("Timeout waiting for {}".format(event))
# Acting in peripheral role.
def instance0():
multitest.globals(BDADDR=ble.config("mac"))
# Test setting and getting the GAP device name before registering services.
ble.config(gap_name="GAP_NAME")
print(ble.config("gap_name"))
# Create an empty service and start advertising.
ble.gatts_register_services([])
print("gap_advertise")
multitest.next()
try:
# Do multiple iterations to test changing the name.
for iteration in range(2):
# Set the GAP device name and start advertising.
ble.config(gap_name="GAP_NAME{}".format(iteration))
print(ble.config("gap_name"))
ble.gap_advertise(20_000)
# Wait for central to connect, then wait for it to disconnect.
wait_for_event(_IRQ_CENTRAL_CONNECT, TIMEOUT_MS)
wait_for_event(_IRQ_CENTRAL_DISCONNECT, 4 * TIMEOUT_MS)
finally:
ble.active(0)
# Acting in central role.
def instance1():
multitest.next()
try:
value_handle = None
for iteration in range(2):
# Wait for peripheral to start advertising.
time.sleep_ms(500)
# Connect to peripheral.
print("gap_connect")
ble.gap_connect(*BDADDR)
conn_handle = wait_for_event(_IRQ_PERIPHERAL_CONNECT, TIMEOUT_MS)
if iteration == 0:
# Only do characteristic discovery on the first iteration,
# assume value_handle is unchanged on the second.
print("gattc_discover_characteristics")
ble.gattc_discover_characteristics(conn_handle, 1, 65535)
value_handle = wait_for_event(_IRQ_GATTC_CHARACTERISTIC_RESULT, TIMEOUT_MS)
wait_for_event(_IRQ_GATTC_CHARACTERISTIC_DONE, TIMEOUT_MS)
# Read the peripheral's GAP device name.
print("gattc_read")
ble.gattc_read(conn_handle, value_handle)
wait_for_event(_IRQ_GATTC_READ_RESULT, TIMEOUT_MS)
# Disconnect from peripheral.
print("gap_disconnect:", ble.gap_disconnect(conn_handle))
wait_for_event(_IRQ_PERIPHERAL_DISCONNECT, TIMEOUT_MS)
finally:
ble.active(0)
ble = bluetooth.BLE()
ble.active(1)
ble.irq(irq)
``` |
{
"source": "JPFrancoia/micropython-waveshare-epaper",
"score": 2
} |
#### File: JPFrancoia/micropython-waveshare-epaper/epaper2in7.py
```python
from micropython import const
from time import sleep_ms
# Display resolution
EPD_WIDTH = const(176)
EPD_HEIGHT = const(264)
# Display commands
PANEL_SETTING = const(0x00)
POWER_SETTING = const(0x01)
#POWER_OFF = const(0x02)
#POWER_OFF_SEQUENCE_SETTING = const(0x03)
POWER_ON = const(0x04)
#POWER_ON_MEASURE = const(0x05)
BOOSTER_SOFT_START = const(0x06)
DEEP_SLEEP = const(0x07)
DATA_START_TRANSMISSION_1 = const(0x10)
#DATA_STOP = const(0x11)
DISPLAY_REFRESH = const(0x12)
DATA_START_TRANSMISSION_2 = const(0x13) # not in datasheet
#PARTIAL_DATA_START_TRANSMISSION_1 = const(0x14)
#PARTIAL_DATA_START_TRANSMISSION_2 = const(0x15)
PARTIAL_DISPLAY_REFRESH = const(0x16)
LUT_FOR_VCOM = const(0x20) # LUT for VCOM(LUT1)
LUT_WHITE_TO_WHITE = const(0x21) # White to white LUT (LUTWW)
LUT_BLACK_TO_WHITE = const(0x22) # Black to white LUT (LUTBW/LUTR)
LUT_WHITE_TO_BLACK = const(0x23) # White to Black LUT (LUTWB/LUTW)
LUT_BLACK_TO_BLACK = const(0x24) # Black to Black LUT (LUTBB/LUTB)
PLL_CONTROL = const(0x30)
#TEMPERATURE_SENSOR_COMMAND = const(0x40)
#TEMPERATURE_SENSOR_CALIBRATION = const(0x41)
#TEMPERATURE_SENSOR_WRITE = const(0x42)
#TEMPERATURE_SENSOR_READ = const(0x43)
#VCOM_AND_DATA_INTERVAL_SETTING = const(0x50)
#LOW_POWER_DETECTION = const(0x51)
#TCON_SETTING = const(0x60)
#TCON_RESOLUTION = const(0x61)
#SOURCE_AND_GATE_START_SETTING = const(0x62)
#GET_STATUS = const(0x71)
#AUTO_MEASURE_VCOM = const(0x80)
#VCOM_VALUE = const(0x81)
VCM_DC_SETTING_REGISTER = const(0x82)
#PROGRAM_MODE = const(0xA0)
#ACTIVE_PROGRAM = const(0xA1)
#READ_OTP_DATA = const(0xA2)
POWER_OPTIMIZATION = const(0xF8) # Power optimization in flow diagram
BUSY = const(0) # 0=busy, 1=idle
class EPD:
def __init__(self, spi, cs, dc, rst, busy):
self.spi = spi
self.cs = cs
self.dc = dc
self.rst = rst
self.busy = busy
self.cs.init(self.cs.OUT, value=1)
self.dc.init(self.dc.OUT, value=0)
self.rst.init(self.rst.OUT, value=0)
self.busy.init(self.busy.IN)
self.width = EPD_WIDTH
self.height = EPD_HEIGHT
LUT_VCOM_DC = bytearray(b'\x00\x00\x00\x0F\x0F\x00\x00\x05\x00\x32\x32\x00\x00\x02\x00\x0F\x0F\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
LUT_WW = bytearray(b'\x50\x0F\x0F\x00\x00\x05\x60\x32\x32\x00\x00\x02\xA0\x0F\x0F\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') # R21H
LUT_BW = LUT_WW # R22H r
LUT_BB = bytearray(b'\xA0\x0F\x0F\x00\x00\x05\x60\x32\x32\x00\x00\x02\x50\x0F\x0F\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') # R24H b
LUT_WB = LUT_BB # R23H w
def _command(self, command, data=None):
self.dc(0)
self.cs(0)
self.spi.write(bytearray([command]))
self.cs(1)
if data is not None:
self._data(data)
def _data(self, data):
self.dc(1)
self.cs(0)
self.spi.write(data)
self.cs(1)
def init(self):
self.reset()
self._command(POWER_SETTING, b'\x03\x00\x2B\x2B\x09') # VDS_EN VDG_EN, VCOM_HV VGHL_LV[1] VGHL_LV[0], VDH, VDL, VDHR
self._command(BOOSTER_SOFT_START, b'\x07\x07\x17')
self._command(POWER_OPTIMIZATION, b'\x60\xA5')
self._command(POWER_OPTIMIZATION, b'\x89\xA5')
self._command(POWER_OPTIMIZATION, b'\x90\x00')
self._command(POWER_OPTIMIZATION, b'\x93\x2A')
self._command(POWER_OPTIMIZATION, b'\xA0\xA5')
self._command(POWER_OPTIMIZATION, b'\xA1\x00')
self._command(POWER_OPTIMIZATION, b'\x73\x41')
self._command(PARTIAL_DISPLAY_REFRESH, b'\x00')
self._command(POWER_ON)
self.wait_until_idle()
self._command(PANEL_SETTING, b'\xAF') # KW-BF KWR-AF BWROTP 0f
self._command(PLL_CONTROL, b'\x3A') # 3A 100HZ 29 150Hz 39 200HZ 31 171HZ
self._command(VCM_DC_SETTING_REGISTER, b'\x12')
sleep_ms(2)
self.set_lut()
def wait_until_idle(self):
while self.busy.value() == BUSY:
sleep_ms(100)
def reset(self):
self.rst(0)
sleep_ms(200)
self.rst(1)
sleep_ms(200)
def set_lut(self):
self._command(LUT_FOR_VCOM, self.LUT_VCOM_DC) # vcom
self._command(LUT_WHITE_TO_WHITE, self.LUT_WW) # ww --
self._command(LUT_BLACK_TO_WHITE, self.LUT_BW) # bw r
self._command(LUT_WHITE_TO_BLACK, self.LUT_WB) # wb w
self._command(LUT_BLACK_TO_BLACK, self.LUT_BB) # bb b
# draw the current frame memory
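# Added note: the loop below first fills the controller's first data buffer with
# 0xFF via DATA_START_TRANSMISSION_1, then streams the caller's frame_buffer via
# DATA_START_TRANSMISSION_2 before triggering DISPLAY_REFRESH. (Reading the 0xFF
# fill as "previous frame = all white" is an assumption, not from the original
# comments.)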
def display_frame(self, frame_buffer):
if frame_buffer is not None:
self._command(DATA_START_TRANSMISSION_1)
sleep_ms(2)
for i in range(0, self.width * self.height // 8):
self._data(bytearray([0xFF]))
sleep_ms(2)
self._command(DATA_START_TRANSMISSION_2)
sleep_ms(2)
for i in range(0, self.width * self.height // 8):
self._data(bytearray([frame_buffer[i]]))
sleep_ms(2)
self._command(DISPLAY_REFRESH)
self.wait_until_idle()
# to wake call reset() or init()
def sleep(self):
self._command(DEEP_SLEEP, b'\xA5')
``` |
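A minimal usage sketch for the EPD driver above (added illustration, not from the repository; the SPI bus number, pin assignments, and the use of `framebuf` for drawing are assumptions that depend on the target board):

```python
# Hypothetical wiring; adjust the SPI bus and pin numbers for your board.
from machine import Pin, SPI
import framebuf
import epaper2in7

spi = SPI(1, baudrate=2000000, polarity=0, phase=0)
e = epaper2in7.EPD(spi, cs=Pin(5), dc=Pin(17), rst=Pin(16), busy=Pin(4))
e.init()

# One bit per pixel: width * height // 8 bytes for a full frame.
buf = bytearray(e.width * e.height // 8)
fb = framebuf.FrameBuffer(buf, e.width, e.height, framebuf.MONO_HLSB)
fb.fill(1)                 # assuming bit value 1 renders as white on this panel
fb.text('Hello', 0, 0, 0)  # black text in the top-left corner
e.display_frame(buf)

e.sleep()                  # deep sleep; call reset()/init() to wake
```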