{
"source": "josejorgers/bobo-script",
"score": 3
}
#### File: src/utils/movements.py
```python
from robobo.movement.simple_movements import *
from constants.sensors_config import DISTANCE_CLOSE, DISTANCE_MEDIUM, DISTANCE_FAR, EDGE_DISTANCE, DISTANCE_TOO_CLOSE, DISTANCE_GOAL_CLOSE
from robobopy.utils.IR import IR
DIRECTIONS = {
'forward': move_forward,
'right': turn_right,
'left': turn_left,
'backward': move_backward,
'custom': diagonal_movement
}
def is_obstacle_far(robot):
return robot.readIRSensor(IR.FrontC) < DISTANCE_FAR and\
robot.readIRSensor(IR.FrontRR) < DISTANCE_FAR and\
robot.readIRSensor(IR.FrontLL) < DISTANCE_FAR
def is_obstacle_medium(robot):
return robot.readIRSensor(IR.FrontC) < DISTANCE_MEDIUM and\
robot.readIRSensor(IR.FrontRR) < DISTANCE_MEDIUM and\
robot.readIRSensor(IR.FrontLL) < DISTANCE_MEDIUM
def is_obstacle_close(robot):
return robot.readIRSensor(IR.FrontC) > DISTANCE_CLOSE or\
robot.readIRSensor(IR.FrontRR) > DISTANCE_TOO_CLOSE or\
robot.readIRSensor(IR.FrontLL) > DISTANCE_TOO_CLOSE
def is_far_from_edge(robot):
return robot.readIRSensor(IR.FrontL) > EDGE_DISTANCE or\
robot.readIRSensor(IR.FrontR) > EDGE_DISTANCE or\
robot.readIRSensor(IR.BackL) > EDGE_DISTANCE or\
robot.readIRSensor(IR.BackR) > EDGE_DISTANCE
def is_goal_close(robot):
return robot.readIRSensor(IR.FrontC) > DISTANCE_GOAL_CLOSE
```
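A minimal driving loop built on the helpers above, offered only as a sketch: the `Robobo('localhost')`/`connect()` entry point from robobopy, the `utils.movements` import path, and the assumption that each movement function takes the robot as its only argument are not confirmed by this file (only `readIRSensor` is).

```python
# Hypothetical avoidance loop; run from src/ so utils.movements resolves.
from robobopy.Robobo import Robobo
from utils.movements import DIRECTIONS, is_obstacle_close, is_obstacle_far

robot = Robobo('localhost')  # address of the phone/simulator running Robobo
robot.connect()

for _ in range(20):
    if is_obstacle_close(robot):
        DIRECTIONS['backward'](robot)  # back off from the obstacle
        DIRECTIONS['left'](robot)      # and turn to look for free space
    elif is_obstacle_far(robot):
        DIRECTIONS['forward'](robot)   # path is clear: keep going

robot.disconnect()
```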
{
"source": "josejorgers/gimme-the-code",
"score": 3
}
#### File: tests/scrapper/test_google_search.py
```python
import os
import sys
# Point at the project root (two levels up from tests/scrapper/) instead of a hard-coded local path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from gtcode.scrapper.google_search import search, get_code_from_results
def test_google_search():
query = "create a class in python"
results = search(query)
prefix = [True for r in results if r[:4] == 'http']
assert len(results) > 0 and len(prefix) == len(results)
def test_get_code_from_results():
links = [
'https://stackoverflow.com/questions/5041008/how-to-find-elements-by-class',
'https://www.kite.com/python/answers/how-to-find-html-elements-by-class-with-beautifulsoup-in-python'
]
code = get_code_from_results(links)
assert len(code) > 0
```
{
"source": "JoseJorgeXL/CoolPyler",
"score": 3
}
#### File: CoolPyler/checksemantic/scope.py
```python
from parsing import cool_ast
class Scope:
def __init__(self, parent=None, inside=None):
self.inside = inside
self.locals = {}
self.methods = []
self.parent = parent
self.children = []
self.self_type = []
self.types = []
if not parent:
self.basic_types()
def basic_types(self):
self.types.append(('Object', None, []))
self.types.append(('Int', 'Object', []))
self.types.append(('Void', 'Object', []))
self.types.append(('Bool', 'Object', []))
self.types.append(('String', 'Object', []))
io_methods = [
cool_ast.MethodNode('out_string',[cool_ast.ParamNode('x','String')],'SELF_TYPE',None),
cool_ast.MethodNode('out_int', [cool_ast.ParamNode('x','Int')], 'SELF_TYPE',None),
cool_ast.MethodNode('in_string', [], 'String', None),
cool_ast.MethodNode('in_int', [], 'Int', None)
]
self.types.append(('IO', 'Object', io_methods))
def add_type(self, type_name, methods, parent = 'Object'):
if not self.check_type(type_name) and self.check_type(parent):
self.types.append((type_name, parent, []))
for m in methods:
if not self.define_method(type_name, m):
return False
return True
return False
def define_method(self, type_name, method):
curr = self.look_for_type(type_name)
for tp in curr.types:
if tp[0] == type_name and not curr.look_for_method(type_name, method.name) and not self.is_defined(method.name):
tp[2].append(method)
if method.return_type == 'SELF_TYPE':
self.self_type.append(method.name)
return True
return False
def add_method(self, method):
self.methods.append(method)
def get_type(self, vname):
if vname == 'self':
return self.inside
if not self.is_defined(vname):
return False
curr = self
while curr != None:
if curr.is_local(vname):
return curr.locals[vname]
curr = curr.parent
return False
def local_type(self, type_name):
for t in self.types:
if t[0] == type_name:
return t
return False
def check_type(self, type_name):
if type_name == 'SELF_TYPE':
return self.inside
curr = self
while curr:
t = curr.local_type(type_name)
if t:
return t
curr = curr.parent
return False
def define_variable(self, vname, vtype):
if vtype == 'SELF_TYPE' or self.check_type(vtype):
self.locals[vname] = vtype
if vtype == 'SELF_TYPE':
self.self_type.append(vname)
return True
return False
def create_child_scope(self, inside=None):
child_scope = Scope(self, inside=self.inside) if not inside else Scope(self, inside)
self.children.append(child_scope)
return child_scope
def is_defined(self, vname):
if vname == 'self':
return True
current = self
while current:
if vname in current.locals:
return True
current = current.parent
return False
def is_local(self, vname):
return vname in self.locals.keys()
def is_local_feature(self, mname):
return mname in [m.name.value for m in self.methods]
def is_defined_in_type(self, t, mname):
curr = self
while curr:
for _type in curr.types:
if _type[0] == t:
for m in _type[2]:
if m.name.value == mname:
return m
if _type[1]:
return self.is_defined_in_type(_type[1], mname)
curr = curr.parent
return False
def get_local_method(self, mname):
for f in self.methods:
if f.name.value == mname:
return f
return False
def get_method_by_name(self, mname):
curr = self
while curr and not curr.get_local_method(mname):
curr = curr.parent
return False if not curr else curr.get_local_method(mname)
def get_method(self, tp, mname):
curr = self
while curr and not curr.is_local_feature(mname):
curr = curr.parent
if not curr:
return False
return curr.get_local_method(mname)
def look_for_method(self, t, mname):
currt = t
curr = self
while currt:
tp = curr.check_type(currt)
if not tp:
return False
for mn in tp[2]:
if mn.name.value == mname:
return mn
currt = tp[1]
return False
def look_for_type(self, t):
curr = self
while curr and not curr.local_type(t):
curr = curr.parent
return curr
def inherits(self, t1, t2, level):
curr = self.look_for_type(t1)
if not curr:
return False, -1
if t1 == t2:
return True, level
p = [t[1] for t in curr.types if t[0] == t1]
if not p:
return False, -1
return curr.inherits(p[0], t2, level + 1)
def join(self, t1, t2):
if self.inherits(t1,t2,0)[0]:
return t2
if self.inherits(t2, t1,0)[0]:
return t1
curr = self.look_for_type(t1)
p = [t[1] for t in curr.types if t[0] == t1][0]
return self.join(p, t2)
```
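A small sketch of how this `Scope` API fits together, exercising only methods defined above; the `checksemantic.scope` import path assumes the snippet runs inside the CoolPyler repo.

```python
# Illustrative only: type registration, variable definition and chained lookup.
from checksemantic.scope import Scope

global_scope = Scope()                          # root scope registers the basic types
global_scope.add_type('A', methods=[], parent='Object')

class_scope = global_scope.create_child_scope(inside='A')
class_scope.define_variable('x', 'Int')         # True: 'Int' is a known type

method_scope = class_scope.create_child_scope()
print(method_scope.get_type('x'))               # 'Int', found by walking up the parents
print(method_scope.get_type('self'))            # 'A', the enclosing type
print(global_scope.check_type('Undeclared'))    # False
```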
{
"source": "jose-jvmas/sota-music-tagging-models",
"score": 3
}
#### File: sota-music-tagging-models/training/corpora.py
```python
import os
import numpy as np
class SingleCorpus():
def read_partition(self, config, partition):
# Source path:
read_path = os.path.join('embeddings', config.dataset, config.model_type)
# Embeddings:
with open(os.path.join(read_path, partition + '_emb.csv')) as fin:
FileIn = fin.readlines()
x = np.array([[float(u) for u in line.split(",")] for line in FileIn])
# GT:
with open(os.path.join(read_path, partition + '_gt.csv')) as fin:
FileIn = fin.readlines()
y = np.array([[float(u) for u in line.split(",")] for line in FileIn])
return x, y
def __init__(self, config):
self.x_train_NC, self.y_train_onehot = self.read_partition(config, partition = 'train')
self.train_indexes = list(range(len(self.x_train_NC)))
self.x_valid_NC, self.y_valid_onehot = self.read_partition(config, partition = 'valid')
self.valid_indexes = list(range(len(self.x_valid_NC)))
self.x_test_NC, self.y_test_onehot = self.read_partition(config, partition = 'test')
self.test_indexes = list(range(len(self.x_test_NC)))
```
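`read_partition` only needs `config.dataset`, `config.model_type`, and six CSV files under `embeddings/<dataset>/<model_type>/`; a hedged way to wire that up, where the `SimpleNamespace` stand-in and the example dataset/model names are assumptions rather than the project's real config object.

```python
# Illustrative: a minimal config stand-in with the two attributes the loader reads.
from types import SimpleNamespace

from training.corpora import SingleCorpus

config = SimpleNamespace(dataset='mtat', model_type='fcn')  # hypothetical values

# Expects embeddings/mtat/fcn/{train,valid,test}_{emb,gt}.csv, one comma-separated
# embedding vector / one-hot label vector per line.
corpus = SingleCorpus(config)
print(corpus.x_train_NC.shape, corpus.y_train_onehot.shape)
```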
{
"source": "josekang/prfile-django-api",
"score": 2
}
#### File: prfile-django-api/profiles_api/views.py
```python
from django.shortcuts import render
from rest_framework import status, viewsets, filters
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from profiles_api import serializers
from profiles_api.models import UserProfile, ProfileFeedItem
from profiles_api import permissions
# Create your views here.
class HelloAPIView(APIView):
""" Return features of an API """
serializer_class = serializers.HelloSerializer
def get(self, request, format=None):
""" Shows the methods used """
api_views = [
"Similar to traditional django mehods",
"Methods are GET, POST, PUT, PATCH, DELETE"
]
response = {
"status": status.HTTP_200_OK,
"message": "Shows the output as a dictionary, power of API",
"data": api_views
}
return Response(response)
def post(self, request, format=None):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f"hello {name}"
response = {
"status": status.HTTP_200_OK,
"message": message
}
return Response(response)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST,
)
def put(self, request, pk=None):
response = {
"status": status.HTTP_200_OK,
"message": "Updated successfully"
}
return Response(response)
def patch(self, request, pk=None):
response = {
"status": status.HTTP_200_OK,
"message": "Patched successfully"
}
return Response(response)
def delete(self, request, pk=None):
response = {
"status": status.HTTP_200_OK,
"message": "Deleted successfully"
}
return Response(response)
class HelloViewSet(viewsets.ModelViewSet):
serializer_class = serializers.HelloSerializer
def list(self, request):
a_viewset = [
"This is a vieset",
"Provides more functionality with less code"
]
response = {
"status":status.HTTP_200_OK,
"mesage": "A list of all viewset",
"data": a_viewset
}
return Response(response)
def create(self, request, format=None):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f"hello {name}"
response = {
"status": status.HTTP_200_OK,
"message": message
}
return Response(response)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST,
)
def retrieve(self, request, pk=None):
return Response({"Method": "RETRIEVE"})
def update(self, request, pk=None):
return Response({"Method": "PUT"})
def partial_update(self, request, pk=None):
return Response({"Method": "PATCH"})
def destroy(self, request, pk=None):
return Response({"Method": "delete"})
class UserProfileViewSet(viewsets.ModelViewSet):
queryset = UserProfile.objects.all()
serializer_class = serializers.UserProfileSerializer
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnPermission,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginViewSet(ObtainAuthToken):
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ProfileFeedItemViewSet(viewsets.ModelViewSet):
queryset = ProfileFeedItem.objects.all()
serializer_class = serializers.ProfileFeedItemSerializer
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnPermission, IsAuthenticated)
filter_backends = (filters.SearchFilter,)
search_fields = ('user_profile', 'status_text',)
def perform_create(self, serializer):
serializer.save(user_profile=self.request.user)
```
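These views still need to be mounted in a `urls.py`; a sketch using DRF's `DefaultRouter`, where the route names and the file location are chosen here for illustration and are not taken from the repo.

```python
# profiles_api/urls.py (hypothetical): standard DRF routing for the views above.
from django.urls import include, path
from rest_framework.routers import DefaultRouter

from profiles_api import views

router = DefaultRouter()
# HelloViewSet has no queryset, so a basename is required.
router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.ProfileFeedItemViewSet)

urlpatterns = [
    path('hello-view/', views.HelloAPIView.as_view()),
    path('login/', views.UserLoginViewSet.as_view()),
    path('', include(router.urls)),
]
```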
{
"source": "josekang/recipe-app-api",
"score": 3
}
#### File: app/app/tests.py
```python
from django.test import TestCase
from app.calc import add, subtract
class CalcTest(TestCase):
def test_add_numbers(self):
""" Add two numbers """
self.assertEqual(add(5,5), 10)
def test_subtract_numbers(self):
""" Minus """
self.assertEqual(subtract(10, 5), 5)
```
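The tests import `add` and `subtract` from `app.calc`, which is not included in this record; a minimal implementation that would satisfy them (an assumption about the real module) is:

```python
# app/calc.py (assumed): just enough to make CalcTest pass.
def add(x, y):
    """Return the sum of two numbers."""
    return x + y


def subtract(x, y):
    """Return x minus y."""
    return x - y
```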
{
"source": "JoseKilo/click-plugins",
"score": 2
}
#### File: click-plugins/tests/test_plugins.py
```python
from pkg_resources import EntryPoint
from pkg_resources import iter_entry_points
from pkg_resources import working_set
import click
from click_plugins import with_plugins
import pytest
# Create a few CLI commands for testing
@click.command()
@click.argument('arg')
def cmd1(arg):
"""Test command 1"""
click.echo('passed')
@click.command()
@click.argument('arg')
def cmd2(arg):
"""Test command 2"""
click.echo('passed')
# Manually register plugins in an entry point and put broken plugins in a
# different entry point.
# The `DistStub()` class gets around an exception that is raised when
# `entry_point.load()` is called. By default `load()` has `requires=True`
# which calls `dist.requires()` and the `click.group()` decorator
# doesn't allow us to change this. Because we are manually registering these
# plugins the `dist` attribute is `None` so we can just create a stub that
# always returns an empty list since we don't have any requirements. A full
# `pkg_resources.Distribution()` instance is not needed because there isn't
# a package installed anywhere.
class DistStub(object):
def requires(self, *args):
return []
working_set.by_key['click']._ep_map = {
'_test_click_plugins.test_plugins': {
'cmd1': EntryPoint.parse(
'cmd1=tests.test_plugins:cmd1', dist=DistStub()),
'cmd2': EntryPoint.parse(
'cmd2=tests.test_plugins:cmd2', dist=DistStub())
},
'_test_click_plugins.broken_plugins': {
'before': EntryPoint.parse(
'before=tests.broken_plugins:before', dist=DistStub()),
'after': EntryPoint.parse(
'after=tests.broken_plugins:after', dist=DistStub()),
'do_not_exist': EntryPoint.parse(
'do_not_exist=tests.broken_plugins:do_not_exist', dist=DistStub())
}
}
# Main CLI groups - one with good plugins attached and the other broken
@with_plugins(iter_entry_points('_test_click_plugins.test_plugins'))
@click.group()
def good_cli():
"""Good CLI group."""
pass
@with_plugins(iter_entry_points('_test_click_plugins.broken_plugins'))
@click.group()
def broken_cli():
"""Broken CLI group."""
pass
def test_registered():
# Make sure the plugins are properly registered. If this test fails it
# means that some of the for loops in other tests may not be executing.
assert len([ep for ep in iter_entry_points('_test_click_plugins.test_plugins')]) > 1
assert len([ep for ep in iter_entry_points('_test_click_plugins.broken_plugins')]) > 1
def test_register_and_run(runner):
result = runner.invoke(good_cli)
assert result.exit_code == 0
for ep in iter_entry_points('_test_click_plugins.test_plugins'):
cmd_result = runner.invoke(good_cli, [ep.name, 'something'])
assert cmd_result.exit_code == 0
assert cmd_result.output.strip() == 'passed'
def test_broken_register_and_run(runner):
result = runner.invoke(broken_cli)
assert result.exit_code == 0
assert u'\U0001F4A9' in result.output or u'\u2020' in result.output
for ep in iter_entry_points('_test_click_plugins.broken_plugins'):
cmd_result = runner.invoke(broken_cli, [ep.name])
assert cmd_result.exit_code != 0
assert 'Traceback' in cmd_result.output
def test_group_chain(runner):
# Attach a sub-group to a CLI and execute it without arguments to make
# sure both the sub-group and all the parent group's commands are present
@good_cli.group()
def sub_cli():
"""Sub CLI."""
pass
result = runner.invoke(good_cli)
assert result.exit_code == 0
assert sub_cli.name in result.output
for ep in iter_entry_points('_test_click_plugins.test_plugins'):
assert ep.name in result.output
# Same as above but the sub-group has plugins
@with_plugins(plugins=iter_entry_points('_test_click_plugins.test_plugins'))
@good_cli.group(name='sub-cli-plugins')
def sub_cli_plugins():
"""Sub CLI with plugins."""
pass
result = runner.invoke(good_cli, ['sub-cli-plugins'])
assert result.exit_code == 0
for ep in iter_entry_points('_test_click_plugins.test_plugins'):
assert ep.name in result.output
# Execute one of the sub-group's commands
result = runner.invoke(good_cli, ['sub-cli-plugins', 'cmd1', 'something'])
assert result.exit_code == 0
assert result.output.strip() == 'passed'
def test_exception():
# Decorating something that isn't a click.Group() should fail
with pytest.raises(TypeError):
@with_plugins([])
@click.command()
def cli():
"""Whatever"""
```
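The tests rely on a `runner` fixture that is not defined in this file; it presumably lives in a `conftest.py`, roughly as sketched below (the exact location is an assumption).

```python
# tests/conftest.py (assumed): provides the `runner` fixture used above.
import click.testing
import pytest


@pytest.fixture(scope='function')
def runner(request):
    """A fresh Click CLI runner per test."""
    return click.testing.CliRunner()
```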
{
"source": "JoseKilo/week_parser",
"score": 3
}
#### File: tests/functional/test_week_parser.py
```python
from __future__ import unicode_literals
import os
from argparse import Namespace
import pytest
from week_parser.main import get_options, main
expected_output_1 = (
"""[{'day': 'mon', 'description': 'first_desc 1', 'square': 1, 'value': 1},
{'day': 'tue', 'description': 'first_desc 25', 'square': 25, 'value': 5},
{'day': 'wed', 'description': 'first_desc 4', 'square': 4, 'value': 2},
{'day': 'thu', 'description': 'first_desc 6', 'double': 6, 'value': 3},
{'day': 'fri', 'description': 'first_desc 6', 'double': 6, 'value': 3}]
"""
)
expected_output_2 = (
"""[{'day': 'mon', 'description': 'second_desc 4', 'square': 4, 'value': 2},
{'day': 'tue', 'description': 'second_desc 4', 'square': 4, 'value': 2},
{'day': 'wed', 'description': 'second_desc 4', 'square': 4, 'value': 2},
{'day': 'thu', 'description': 'second_desc 4', 'double': 4, 'value': 2},
{'day': 'fri', 'description': 'second_desc 6', 'double': 6, 'value': 3}]
"""
)
expected_output_3 = (
"""[{'day': 'mon', 'description': 'third_desc 9', 'square': 9, 'value': 3},
{'day': 'tue', 'description': 'third_desc 9', 'square': 9, 'value': 3},
{'day': 'wed', 'description': 'third_desc 4', 'square': 4, 'value': 2},
{'day': 'thu', 'description': 'third_desc 4', 'double': 4, 'value': 2},
{'day': 'fri', 'description': 'third_desc 2', 'double': 2, 'value': 1}]
"""
)
missing_days_output = (
"""[{'day': 'mon', 'description': 'third_desc 9', 'square': 9, 'value': 3},
{'day': 'tue', 'description': 'third_desc 9', 'square': 9, 'value': 3},
{'day': 'wed', 'description': 'third_desc 4', 'square': 4, 'value': 2},
{'day': 'fri', 'description': 'third_desc 2', 'double': 2, 'value': 1}]
"""
)
non_ascii_output = (
"""[{'day': 'mon', 'description': 'café 9', 'square': 9, 'value': 3},
{'day': 'tue', 'description': 'café 9', 'square': 9, 'value': 3},
{'day': 'wed', 'description': 'café 4', 'square': 4, 'value': 2},
{'day': 'thu', 'description': 'café 4', 'double': 4, 'value': 2},
{'day': 'fri', 'description': 'café 2', 'double': 2, 'value': 1}]
"""
)
escaped_chars_output = (
"""[{'day': 'mon', 'description': '\\'A\\', "b" 9', 'square': 9, 'value': 3},
{'day': 'tue', 'description': '\\'A\\', "b" 9', 'square': 9, 'value': 3},
{'day': 'wed', 'description': '\\'A\\', "b" 4', 'square': 4, 'value': 2},
{'day': 'thu', 'description': '\\'A\\', "b" 4', 'double': 4, 'value': 2},
{'day': 'fri', 'description': '\\'A\\', "b" 2', 'double': 2, 'value': 1}]
"""
)
test_files = {
os.path.join('csv_files', '1.csv'): expected_output_1,
os.path.join('csv_files', '2.csv'): expected_output_2,
os.path.join('csv_files', '3.csv'): expected_output_3,
os.path.join('csv_files', 'column_no_name.csv'): expected_output_3,
os.path.join('csv_files', 'multiple_rows.csv'): expected_output_3,
os.path.join('csv_files', 'missing_days.csv'): missing_days_output,
os.path.join('csv_files', 'empty.csv'): '[]\n',
os.path.join('csv_files', 'only_headers.csv'): '[]\n',
os.path.join('csv_files', 'non_ascii.csv'): non_ascii_output,
os.path.join('csv_files', 'escaped_chars.csv'): escaped_chars_output,
}
def test_main(capsys):
"""
All the example input files produce the expected output
"""
for filename, expected_output in test_files.items():
main([filename])
out, err = capsys.readouterr()
assert err == ''
assert out == expected_output
def test_main_file_does_not_exist(capsys):
"""
If the input file doesn't exist, we get an error message
"""
filename = 'does_not_exist.csv'
main([filename])
out, err = capsys.readouterr()
assert out == ''
assert err == 'No such file or directory\n'
def test_main_not_int_value(capsys):
"""
If the day value is not an integer, we get an error message
"""
filename = os.path.join('csv_files', 'not_int_value.csv')
main([filename])
out, err = capsys.readouterr()
assert out == ''
assert err == ("Invalid file format: "
"invalid literal for int() with base 10: 'not an int'\n")
def test_get_options():
"""
A valid path argument will produce a valid argparse object
"""
filename = 'anything.csv'
options = get_options([filename])
assert options == Namespace(filename=filename)
def test_get_options_help(capsys):
"""
A '--help' flag shows some usage help
"""
with pytest.raises(SystemExit):
get_options(['--help'])
out, err = capsys.readouterr()
assert err == ''
assert out.startswith('usage:')
assert 'WeekParser' in out
def test_get_options_invalid(capsys):
"""
An invalid sequence of arguments will produce some usage help
"""
with pytest.raises(SystemExit):
get_options(['one_file.csv', 'another_file.csv'])
out, err = capsys.readouterr()
assert out == ''
assert err.startswith('usage:')
assert 'unrecognized argument' in err
```
#### File: week_parser/tests/utils.py
```python
from contextlib import contextmanager
from mock import mock_open as original_mock_open
from mock import patch
@contextmanager
def mock_open(file_content):
"""
Mock '__builtin__.open' with the content provided
Bug work-around: https://bugs.python.org/issue21258
"""
mock = original_mock_open(read_data=file_content)
with patch('six.moves.builtins.open', mock) as mocked_open:
mock.return_value.__iter__ = lambda self: iter(self.readline, '')
yield mocked_open
```
#### File: week_parser/week_parser/base.py
```python
import csv
import six
DAYS = ('mon', 'tue', 'wed', 'thu', 'fri')
SQUARE_DAYS = ('mon', 'tue', 'wed')
DOUBLE_DAYS = ('thu', 'fri')
DAY_TO_NUMBER = {day: i for i, day in enumerate(DAYS)}
NUMBER_TO_DAY = {i: day for i, day in enumerate(DAYS)}
def parse_week(filename):
"""
We open an input filename, parse it and return its data.
"""
week_data = {}
with open(filename) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
week_data = parse_row(row)
return [week_data[day] for day in DAYS if week_data.get(day) is not None]
def parse_row(row):
"""
Parse a row from an input CSV file and return its data.
The expected input format is a dictionary of column_names -> column_values.
"""
week_data = {}
description = None
for column, value in six.iteritems(row):
if column == 'description':
description = value
elif column in DAYS:
week_data[column] = {'day': column, 'value': int(value)}
elif column is not None and '-' in column:
start, end = column.split('-')
start, end = DAY_TO_NUMBER.get(start), DAY_TO_NUMBER.get(end)
if start is not None and end is not None:
for number in six.moves.xrange(start, end + 1):
day = NUMBER_TO_DAY[number]
week_data[day] = {'day': day, 'value': int(value)}
populate_extra_data(week_data, description)
return week_data
def populate_extra_data(week_data, description):
"""
Once the daily data has been collected, we need to append the extra value
and the description to every day.
"""
for day, day_week_data in six.iteritems(week_data):
value = day_week_data['value']
if day in SQUARE_DAYS:
extra_value = value ** 2
day_week_data['square'] = extra_value
elif day in DOUBLE_DAYS:
extra_value = value * 2
day_week_data['double'] = extra_value
day_week_data['description'] = '{} {}'.format(description, extra_value)
```
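From `parse_row`, the expected CSV layout is a `description` column plus single-day columns (`mon` … `fri`) or ranges such as `thu-fri`; a short illustrative run, where the sample row is invented and the printed values follow the logic above.

```python
# Illustrative only: feed one parsed CSV row through parse_row.
from week_parser.base import parse_row

row = {'mon': '1', 'tue': '5', 'wed': '2', 'thu-fri': '3', 'description': 'first_desc'}
week = parse_row(row)

# Square days get value ** 2, double days get value * 2, and the description
# is suffixed with that extra value:
print(week['mon'])  # {'day': 'mon', 'value': 1, 'square': 1, 'description': 'first_desc 1'}
print(week['fri'])  # {'day': 'fri', 'value': 3, 'double': 6, 'description': 'first_desc 6'}
```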
{
"source": "josekudiyirippil/queue-management",
"score": 2
}
#### File: bookings/exam/exam_download.py
```python
from flask import g, Response
from flask_restx import Resource
import io
import logging
import urllib
from werkzeug.wsgi import FileWrapper
from sqlalchemy import exc
from app.models.theq import CSR
from app.models.bookings import Exam
from app.utilities.bcmp_service import BCMPService
from qsystem import api, my_print
from app.auth.auth import jwt
@api.route("/exams/<int:exam_id>/download/", methods=["GET"])
class ExamStatus(Resource):
bcmp_service = BCMPService()
@jwt.requires_auth
def get(self, exam_id):
csr = CSR.find_by_username(g.jwt_oidc_token_info['username'])
try:
exam = Exam.query.filter_by(exam_id=exam_id).first()
if not (exam.office_id == csr.office_id or csr.ita2_designate == 1):
return {"The Exam Office ID and CSR Office ID do not match!"}, 403
job = self.bcmp_service.check_exam_status(exam)
my_print(job)
if job['jobStatus'] == 'PACKAGE_GENERATED':
package_url = job["jobProperties"]["EXAM_PACKAGE_URL"]
req = urllib.request.Request(package_url)
response = urllib.request.urlopen(req).read()
exam_file = io.BytesIO(response)
file_wrapper = FileWrapper(exam_file)
return Response(file_wrapper,
mimetype="application/pdf",
direct_passthrough=True,
headers={
"Content-Disposition": 'attachment; filename="%s.csv"' % exam.exam_id,
"Content-Type": "application/pdf"
})
else:
return {'message': 'Package not yet generated', 'status': job['jobStatus']}, 400
except exc.SQLAlchemyError as error:
logging.error(error, exc_info=True)
return {'message': 'API is down'}, 500
```
#### File: services/feedback/feedback_camunda_service.py
```python
import os, requests, json
from typing import Dict
from jinja2 import Environment, FileSystemLoader
from .feedback_base_service import FeedbackBaseService
from flask import jsonify
class FeedbackCamundaService(FeedbackBaseService):
"""Implementation from FeedbackService."""
def submit(self, payload):
"""Submit feedback to Camunda API"""
camunda_service_endpoint = os.getenv('FEEDBACK_CAMUNDA_URL')
keycloak_endpoint = os.getenv('FEEDBACK_AUTH_URL')
keycloak_client_id = os.getenv('FEEDBACK_AUTH_CLIENT_ID')
keycloak_client_secret = os.getenv('FEEDBACK_AUTH_CLIENT_SECRET')
auth_payload = {"grant_type":"client_credentials",
"client_id":keycloak_client_id,
"client_secret":keycloak_client_secret}
try:
auth_response = requests.post(keycloak_endpoint,data=auth_payload)
access_token = auth_response.json()['access_token']
headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {access_token}'}
feedback_response = requests.post(camunda_service_endpoint,
headers=headers,
data=json.dumps(payload), timeout=10.0)
response_code = feedback_response.status_code
if response_code not in (200, 201, 202):
raise Exception('Camunda API Failure')
return feedback_response.status_code
except Exception as e:
feedback_type = payload['variables']['engagement']['value']
feedback_message = payload['variables']['citizen_comments']['value']
response_required = payload['variables']['response']['value']
citizen_name = payload['variables']['citizen_name']['value']
citizen_contact = payload['variables']['citizen_contact']['value']
citizen_email = payload['variables']['citizen_email']['value']
service_date = payload['variables']['service_date']['value']
submit_date_time = payload['variables']['submit_date_time']['value']
ENV = Environment(loader=FileSystemLoader('.'), autoescape=True)
template = ENV.get_template('camunda_email_template.template')
body = template.render(feedback_type =feedback_type,
feedback_message =feedback_message,
response_required =response_required,
citizen_name =citizen_name,
citizen_contact =citizen_contact,
citizen_email =citizen_email,
service_date =service_date,
submit_date_time =submit_date_time)
application_auth_url = os.getenv('APP_AUTH_URL')
application_client_id = os.getenv('APP_AUTH_CLIENT_ID')
application_client_secret = os.getenv('APP_AUTH_CLIENT_SECRET')
notification_email_url = os.getenv('NOTIFICATION_EMAIL_URL')
email_to = (os.getenv('NOTIFICATION_EMAIL_TO')).split(",")
app_auth_payload = {"grant_type":"client_credentials",
"client_id":application_client_id,
"client_secret":application_client_secret}
email_payload = {
'bodyType': 'text',
'body': body,
'subject': 'Citizen Feedback - Camunda API failure',
'to': email_to
}
app_auth_response = requests.post(application_auth_url,data=app_auth_payload)
app_access_token = app_auth_response.json()['access_token']
email_headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {app_access_token}'}
email_response = requests.post(notification_email_url,
headers=email_headers,
data=json.dumps(email_payload))
print(email_response)
print(e)
return email_response.status_code
```
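The fallback branch reads a fixed set of keys from `payload['variables']`, which pins down the payload shape this service expects; a sketch of such a payload, with the keys taken from the code above and the values invented.

```python
# Example payload for FeedbackCamundaService.submit(); keys mirror what the
# except-branch reads, values are placeholders.
payload = {
    "variables": {
        "engagement": {"value": "Service feedback"},
        "citizen_comments": {"value": "The queue moved quickly."},
        "response": {"value": "Yes"},
        "citizen_name": {"value": "Jane Doe"},
        "citizen_contact": {"value": "555-0100"},
        "citizen_email": {"value": "jane@example.com"},
        "service_date": {"value": "2021-06-01"},
        "submit_date_time": {"value": "2021-06-01T10:15:00"},
    }
}

# status = FeedbackCamundaService().submit(payload)  # requires the env vars to be set
```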
#### File: jobs/appointment_reminder/flush_expired_draft_appointments.py
```python
from utils.logging import setup_logging
import os
import sys
from flask import Flask
import config
def create_app(run_mode=os.getenv('FLASK_ENV', 'production')):
"""Return a configured Flask App using the Factory method."""
from qsystem import db
app = Flask(__name__)
app.config.from_object(config.CONFIGURATION[run_mode])
app.logger.info('<<<< Starting Flush Expired Drafts Jobs >>>>')
db.init_app(app)
register_shellcontext(app)
return app
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'app': app
} # pragma: no cover
app.shell_context_processor(shell_context)
def run():
application = create_app()
application.app_context().push()
flush_drafts(application)
def flush_drafts(app):
app.logger.debug('<<< Starting flush_drafts job')
# todo - get appointment query, for now just select *
return 'todo'
if __name__ == "__main__":
run()
```
{
"source": "Joselacerdajunior/digital-processing-of-automotive-plates",
"score": 3
}
#### File: digital-processing-of-automotive-plates/src/main.py
```python
from lib.myLib import *
import os
nameImages = [
'img1.jpg',
'img2.jpg',
'img3.jpg',
'img4.jpg'
]
platesInDataBase = ["EWK-7037", "RIO2A18"]
plates = ["", "", "", ""]
authorization = ["", "", "", ""]
image = ""
height = [ 0, 0, 0, 0]
width = [ 0, 0, 0, 0]
channels = [ 0, 0, 0, 0]
red = '\033[91m'
green = '\033[92m'
yellow = '\033[93m'
default = '\033[0m'
bold = '\033[1m'
def showInfo():
os.system('cls' if os.name == 'nt' else 'clear')
print('\033[95m' + "{:#^92}".format(" START ") + '\033[0m')
print("")
print("List of authorized plates:")
for index, plate in enumerate(platesInDataBase):
print(" - Plate %d -> %s%s%s" % (index+1, bold, platesInDataBase[index], default))
print("")
print( ".------------------------------------------------------------------------------------------.")
print(f"|" + yellow + "{:^15}".format("Image") + "{:^15}".format("Plate") + "{:^15}".format("Status") + "{:^15}".format("Width") + "{:^15}".format("Height") + "{:^15}".format("Channels") + default + "|")
print( "|------------------------------------------------------------------------------------------|")
print(f"|" + "{:^15}".format(nameImages[0]) + bold + "{:^15}".format(plates[0]) + default + (red if authorization[0] == "Refused" else green) + "{:^15}".format(authorization[0]) + default + "{:^15}".format("%d%s" % (width[0], " px")) + "{:^15}".format("%d%s" % (height[0], " px")) + "{:^15}".format(channels[0]) + "|")
print(f"|" + "{:^15}".format(nameImages[1]) + bold + "{:^15}".format(plates[1]) + default + (red if authorization[1] == "Refused" else green) + "{:^15}".format(authorization[1]) + default + "{:^15}".format("%d%s" % (width[1], " px")) + "{:^15}".format("%d%s" % (height[1], " px")) + "{:^15}".format(channels[1]) + "|")
print(f"|" + "{:^15}".format(nameImages[2]) + bold + "{:^15}".format(plates[2]) + default + (red if authorization[2] == "Refused" else green) + "{:^15}".format(authorization[2]) + default + "{:^15}".format("%d%s" % (width[2], " px")) + "{:^15}".format("%d%s" % (height[2], " px")) + "{:^15}".format(channels[2]) + "|")
print(f"|" + "{:^15}".format(nameImages[3]) + bold + "{:^15}".format(plates[3]) + default + (red if authorization[3] == "Refused" else green) + "{:^15}".format(authorization[3]) + default + "{:^15}".format("%d%s" % (width[3], " px")) + "{:^15}".format("%d%s" % (height[3], " px")) + "{:^15}".format(channels[3]) + "|")
print( "'------------------------------------------------------------------------------------------'")
print()
print('\033[95m' + "{:#^92}".format(" STOP ") + '\033[0m')
for x in range(0, len(nameImages)):
image = cv2.imread('./img/%s' % nameImages[x])
height[x] = image.shape[0]
width[x] = image.shape[1]
channels[x] = image.shape[2]
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
threshImage = thresholding(image)
noiseImage = remove_noise(threshImage)
#cv2.imshow("IMG-%d"%x, noiseImage)
cv2.imwrite("imageToConvert.jpg", noiseImage)
custom_config = r'-c tessedit_char_blacklist=abcdefghijklmnopqrstuvwxyz%#@$“‘\|/ --psm 6'
plateText = pytesseract.image_to_string('imageToConvert.jpg', config=custom_config).replace(" ", "").replace("“", "").replace("\n","")
if (len(plateText) > 8):
plates[x] = plateText[:8]
elif (len(plateText) > 7):
plates[x] = plateText[:7]
else:
print ("Não identificado")
if (plates[x].replace(" ", "") in platesInDataBase):
authorization[x] = "Authorized"
else:
authorization[x] = "Refused"
showInfo()
```
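`thresholding` and `remove_noise` come from `lib.myLib`, which is not included in this record; a plausible OpenCV implementation consistent with how they are called above, offered purely as an assumption about the real helpers.

```python
# lib/myLib.py (assumed): grayscale-in, binary-out helpers as used in main.py.
import cv2
import pytesseract  # re-exported through the star import in main.py


def thresholding(gray_image):
    """Binarize a grayscale plate image with Otsu's threshold."""
    _, binary = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binary


def remove_noise(binary_image, kernel_size=3):
    """Median filter to drop salt-and-pepper noise before OCR."""
    return cv2.medianBlur(binary_image, kernel_size)
```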
{
"source": "joselejinpj/Algorithm",
"score": 4
}
#### File: Algorithm/Python/RomanToInteger.py
```python
class Solution(object):
def value(self, roman):
if (roman == 'I'):
return 1
if (roman == 'V'):
return 5
if (roman == 'X'):
return 10
if (roman == 'L'):
return 50
if (roman == 'C'):
return 100
if (roman == 'D'):
return 500
if (roman == 'M'):
return 1000
return -1
def romanToInt(self, str):
result = 0
i = 0
while (i < len(str)):
s1 = self.value(str[i])
if (i + 1 < len(str)):
s2 = self.value(str[i + 1])
if (s1 >= s2):
result = result + s1
i = i + 1
else:
result = result + s2 - s1
i = i + 2
else:
result = result + s1
i = i + 1
return result
```
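A quick check of the subtractive-notation handling (run in the same module, so `Solution` is in scope):

```python
# Subtractive pairs (IV, XC, CM) are resolved by the s1 < s2 branch above.
solver = Solution()
print(solver.romanToInt("III"))      # 3
print(solver.romanToInt("LVIII"))    # 58
print(solver.romanToInt("MCMXCIV"))  # 1994
```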
{
"source": "JoseleSolis/Proceso-de-aprendizaje",
"score": 3
}
#### File: Proceso-de-aprendizaje/Agents/adviser.py
```python
from random import *
class Adviser:
def stop(self, count):
if count > 7:
r = random()
if r < 0.5:
return True
if count > 5:
r = random()
if r < 0.35:
return True
return False
```
#### File: Compilation/AST/Evaluate.py
```python
from Parser.ShiftReduce import ShiftReduce
def evaluate_reverse_parse(parser_lr1, operations, tokens):
if not parser_lr1 or not operations or not tokens:
return  # nothing to evaluate
right_parse = iter(parser_lr1)
tokens = iter(tokens)
stack = []
for operation in operations:
if operation == ShiftReduce.SHIFT:
token = next(tokens)
stack.append(token.value)
elif operation == ShiftReduce.REDUCE:
production = next(right_parse)
head, body = production
attributes = production.attributes
assert all(rule is None for rule in attributes[1:]), 'There must be only synthesized attributes.'
rule = attributes[0]
if len(body):
synteticed = [None] + stack[-len(body):]
value = rule(None, synteticed)
stack[-len(body):] = [value]
else:
stack.append(rule(None, None))
else:
raise Exception('error')
assert len(stack) == 1 and next(tokens).type == 'eof', 'the final token is not eof'
return stack[0]
```
#### File: Compilation/Parser/State.py
```python
class State:
def __init__(self, state, final=False, formatter=lambda x: str(x)):
self.state = state
self.final = final
self.transitions = {}
self.epsilon_transitions = set()
self.tag = None
self.formatter = formatter
def set_formatter(self, value, attr='formatter', visited=None):
if visited is None:
visited = set()
elif self in visited:
return
visited.add(self)
self.__setattr__(attr, value)
for destinations in self.transitions.values():
for node in destinations:
node.set_formatter(value, attr, visited)
for node in self.epsilon_transitions:
node.set_formatter(value, attr, visited)
return self
def add_transition(self, symbol, state):
try:
self.transitions[symbol].append(state)
except KeyError:
self.transitions[symbol] = [state]
return self
def __getitem__(self, symbol):
if symbol == '':
return self.epsilon_transitions
try:
return self.transitions[symbol]
except KeyError:
return None
def __iter__(self):
yield from self._visit()
def _visit(self, visited=None):
if visited is None:
visited = set()
elif self in visited:
return
visited.add(self)
yield self
for destinations in self.transitions.values():
for node in destinations:
yield from node._visit(visited)
for node in self.epsilon_transitions:
yield from node._visit(visited)
def multiline_formatter(state):
return '\n'.join(str(item) for item in state)
```
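A tiny sketch of assembling the `State` class into an automaton and traversing it; the symbols are arbitrary and the import path is inferred from the file location.

```python
from Parser.State import State  # assumed module path (Compilation/Parser/State.py)

# Two-state automaton: q0 --a--> q1, plus an epsilon edge back to q0.
q0 = State(0)
q1 = State(1, final=True)
q0.add_transition('a', q1)
q1.epsilon_transitions.add(q0)

print([s.state for s in q0])  # [0, 1]: __iter__ visits everything reachable once
print(q0['a'][0].final)       # True: 'a' leads to the accepting state
print(q0[''])                 # set(): q0 has no epsilon transitions
```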
#### File: Proceso-de-aprendizaje/Entities/activity.py
```python
class Activity:
def __init__(self, name, estimated_time):
self.name = name
self.elements = dict()
self.estimated_time = estimated_time
def add_element(self, element, points):
if element not in self.elements:
self.elements[element] = points
return True
return False
```
#### File: Proceso-de-aprendizaje/Entities/element.py
```python
class Element:
def __init__(self, name):
self.name = name
# per-instance list: a class-level list would be shared by every Element
self.dependencies = []
def add_dependencies(self, *elements):
for element in elements:
if element not in self.dependencies:
self.dependencies.append(element)
def remove_dependencies(self, *elements):
for element in elements:
if element in self.dependencies:
self.dependencies.remove(element)
```
{
"source": "JoseLGomez/detectron2",
"score": 3
}
#### File: data/transforms/sem_seg_augmentations.py
```python
import numpy as np
import torch
import random
import cv2
from detectron2.data import transforms as T
from fvcore.transforms.transform import Transform
class CutMix(T.Augmentation):
def get_transform(self, image1, image2, label1, label2):
pass
class CutOutPolicy(T.Augmentation):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (tuple): (min, max) number of patches to cut out of each image.
length (tuple): (min, max) side length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.min_holes, self.max_holes = n_holes
self.min_length, self.max_length = length
def get_transform(self, image):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of it.
"""
h = image.shape[0]
w = image.shape[1]
holes = np.random.randint(self.min_holes, high=self.max_holes)
mask = np.ones(image.shape, np.float32)
for n in range(holes):
xlength = np.random.randint(self.min_length, high=self.max_length)
ylength = np.random.randint(self.min_length, high=self.max_length)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - ylength // 2, 0, h)
y2 = np.clip(y + ylength // 2, 0, h)
x1 = np.clip(x - xlength // 2, 0, w)
x2 = np.clip(x + xlength // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
return CutOut(mask)
class CutOut(Transform):
def __init__(self, mask):
self.mask = mask
def apply_image(self, img, seg_mode=False):
if seg_mode:
img = img * self.mask[:,:,0]
img = img + ((1-self.mask[:,:,0])*200) # CutOut pixels set to 200 to detect them later and create a mask for the loss
else:
img = img * self.mask
return img
def apply_segmentation(self, segmentation):
return segmentation #self.apply_image(segmentation, seg_mode=True)
def apply_coords(self, coords):
return coords
class TrainScalePolicy(T.Augmentation):
def __init__(self, train_scale):
self.lscale, self.hscale = train_scale
def get_transform(self, image):
f_scale = self.lscale + random.randint(0, int((self.hscale-self.lscale)*10)) / 10.0
return TrainScale(f_scale)
class TrainScale(Transform):
def __init__(self, f_scale):
self.f_scale = f_scale
def apply_image(self, image):
image = cv2.resize(image, None, fx=self.f_scale, fy=self.f_scale, interpolation = cv2.INTER_LINEAR)
return image
def apply_segmentation(self, segmentation):
segmentation = cv2.resize(segmentation, None, fx=self.f_scale, fy=self.f_scale, interpolation = cv2.INTER_NEAREST)
return segmentation
def apply_coords(self, coords):
return coords
```
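A sketch of plugging these policies into a detectron2 training dataloader; the dataset name, hole counts, and scale range are illustrative, and the module path for the import is inferred from the file header.

```python
# Illustrative wiring of the custom augmentation policies into a DatasetMapper.
from detectron2.config import get_cfg
from detectron2.data import DatasetMapper, build_detection_train_loader
from detectron2.data.transforms.sem_seg_augmentations import CutOutPolicy, TrainScalePolicy

cfg = get_cfg()
cfg.DATASETS.TRAIN = ("my_sem_seg_train",)  # hypothetical registered dataset

augs = [
    TrainScalePolicy(train_scale=(0.5, 1.5)),       # random rescale in [0.5, 1.5]
    CutOutPolicy(n_holes=(1, 3), length=(32, 96)),  # 1-3 random square holes per image
]
mapper = DatasetMapper(cfg, is_train=True, augmentations=augs)
train_loader = build_detection_train_loader(cfg, mapper=mapper)
```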
#### File: detectron2/evaluation/sem_seg_evaluation.py
```python
import itertools
import json
import logging
import numpy as np
import os
from collections import OrderedDict
import PIL.Image as Image
import pycocotools.mask as mask_util
import torch
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.comm import all_gather, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from .evaluator import DatasetEvaluator
class SemSegEvaluator(DatasetEvaluator):
"""
Evaluate semantic segmentation metrics.
"""
def __init__(self, dataset_name, distributed=True, output_dir=None, *, num_classes=None, ignore_label=None, write_outputs=False):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
distributed (bool): if True, will collect results from all ranks for evaluation.
Otherwise, will evaluate the results in the current process.
output_dir (str): an output directory to dump results.
num_classes, ignore_label: deprecated argument
"""
self._logger = logging.getLogger(__name__)
if num_classes is not None:
self._logger.warn(
"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata."
)
if ignore_label is not None:
self._logger.warn(
"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata."
)
self._dataset_name = dataset_name
self._distributed = distributed
self._output_dir = output_dir
self._write_outputs = write_outputs
self._cpu_device = torch.device("cpu")
self.input_file_to_gt_file = {
dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
for dataset_record in DatasetCatalog.get(dataset_name)
}
meta = MetadataCatalog.get(dataset_name)
# Dict that maps contiguous training ids to COCO category ids
try:
c2d = meta.stuff_dataset_id_to_contiguous_id
self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}
except AttributeError:
self._contiguous_id_to_dataset_id = None
self._class_names = meta.stuff_classes
self._num_classes = len(meta.stuff_classes)
if num_classes is not None:
assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}"
self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label
def reset(self):
self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a model.
It is a list of dicts. Each dict corresponds to an image and
contains keys like "height", "width", "file_name".
outputs: the outputs of a model. It is either list of semantic segmentation predictions
(Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
segmentation prediction in the same format.
"""
from cityscapesscripts.helpers.labels import trainId2label
pred_output = os.path.join(self._output_dir, 'predictions')
if not os.path.exists(pred_output):
os.makedirs(pred_output)
pred_colour_output = os.path.join(self._output_dir, 'colour_predictions')
if not os.path.exists(pred_colour_output):
os.makedirs(pred_colour_output)
for input, output in zip(inputs, outputs):
output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
pred = np.array(output, dtype=np.uint8)
pred64 = np.array(output, dtype=np.int64) # to use it on bitcount for conf matrix
with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f:
gt = np.array(Image.open(f), dtype=np.int64)
gt[gt == self._ignore_label] = self._num_classes
self._conf_matrix += np.bincount(
(self._num_classes + 1) * pred64.reshape(-1) + gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
if self._write_outputs:
file_name = input["file_name"]
basename = os.path.splitext(os.path.basename(file_name))[0]
pred_filename = os.path.join(pred_output, basename + '.png')
Image.fromarray(pred).save(pred_filename)
# colour prediction
output = output.numpy()
pred_colour_filename = os.path.join(pred_colour_output, basename + '.png')
pred_colour = 255 * np.ones([output.shape[0],output.shape[1],3], dtype=np.uint8)
for train_id, label in trainId2label.items():
#if label.ignoreInEval:
# continue
#pred_colour[np.broadcast_to(output == train_id, pred_colour.shape)] = 0 #label.color
pred_colour[(output == train_id),0] = label.color[0]
pred_colour[(output == train_id),1] = label.color[1]
pred_colour[(output == train_id),2] = label.color[2]
Image.fromarray(pred_colour).save(pred_colour_filename)
#self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
def evaluate(self):
"""
Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
* Mean intersection-over-union averaged across classes (mIoU)
* Frequency Weighted IoU (fwIoU)
* Mean pixel accuracy averaged across classes (mACC)
* Pixel Accuracy (pACC)
"""
if self._distributed:
synchronize()
conf_matrix_list = all_gather(self._conf_matrix)
self._predictions = all_gather(self._predictions)
self._predictions = list(itertools.chain(*self._predictions))
if not is_main_process():
return
self._conf_matrix = np.zeros_like(self._conf_matrix)
for conf_matrix in conf_matrix_list:
self._conf_matrix += conf_matrix
'''if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "sem_seg_predictions.json")
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(self._predictions))'''
print(self._conf_matrix)
acc = np.full(self._num_classes, np.nan, dtype=np.float64)
iou = np.full(self._num_classes, np.nan, dtype=np.float64)
tp = self._conf_matrix.diagonal()[:-1].astype(np.float64)
pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float64)
class_weights = pos_gt / np.sum(pos_gt)
pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float64)
acc_valid = pos_gt > 0
acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
iou_valid = (pos_gt + pos_pred) > 0
union = pos_gt + pos_pred - tp
iou[acc_valid] = tp[acc_valid] / union[acc_valid]
macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
miou = np.sum(iou[acc_valid]) / np.sum(iou_valid)
fiou = np.sum(iou[acc_valid] * class_weights[acc_valid])
pacc = np.sum(tp) / np.sum(pos_gt)
res = {}
res["mIoU"] = 100 * miou
res["fwIoU"] = 100 * fiou
for i, name in enumerate(self._class_names):
res["IoU-{}".format(name)] = 100 * iou[i]
res["mACC"] = 100 * macc
res["pACC"] = 100 * pacc
for i, name in enumerate(self._class_names):
res["ACC-{}".format(name)] = 100 * acc[i]
'''if self._output_dir:
file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(res, f)'''
results = OrderedDict({"sem_seg": res})
self._logger.info(results)
return results
def encode_json_sem_seg(self, sem_seg, input_file_name):
"""
Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.
See http://cocodataset.org/#format-results
"""
json_list = []
for label in np.unique(sem_seg):
if self._contiguous_id_to_dataset_id is not None:
assert (
label in self._contiguous_id_to_dataset_id
), "Label {} is not in the metadata info for {}".format(label, self._dataset_name)
dataset_id = self._contiguous_id_to_dataset_id[label]
else:
dataset_id = int(label)
mask = (sem_seg == label).astype(np.uint8)
mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0]
mask_rle["counts"] = mask_rle["counts"].decode("utf-8")
json_list.append(
{"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle}
)
return json_list
```
#### File: modeling/custom_models/deeplabV2_bis.py
```python
from __future__ import absolute_import, print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import Callable, Dict, Optional, Tuple, Union
from PIL import Image
from .resnet_deeplabV2_bis import _ConvBnReLU, _ResLayer, _Stem
from ..backbone import (Backbone, BACKBONE_REGISTRY)
from ..meta_arch import SEM_SEG_HEADS_REGISTRY
from detectron2.layers import ShapeSpec
class _ASPP(nn.Module):
"""
Atrous spatial pyramid pooling (ASPP)
"""
def __init__(self, in_ch, out_ch, rates):
super(_ASPP, self).__init__()
for i, rate in enumerate(rates):
self.add_module(
"c{}".format(i),
nn.Conv2d(in_ch, out_ch, 3, 1, padding=rate, dilation=rate, bias=True),
)
for m in self.children():
nn.init.normal_(m.weight, mean=0, std=0.01)
nn.init.constant_(m.bias, 0)
def forward(self, x):
return sum([stage(x) for stage in self.children()])
@BACKBONE_REGISTRY.register()
class DeepLabV2_backbone(nn.Sequential):
"""
DeepLab v2: Dilated ResNet + ASPP
Output stride is fixed at 8
"""
def __init__(self, cfg, input_shape):
super(DeepLabV2_backbone, self).__init__()
n_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
n_blocks = [3, 4, 23, 3]
atrous_rates = [6, 12, 18, 24]
self.ch = [64 * 2 ** p for p in range(6)]
self.size_divisibility = 0
self.add_module("layer1", _Stem(self.ch[0]))
self.add_module("layer2", _ResLayer(n_blocks[0], self.ch[0], self.ch[2], 1, 1))
self.add_module("layer3", _ResLayer(n_blocks[1], self.ch[2], self.ch[3], 2, 1))
self.add_module("layer4", _ResLayer(n_blocks[2], self.ch[3], self.ch[4], 1, 2))
self.add_module("layer5", _ResLayer(n_blocks[3], self.ch[4], self.ch[5], 1, 4))
self.add_module("aspp", _ASPP(self.ch[5], n_classes, atrous_rates))
def output_shape(self):
return {"aspp": ShapeSpec(channels=self.ch[5], stride=1)}
'''def freeze_bn(self):
for m in self.modules():
if isinstance(m, _ConvBnReLU.BATCH_NORM):
m.eval()'''
@SEM_SEG_HEADS_REGISTRY.register()
class DeepLabV2_head(nn.Module):
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__()
self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT
def forward(self, logits, targets=None, masks=None):
iter_loss = 0
print(logits.shape)
for logit in logits:
# Resize labels for {100%, 75%, 50%, Max} logits
_, H, W = logit.shape
print(H)
print(W)
labels_ = self.resize_labels(targets, size=(W, H))
iter_loss += self.losses(logit, labels_)
def losses(self, predictions, targets, masks=None):
print(predictions.shape)
print(targets.shape)
if masks is not None:
for idx in range(len(predictions)):
aux_mask = masks[idx].unsqueeze(0).expand(predictions[idx].size())
predictions[idx] = predictions[idx] * aux_mask
loss = F.cross_entropy(
predictions, targets, reduction="mean", ignore_index=self.ignore_value
)
losses = {"loss_sem_seg": loss * self.loss_weight}
return losses
def resize_labels(self, labels, size):
"""
Downsample labels for 0.5x and 0.75x logits by nearest interpolation.
Other nearest methods result in misaligned labels.
-> F.interpolate(labels, shape, mode='nearest')
-> cv2.resize(labels, shape, interpolation=cv2.INTER_NEAREST)
"""
new_labels = []
for label in labels:
label = label.cpu().float().numpy()
label = Image.fromarray(label).resize(size, resample=Image.NEAREST)
new_labels.append(np.asarray(label))
new_labels = torch.LongTensor(new_labels)
return new_labels
if __name__ == "__main__":
model = DeepLabV2(
n_classes=21, n_blocks=[3, 4, 23, 3], atrous_rates=[6, 12, 18, 24]
)
model.eval()
image = torch.randn(1, 3, 513, 513)
print(model)
print("input:", image.shape)
print("output:", model(image).shape)
```
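Because the backbone and head are added through `BACKBONE_REGISTRY` and `SEM_SEG_HEADS_REGISTRY`, selecting them is a config change; a hedged example, where the class count is illustrative and `add_deeplab_config` (imported by the self-training script below) follows standard detectron2 semantic-segmentation configs.

```python
# Illustrative: point a detectron2 config at the registered DeepLab v2 modules.
from detectron2.config import get_cfg
from detectron2.projects.deeplab import add_deeplab_config

cfg = get_cfg()
add_deeplab_config(cfg)
cfg.MODEL.META_ARCHITECTURE = "SemanticSegmentor"
cfg.MODEL.BACKBONE.NAME = "DeepLabV2_backbone"   # registered above
cfg.MODEL.SEM_SEG_HEAD.NAME = "DeepLabV2_head"   # registered above
cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 19          # e.g. the Cityscapes train ids
cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
```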
#### File: detectron2/tools/sem_seg_selftraining.py
```python
import sys
import glob
import random
import logging
import os
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import numpy as np
import time
import math
import PIL.Image as Image
import datetime
import itertools
import gc
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
import detectron2.data.transforms as T
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
DatasetMapper,
)
from detectron2.engine import default_argument_parser, default_setup, default_writers, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
inference_on_dataset,
print_csv_format,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import EventStorage
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.generic_sem_seg_dataset import load_dataset_from_txt, load_dataset_to_inference, load_dataset_from_txt_and_merge
from torch import nn
import torch
from contextlib import ExitStack, contextmanager
from cityscapesscripts.helpers.labels import trainId2label, labels
from detectron2.utils.logger import log_every_n_seconds
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
logger = logging.getLogger("detectron2")
INIT_TGT_PORT = 0.2
MAX_TGT_PORT = 0.5
TGT_PORT_STEP = 0.05
DS_RATE = 4
softmax2d = nn.Softmax2d()
def cotraining_argument_parser(parser):
# Adds co-training arguments to the detectron2 base parser
parser.add_argument(
'--unlabeled_dataset_A',
dest='unlabeled_dataset_A',
help='File with Data A images',
default=None,
type=str
)
parser.add_argument(
'--unlabeled_dataset_A_name',
dest='unlabeled_dataset_A_name',
help='Unlabeled dataset name to call dataloader function',
default=None,
type=str
)
parser.add_argument(
'--weights_branchA',
dest='weights_branchA',
help='Weights File of branch1',
default=None,
type=str
)
parser.add_argument(
'--num-epochs',
dest='epochs',
help='Number of selftraining rounds',
default=20,
type=int
)
parser.add_argument(
'--max_unlabeled_samples',
dest='max_unlabeled_samples',
help='Maximum number of unlabeled samples',
default=500,
type=int
)
parser.add_argument(
'--samples',
dest='samples',
help='Number of top images to be sampled after each iteration',
default=40,
type=int
)
parser.add_argument(
'--step_inc',
dest='step_inc',
help='Fix an image step to avoid consecutive images in sequences',
default=1,
type=int
)
parser.add_argument(
'--continue_epoch',
dest='continue_epoch',
help='Continue co-training at the begining of the specified epoch',
default=0,
type=int
)
parser.add_argument(
'--continue_training',
action='store_true'
)
parser.add_argument(
'--scratch_training',
help='Use pretrained model for training in each epoch',
action='store_true'
)
parser.add_argument(
'--best_model',
help='Use the best model obtained during the epochs',
action='store_true'
)
parser.add_argument(
'--fp_annot',
action='store_true'
)
parser.add_argument(
'--initial_score_A',
dest='initial_score_A',
help='Initial score to reach to propagate weights to the next epoch',
default=0,
type=float
)
parser.add_argument(
'--seed',
dest='seed',
help='Set a fixed seed to randomly select the unlabeled data. Useful to replicate experiments',
default=None,
type=int
)
return parser
def print_txt_format(results_dict, iter_name, epoch, output, model_id):
with open(os.path.join(output,'results.txt'),"a+") as f:
print('----- Epoch: %s iteration: %s Model: %s -----' % (epoch,iter_name,model_id))
f.write('----- Epoch: %s iteration: %s Model: %s ----- \n' % (epoch,iter_name,model_id))
for k, v in results_dict['sem_seg'].items():
if 'IoU' in k:
print('%s: %.4f' % (k, v))
f.write('%s: %.4f \n' % (k, v))
print('\n')
f.write('\n')
def built_custom_dataset(cfg, image_dir, gt_dir, dataset_name, add_pseudolabels=False, pseudo_img_dir=None, pseudo_dir=None, test=False):
if add_pseudolabels and pseudo_img_dir is not None and pseudo_dir is not None:
DatasetCatalog.register(
dataset_name, lambda x1=image_dir, x2=pseudo_img_dir, y1=gt_dir, y2=pseudo_dir: load_dataset_from_txt_and_merge(x1, x2, y1, y2, num_samples=cfg.DATASETS.TRAIN_SAMPLES)
)
else:
if test:
DatasetCatalog.register(
dataset_name, lambda x=image_dir, y=gt_dir: load_dataset_from_txt(x, y)
)
else:
DatasetCatalog.register(
dataset_name, lambda x=image_dir, y=gt_dir: load_dataset_from_txt(x, y, num_samples=cfg.DATASETS.TRAIN_SAMPLES)
)
if cfg.DATASETS.LABELS == 'cityscapes':
MetadataCatalog.get(dataset_name).stuff_classes = [k.name for k in labels if k.trainId < 19 and k.trainId > -1]
MetadataCatalog.get(dataset_name).stuff_colors = [k.color for k in labels if k.trainId < 19 and k.trainId > -1]
else:
raise Exception('Unsupported label set')
MetadataCatalog.get(dataset_name).set(
image_dir=image_dir,
gt_dir=gt_dir,
evaluator_type="generic_sem_seg",
ignore_label=255,
)
def built_inference_dataset(cfg, im_list, dataset_name):
DatasetCatalog.register(
dataset_name, lambda x=im_list: load_dataset_to_inference(x)
)
MetadataCatalog.get(dataset_name).set(
image_dir=im_list,
evaluator_type="generic_sem_seg",
ignore_label=255,
)
def build_sem_seg_train_aug(cfg, augmentation):
augs = []
if cfg.INPUT.ACTIVATE_MIN_SIZE_TRAIN:
augs.append(T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING))
if cfg.INPUT.RESIZED:
augs.append(T.Resize(cfg.INPUT.RESIZE_SIZE))
if cfg.INPUT.CROP.ENABLED:
augs.append(T.RandomCrop_CategoryAreaConstraint(
cfg.INPUT.CROP.TYPE,
cfg.INPUT.CROP.SIZE,
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE))
if augmentation['hflip']:
augs.append(T.RandomFlip(prob=augmentation['hflip_prob'], horizontal=True, vertical=False))
if augmentation['vflip']:
augs.append(T.RandomFlip(prob=augmentation['vflip_prob'], horizontal=False, vertical=True))
if augmentation['cutout']:
augs.append(T.CutOutPolicy(augmentation['cutout_n_holes'], augmentation['cutout_length']))
if augmentation['random_resize']:
augs.append(T.TrainScalePolicy(augmentation['resize_range']))
return augs
def get_evaluator(cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name, output_folder=output_folder)
if evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if evaluator_type == "generic_sem_seg":
return SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder, write_outputs=False)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
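# As the docstring notes, for a custom dataset the evaluator can also be created directly in
# the calling script; a minimal sketch (assuming the dataset is registered as "my_dataset"):
#
#   evaluator = SemSegEvaluator("my_dataset", distributed=True,
#                               output_dir=os.path.join(cfg.OUTPUT_DIR, "inference"))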
def do_test_txt(cfg, model, dataset_name, step_iter, epoch, model_id):
results = OrderedDict()
dataset: List[Dict] = DatasetCatalog.get(dataset_name)
data_loader = build_detection_test_loader(cfg, dataset_name)
evaluator = get_evaluator(
cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name, str(step_iter))
)
results_i = inference_on_dataset(model, data_loader, evaluator)
results[dataset_name] = results_i
print_txt_format(results_i, step_iter, epoch, cfg.OUTPUT_DIR, model_id)
'''if comm.is_main_process():
logger.info("Evaluation results for {} in csv format:".format(dataset_name))
print_csv_format(results_i)'''
if len(results) == 1:
results = list(results.values())[0]
return results
@contextmanager
def inference_context(model):
"""
A context where the model is temporarily changed to eval mode,
and restored to previous mode afterwards.
Args:
model: a torch Module
"""
training_mode = model.training
model.eval()
yield
model.train(training_mode)
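# Typical usage sketch, mirroring inference_on_imlist below:
#
#   with inference_context(model), torch.no_grad():
#       predictions = model(batched_inputs)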
def inference_on_imlist(cfg, model, weights, dataset_name):
    # Follows the structure of detectron2.evaluation.inference_on_dataset
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).load(weights)
data_loader = build_detection_test_loader(cfg, dataset_name)
total = len(data_loader)
num_warmup = min(5, total - 1)
start_time = time.perf_counter()
total_compute_time = 0
num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
with ExitStack() as stack:
if isinstance(model, nn.Module):
stack.enter_context(inference_context(model))
stack.enter_context(torch.no_grad())
outputs = []
pred_cls_num = np.zeros(num_classes)
for idx, inputs in enumerate(data_loader):
if idx == num_warmup:
start_time = time.perf_counter()
total_compute_time = 0
start_compute_time = time.perf_counter()
batch_outputs = model(inputs)
for output in batch_outputs:
                # Save the argmax index and its confidence value instead of the full per-class score channels to save memory
output = output['sem_seg']
output = torch.unsqueeze(output, 0)
output = softmax2d(output)
output = torch.squeeze(output)
output = output.cpu().numpy()
amax_output = np.asarray(np.argmax(output, axis=0), dtype=np.uint8)
conf = np.amax(output,axis=0)
outputs.append([amax_output, conf])
if torch.cuda.is_available():
torch.cuda.synchronize()
total_compute_time += time.perf_counter() - start_compute_time
iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
seconds_per_img = total_compute_time / iters_after_start
if idx >= num_warmup * 2 or seconds_per_img > 5:
total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
log_every_n_seconds(
logging.INFO,
"Inference done {}/{}. {:.4f} s / img. ETA={}".format(
idx + 1, total, seconds_per_img, str(eta)
),
n=5,
)
return outputs
def do_train(cfg, model, weights, train_dataset_name, test_dataset_name, model_id, save_checkpoints_path, epoch,
continue_epoch, resume=False, dataset_pseudolabels=None):
model.train()
optimizer = build_optimizer(cfg, model)
scheduler = build_lr_scheduler(cfg, optimizer)
checkpointer = DetectionCheckpointer(
model, save_checkpoints_path, optimizer=optimizer, scheduler=scheduler
)
max_iter = cfg.SOLVER.MAX_ITER
if resume:
start_iter = (
checkpointer.resume_or_load(weights, resume=resume).get("iteration", -1) + 1
)
else:
checkpointer.resume_or_load(weights)
start_iter = 0
periodic_checkpointer = PeriodicCheckpointer(
checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
)
writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []
#Data aug mapper
augmentations = {}
if model_id.lower() == 'a':
augmentations = {'cutout': cfg.AUGMENTATION_A.CUTOUT,
'cutout_n_holes': cfg.AUGMENTATION_A.CUTOUT_N_HOLES,
'cutout_length': cfg.AUGMENTATION_A.CUTOUT_LENGTH,
'hflip': cfg.AUGMENTATION_A.HFLIP,
'hflip_prob': cfg.AUGMENTATION_A.HFLIP_PROB,
'vflip': cfg.AUGMENTATION_A.VFLIP,
'vflip_prob': cfg.AUGMENTATION_A.VFLIP_PROB,
'random_resize': cfg.AUGMENTATION_A.RANDOM_RESIZE,
'resize_range': cfg.AUGMENTATION_A.RESIZE_RANGE}
elif model_id.lower() == 'b':
augmentations = {'cutout': cfg.AUGMENTATION_B.CUTOUT,
'cutout_n_holes': cfg.AUGMENTATION_B.CUTOUT_N_HOLES,
'cutout_length': cfg.AUGMENTATION_B.CUTOUT_LENGTH,
'hflip': cfg.AUGMENTATION_B.HFLIP,
'hflip_prob': cfg.AUGMENTATION_B.HFLIP_PROB,
'vflip': cfg.AUGMENTATION_B.VFLIP,
'vflip_prob': cfg.AUGMENTATION_B.VFLIP_PROB,
'random_resize': cfg.AUGMENTATION_B.RANDOM_RESIZE,
'resize_range': cfg.AUGMENTATION_B.RESIZE_RANGE}
else:
raise NotImplementedError('Unknown model id for data augmentation')
if "SemanticSegmentor" in cfg.MODEL.META_ARCHITECTURE:
mapper = DatasetMapper(cfg, is_train=True, augmentations=build_sem_seg_train_aug(cfg, augmentations))
else:
mapper = None
# compared to "train_net.py", we do not support accurate timing and
# precise BN here, because they are not trivial to implement in a small training loop
logger.info("Starting training from iteration {}".format(start_iter))
with EventStorage(start_iter) as storage:
results_list = []
if cfg.SOLVER.ALTERNATE_SOURCE_PSEUDOLABELS and dataset_pseudolabels is not None:
dataset: List[Dict] = DatasetCatalog.get(train_dataset_name)
data_loader = build_detection_train_loader(cfg, dataset=dataset, mapper=mapper, total_batch_size=cfg.SOLVER.SOURCE_PSEUDOLABELS_BATCH_RATIO[0])
dataset_pseudo: List[Dict] = DatasetCatalog.get(dataset_pseudolabels)
data_loader_pseudo = build_detection_train_loader(cfg, dataset=dataset_pseudo, mapper=mapper, total_batch_size=cfg.SOLVER.SOURCE_PSEUDOLABELS_BATCH_RATIO[1])
results_list = training_loop_mixdatasets(cfg, model, start_iter, max_iter, data_loader, data_loader_pseudo, storage, optimizer, scheduler, periodic_checkpointer, writers, test_dataset_name, epoch, model_id)
else:
dataset: List[Dict] = DatasetCatalog.get(train_dataset_name)
data_loader = build_detection_train_loader(cfg, dataset=dataset, mapper=mapper, total_batch_size=cfg.SOLVER.IMS_PER_BATCH)
results_list = training_loop(cfg, model, start_iter, max_iter, data_loader, storage, optimizer, scheduler, periodic_checkpointer, writers, test_dataset_name, epoch, model_id)
return results_list
def training_loop(cfg, model, start_iter, max_iter, data_loader, storage, optimizer, scheduler, periodic_checkpointer, writers, test_dataset_name, epoch, model_id):
results_list = []
for data, iteration in zip(data_loader, range(start_iter, max_iter)):
#print(data[0]['file_name'])
#print('%s x %s' % (data[0]['height'], data[0]['width']))
storage.iter = iteration
loss_dict = model(data)
losses = sum(loss_dict.values())
assert torch.isfinite(losses).all(), loss_dict
loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
if comm.is_main_process():
storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
scheduler.step()
if (
cfg.TEST.EVAL_PERIOD > 0
and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
):
results = do_test_txt(cfg, model, test_dataset_name, iteration+1, epoch, model_id)
results_list.append([results['sem_seg']['mIoU'],iteration])
# Compared to "train_net.py", the test results are not dumped to EventStorage
comm.synchronize()
if iteration - start_iter > 5 and (
(iteration + 1) % 20 == 0 or iteration == max_iter - 1
):
for writer in writers:
writer.write()
periodic_checkpointer.step(iteration)
return results_list
def training_loop_mixdatasets(cfg, model, start_iter, max_iter, data_loader, data_loader_pseudo, storage, optimizer, scheduler, periodic_checkpointer, writers, test_dataset_name, epoch, model_id):
''' Training loop that mixes two dataloaders to compose the final batch with the proportion specified'''
results_list = []
for data1, data2, iteration in zip(data_loader, data_loader_pseudo, range(start_iter, max_iter)):
#print(data[0]['file_name'])
#print('%s x %s' % (data[0]['height'], data[0]['width']))
storage.iter = iteration
data = data1+data2
loss_dict = model(data)
del data
gc.collect()
losses = sum(loss_dict.values())
assert torch.isfinite(losses).all(), loss_dict
loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
if comm.is_main_process():
storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
scheduler.step()
if (
cfg.TEST.EVAL_PERIOD > 0
and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
):
results = do_test_txt(cfg, model, test_dataset_name, iteration+1, epoch, model_id)
results_list.append([results['sem_seg']['mIoU'],iteration])
# Compared to "train_net.py", the test results are not dumped to EventStorage
comm.synchronize()
if iteration - start_iter > 5 and (
(iteration + 1) % 20 == 0 or iteration == max_iter - 1
):
for writer in writers:
writer.write()
periodic_checkpointer.step(iteration)
return results_list
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
if args.unlabeled_dataset_A is not None:
cfg.DATASETS.UNLABELED_DATASET_A = args.unlabeled_dataset_A
if args.weights_branchA is not None:
cfg.MODEL.WEIGHTS_BRANCH_A = args.weights_branchA
if args.max_unlabeled_samples is not None:
cfg.DATASETS.MAX_UNLABELED_SAMPLES = args.max_unlabeled_samples
cfg.freeze()
default_setup(
cfg, args
)
return cfg
def get_unlabeled_data(unlabeled_dataset, step_inc, seed, samples):
with open(unlabeled_dataset,'r') as f:
im_list = [line.rstrip().split(' ') for line in f.readlines()]
im_list.sort()
    # seed before any random draw so that the unlabeled selection is reproducible
    random.seed(seed)
    init_indx = random.randrange(0, step_inc)
    indx_sampled = np.asarray(range(init_indx, len(im_list), step_inc), dtype=int)
    im_list = np.asarray(im_list)[indx_sampled]
    im_list = random.sample(im_list.tolist(), min(len(im_list), samples))
return im_list
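# The unlabeled txt is expected to hold one space-separated image entry per line, e.g. a
# (hypothetical) path such as /data/target/leftImg8bit/train/img_0001.png. The list is first
# subsampled by taking one entry every `step_inc` lines (from a random offset) and then up to
# `samples` images are drawn at random using the given seed.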
def prepare_confidence_maps(outputs, num_classes):
conf_dict = {k: [] for k in range(num_classes)}
pred_cls_num = np.zeros(num_classes)
for idx, output in enumerate(outputs):
amax_output = output[0]
conf = output[1]
pred_label = amax_output.copy()
for idx_cls in range(num_classes):
idx_temp = pred_label == idx_cls
pred_cls_num[idx_cls] = pred_cls_num[idx_cls] + np.sum(idx_temp)
if idx_temp.any():
conf_cls_temp = conf[idx_temp].astype(np.float32)
len_cls_temp = conf_cls_temp.size
# downsampling by ds_rate
conf_cls = conf_cls_temp[0:len_cls_temp:DS_RATE]
conf_dict[idx_cls].extend(conf_cls)
return conf_dict, pred_cls_num
def compute_kc_thresholds(conf_dict, pred_cls_num, tgt_portion, num_classes):
# threshold for each class
cls_thresh = np.ones(num_classes,dtype = np.float32)
cls_sel_size = np.zeros(num_classes, dtype=np.float32)
cls_size = np.zeros(num_classes, dtype=np.float32)
for idx_cls in np.arange(0, num_classes):
cls_size[idx_cls] = pred_cls_num[idx_cls]
        if conf_dict[idx_cls] is not None:
conf_dict[idx_cls].sort(reverse=True) # sort in descending order
len_cls = len(conf_dict[idx_cls])
cls_sel_size[idx_cls] = int(math.floor(len_cls * tgt_portion))
len_cls_thresh = int(cls_sel_size[idx_cls])
if len_cls_thresh != 0:
cls_thresh[idx_cls] = conf_dict[idx_cls][len_cls_thresh-1]
conf_dict[idx_cls] = None
logger.info("CBST thresholds: {}".format(cls_thresh))
return cls_thresh
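# Worked example with illustrative numbers: if a class has sorted confidences
# [0.9, 0.8, 0.7, 0.6] and tgt_portion = 0.5, then cls_sel_size = floor(4 * 0.5) = 2 and the
# class threshold becomes the 2nd highest confidence, 0.8; pixels of that class whose
# confidence is at or below 0.8 are later relabelled as ignore in generate_pseudolabels.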
def generate_pseudolabels(outputs, cls_thresh, tgt_num, num_classes):
pseudolabels = []
pseudolabels_not_filtered = []
scores_list = []
'''print(tgt_num)
print(cls_thresh)
exit(-1)'''
for idx in range(tgt_num):
pseudolabels_not_filtered.append(outputs[idx][0])
pseudolabel = outputs[idx][0].copy()
pred_conf = outputs[idx][1]
weighted_conf = np.zeros(pred_conf.shape,dtype=np.float32)
        for cls_idx in range(len(cls_thresh)):
            weighted_conf = weighted_conf + (pred_conf * (pseudolabel == cls_idx) / cls_thresh[cls_idx])
        pseudolabel[weighted_conf <= 1] = 19  # trainID 19 maps to '255' ('unlabeled'/ignore) in Cityscapes
pseudolabels.append(pseudolabel)
        # Compute the image score as the mean of the weighted confidences of the pixels kept after thresholding
classes_id, pixel_count = np.unique(pseudolabel, return_counts=True)
weighted_conf[weighted_conf <= 1] = np.nan
score = np.nanmean(weighted_conf)
        # aux array: per-class pixel counts (including the void class at index 19) plus the image score in the last slot
        aux_scores = np.zeros((num_classes + 2), dtype=np.float32)
aux_scores[-1] = score
for idx, class_id in enumerate(classes_id):
aux_scores[class_id] = pixel_count[idx]
scores_list.append(aux_scores)
return pseudolabels, scores_list, pseudolabels_not_filtered
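# Worked example with illustrative numbers: for a pixel predicted as class c with confidence
# 0.6 and cls_thresh[c] = 0.8, weighted_conf = 0.6 / 0.8 = 0.75 <= 1, so the pixel is
# relabelled as 19 (ignore) and excluded from the image score; with confidence 0.9 the ratio
# is 1.125 > 1 and the pixel is kept.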
def apply_cbst(outputs, num_classes, tgt_num, tgt_portion):
conf_dict, pred_cls_num = prepare_confidence_maps(outputs, num_classes)
cls_thresh = compute_kc_thresholds(conf_dict, pred_cls_num, tgt_portion, num_classes)
pseudolabels, scores_list, pseudolabels_not_filtered = generate_pseudolabels(outputs, cls_thresh, tgt_num, num_classes)
return pseudolabels, scores_list, pseudolabels_not_filtered
def compute_mtp_thresholds(pred_conf, pred_cls_num, tgt_portion, num_classes):
thres = []
for i in range(num_classes):
x = pred_conf[pred_cls_num==i]
if len(x) == 0:
thres.append(0)
continue
x = np.sort(x)
        thres.append(x[int(np.round(len(x)*tgt_portion))])  # built-in int (np.int was removed in recent NumPy)
thres = np.array(thres)
thres[thres>0.90]=0.90
return thres
def apply_mtp(outputs, num_classes, tgt_num, tgt_portion):
pred_cls_num = np.zeros((tgt_num, outputs[0][0].shape[0], outputs[0][0].shape[1]), dtype=np.uint8)
pred_conf = np.zeros((tgt_num, outputs[0][0].shape[0], outputs[0][0].shape[1]), dtype=np.float32)
for index, output in enumerate(outputs):
pred_cls_num[index] = output[0]
pred_conf[index] = output[1]
thres = compute_mtp_thresholds(pred_conf, pred_cls_num, tgt_portion, num_classes)
logger.info("MPT thresholds: {}".format(thres))
pseudolabels = []
pseudolabels_not_filtered = []
scores_list = []
for index in range(tgt_num):
pseudolabels_not_filtered.append(pred_cls_num[index])
label = pred_cls_num[index].copy()
prob = pred_conf[index]
        for i in range(num_classes):
            # build the mask before modifying 'label' so that both updates target the same pixels
            mask = (prob < thres[i]) * (label == i)
            label[mask] = 19  # trainID 19 maps to '255' ('unlabeled'/ignore) in Cityscapes
            prob[mask] = np.nan
pseudolabels.append(label)
score = np.nanmean(prob)
scores_list.append(score)
return pseudolabels, scores_list, pseudolabels_not_filtered
def create_folder(folder):
if not os.path.exists(folder):
os.makedirs(folder)
def save_pseudolabels(images, pseudolabels, scores, pseudolabels_path, coloured_pseudolabels_path,
pseudolabels_not_filtered=None, coloured_pseudolabels_not_filtered_path=None):
filenames_and_scores = os.path.join('/'.join(pseudolabels_path.split('/')[:-1]),'filenames_and_scores.txt')
images_txt = os.path.join('/'.join(pseudolabels_path.split('/')[:-1]),'selected_images_path.txt')
psedolabels_txt = os.path.join('/'.join(pseudolabels_path.split('/')[:-1]),'selected_pseudolabels_path.txt')
with open(filenames_and_scores,'w') as f:
with open(images_txt,'w') as g:
with open(psedolabels_txt,'w') as h:
for idx, image in enumerate(images):
filename = image[0].split('/')[-1]
Image.fromarray(pseudolabels[idx]).save(os.path.join(pseudolabels_path,filename))
pred_colour = 255 * np.ones([pseudolabels[idx].shape[0],pseudolabels[idx].shape[1],3], dtype=np.uint8)
for train_id, label in trainId2label.items():
pred_colour[(pseudolabels[idx] == train_id),0] = label.color[0]
pred_colour[(pseudolabels[idx] == train_id),1] = label.color[1]
pred_colour[(pseudolabels[idx] == train_id),2] = label.color[2]
Image.fromarray(pred_colour).save(os.path.join(coloured_pseudolabels_path,filename))
if pseudolabels_not_filtered is not None and coloured_pseudolabels_not_filtered_path is not None:
pred_colour = 255 * np.ones([pseudolabels_not_filtered[idx].shape[0],pseudolabels_not_filtered[idx].shape[1],3], dtype=np.uint8)
for train_id, label in trainId2label.items():
pred_colour[(pseudolabels_not_filtered[idx] == train_id),0] = label.color[0]
pred_colour[(pseudolabels_not_filtered[idx] == train_id),1] = label.color[1]
pred_colour[(pseudolabels_not_filtered[idx] == train_id),2] = label.color[2]
Image.fromarray(pred_colour).save(os.path.join(coloured_pseudolabels_not_filtered_path,filename))
                    # Write file name, image score, void-pixel count and number of classes present in the pseudolabel
f.write('%s %s %s %s\n' % (filename, str(scores[idx][-1]), str(scores[idx][-2]), str(np.count_nonzero(scores[idx][:19]))))
g.write('%s\n' % (image[0]))
h.write('%s\n' % (os.path.join(pseudolabels_path,filename)))
return images_txt, psedolabels_txt, filenames_and_scores
def merge_txts_and_save(new_txt, txt1, txt2=None):
if txt2 is not None:
files = [txt1, txt2]
else:
files = [txt1]
with open(new_txt, 'w') as f:
for file in files:
with open(file) as infile:
for line in infile:
f.write(line)
return new_txt
def update_best_score_txts_and_save(accum_scores_txt, accum_images_txt, accum_labels_txt, new_scores_txt,
new_images_txt, new_labels_txt, save_img_txt, save_labels_txt, save_scores_txt, sorting_method):
with open(accum_scores_txt,'r') as f:
accum_scores = [line.rstrip().split(' ') for line in f.readlines()]
with open(new_scores_txt,'r') as f:
new_scores_txt = [line.rstrip().split(' ') for line in f.readlines()]
with open(accum_images_txt,'r') as f:
accum_images = [line.rstrip().split(' ') for line in f.readlines()]
with open(new_images_txt,'r') as f:
new_images = [line.rstrip().split(' ') for line in f.readlines()]
with open(accum_labels_txt,'r') as f:
accum_labels = [line.rstrip().split(' ') for line in f.readlines()]
with open(new_labels_txt,'r') as f:
new_labels = [line.rstrip().split(' ') for line in f.readlines()]
ignore_list = []
# Check for repeated images
for idx, score in enumerate(new_scores_txt):
for idx2, score2 in enumerate(accum_scores):
if score[0] == score2[0]:
                # Depending on the sorting method, compare void-pixel counts or scores to decide whether to update
if sorting_method == 'per_class' or sorting_method == 'per_void_pixels':
check = score[2] < score2[2]
else:
check = score[1] > score2[1]
if check:
                    # If the same image is found with a better score, update its values in all the accumulated lists
accum_scores[idx2][1] = score[1]
accum_scores[idx2][2] = score[2]
accum_scores[idx2][3] = score[3]
accum_labels[idx2] = new_labels[idx]
                    # store the index so this image is not added again later
ignore_list.append(idx)
break
# add new images into the accumulated ones
for idx, score in enumerate(new_scores_txt):
if idx not in ignore_list:
accum_scores.append(score)
accum_labels.append(new_labels[idx])
accum_images.append(new_images[idx])
# save each data in its respective txt
new_img_dataset = open(save_img_txt,'w')
new_labels_dataset = open(save_labels_txt,'w')
new_scores_dataset = open(save_scores_txt,'w')
for idx, _ in enumerate(accum_scores):
new_img_dataset.write(accum_images[idx][0] + '\n')
new_labels_dataset.write(accum_labels[idx][0] + '\n')
new_scores_dataset.write(accum_scores[idx][0] + ' ' + accum_scores[idx][1] + '\n')
new_img_dataset.close()
new_labels_dataset.close()
new_scores_dataset.close()
return save_img_txt, save_labels_txt, save_scores_txt
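# Sketch of the update rule: with 'per_class' or 'per_void_pixels' sorting an already
# accumulated image is replaced when its new pseudolabel has fewer void pixels (score[2]);
# with confidence-based sorting it is replaced when the new image score (score[1]) is higher.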
def sorting_scores(scores, sorting_method, selftraining=False):
if sorting_method == 'per_class':
sorted_idx = np.lexsort((scores[:,20],np.count_nonzero(scores[:,:19], axis=1)))[::-1]
elif sorting_method == 'per_void_pixels':
# Sorting by number of void pixels (lower to higher)
sorted_idx = np.argsort(scores[:,-2])
else:
# Sorting by confidence (lower to higher for cotraining)
sorted_idx = np.argsort(scores[:,-1])
if selftraining:
# (higher to lower for selftraining)
        sorted_idx = sorted_idx[::-1][:len(scores)]
return sorted_idx
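# Assumed layout of each row of `scores` (see generate_pseudolabels): columns 0-18 hold
# per-class pixel counts, column 19 the void/ignore pixel count and column 20 the image score,
# so 'per_class' sorts by (number of classes present, score), 'per_void_pixels' by column -2
# and the default confidence mode by column -1.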
def main(args):
cfg = setup(args)
continue_epoch = args.continue_epoch
accumulated_selection_imgA = []
accumulated_selection_pseudoA = []
    accumulated_scores_A = []
pseudolabeling = cfg.PSEUDOLABELING.MODE
collaboration = cfg.PSEUDOLABELING.COLLABORATION
accumulation_mode = cfg.PSEUDOLABELING.ACCUMULATION
num_selected = cfg.PSEUDOLABELING.NUMBER
weights_inference_branchA = cfg.MODEL.WEIGHTS_BRANCH_A
weights_train_branchA = cfg.MODEL.WEIGHTS_BRANCH_A
if pseudolabeling == 'cbst':
tgt_portion = INIT_TGT_PORT
    # Set the initial score to surpass during an epoch to propagate weights to the next one
best_score_A = args.initial_score_A
# Build test dataset
built_custom_dataset(cfg, cfg.DATASETS.TEST_IMG_TXT, cfg.DATASETS.TEST_GT_TXT, cfg.DATASETS.TEST_NAME, test=True)
# set a seed for the unlabeled data selection
if args.seed is not None:
seed = args.seed
else:
seed = random.randrange(sys.maxsize)
# Start self-training
for epoch in range(args.continue_epoch,args.epochs):
logger.info("Starting training from iteration {}".format(epoch))
# prepare unlabeled data
logger.info("prepare unlabeled data")
seed = seed + epoch
logger.info("Seed for unlabeled data {}".format(seed))
        # Read unlabeled data from the specified txt and randomly select up to max_unlabeled_samples samples
unlabeled_datasetA = get_unlabeled_data(cfg.DATASETS.UNLABELED_DATASET_A, args.step_inc, seed, cfg.DATASETS.MAX_UNLABELED_SAMPLES)
logger.info("Unlabeled data selected from {}: {}".format(cfg.DATASETS.UNLABELED_DATASET_A,len(unlabeled_datasetA)))
        # Register the unlabeled dataset in Detectron2
built_inference_dataset(cfg, unlabeled_datasetA, args.unlabeled_dataset_A_name)
# Compute inference on unlabeled datasets
model = build_model(cfg)
logger.info("Compute inference on unlabeled datasets")
start_time = time.perf_counter()
        # Inference returns a list of [label map, confidence map] pairs
inference_A = inference_on_imlist(cfg, model, weights_inference_branchA, args.unlabeled_dataset_A_name)
total_time = time.perf_counter() - start_time
logger.info("Compute inference on unlabeled dataset A: {:.2f} s".format(total_time))
logger.info("Pseudolabeling mode: {}".format(pseudolabeling))
if pseudolabeling == 'cbst':
start_time = time.perf_counter()
pseudolabels_A, scores_listA, pseudolabels_A_not_filtered = apply_cbst(inference_A, cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,len(unlabeled_datasetA),tgt_portion)
total_time = time.perf_counter() - start_time
logger.info("CBST on unlabeled dataset A: {:.2f} s".format(total_time))
tgt_portion = min(tgt_portion + TGT_PORT_STEP, MAX_TGT_PORT)
elif pseudolabeling == 'mpt':
start_time = time.perf_counter()
pseudolabels_A, scores_listA, pseudolabels_A_not_filtered = apply_mtp(inference_A, cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,len(unlabeled_datasetA), 0.5)
total_time = time.perf_counter() - start_time
logger.info("MPT on unlabeled dataset A: {:.2f} s".format(total_time))
else:
raise Exception('unknown pseudolabeling method defined')
#save pseudolabels
pseudolabels_path_model_A = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch),'pseudolabeling/pseudolabels')
create_folder(pseudolabels_path_model_A)
coloured_pseudolabels_path_model_A = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch),'pseudolabeling/coloured_pseudolabels')
create_folder(coloured_pseudolabels_path_model_A)
coloured_pseudolabels_not_filtered_path_model_A = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch),'pseudolabeling/coloured_pseudolabels_not_filtered')
create_folder(coloured_pseudolabels_not_filtered_path_model_A)
dataset_A_path = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch),'unlabeled_data_selected')
create_folder(dataset_A_path)
checkpoints_A_path = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch),'checkpoints')
create_folder(checkpoints_A_path)
        # Continue training from the specified epoch
if continue_epoch > 0:
accumulated_selection_imgA = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch-1),'unlabeled_data_selected/dataset_img.txt')
accumulated_selection_pseudoA = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch-1),'unlabeled_data_selected/dataset_pseudolabels.txt')
accumulated_scores_A = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch-1),'unlabeled_data_selected/filenames_and_scores.txt')
continue_epoch = 0
scores_listA = np.asarray(scores_listA)
pseudolabels_A = np.asarray(pseudolabels_A)
unlabeled_datasetA = np.asarray(unlabeled_datasetA)
pseudolabels_A_not_filtered = np.asarray(pseudolabels_A_not_filtered)
        # Sort pseudolabels by the method selected in the config file
logger.info("Sorting mode: {}".format(cfg.PSEUDOLABELING.SORTING))
sorted_idx = sorting_scores(scores_listA, cfg.PSEUDOLABELING.SORTING, selftraining=True)
sorted_scores_listA = scores_listA[sorted_idx]
sorted_pseudolabels_A = pseudolabels_A[sorted_idx]
sorted_unlabeled_datasetA = unlabeled_datasetA[sorted_idx]
sorted_pseudolabels_A_not_filtered = pseudolabels_A_not_filtered[sorted_idx]
# free memory
del scores_listA
del pseudolabels_A
del unlabeled_datasetA
del pseudolabels_A_not_filtered
gc.collect()
logger.info("Select candidates and Save on disk")
# select candidates and save them to add them to the source data
if len(sorted_unlabeled_datasetA) > cfg.PSEUDOLABELING.NUMBER:
images_txt_A, psedolabels_txt_A, filenames_and_scoresA = save_pseudolabels(sorted_unlabeled_datasetA[:num_selected], sorted_pseudolabels_A[:num_selected], sorted_scores_listA[:num_selected],
pseudolabels_path_model_A, coloured_pseudolabels_path_model_A, sorted_pseudolabels_A_not_filtered[:num_selected], coloured_pseudolabels_not_filtered_path_model_A)
else:
images_txt_A, psedolabels_txt_A, filenames_and_scoresA = save_pseudolabels(sorted_unlabeled_datasetA, sorted_pseudolabels_A, sorted_scores_listA, pseudolabels_path_model_A,
coloured_pseudolabels_path_model_A, sorted_pseudolabels_A_not_filtered, coloured_pseudolabels_not_filtered_path_model_A)
# free memory
del sorted_unlabeled_datasetA
del sorted_pseudolabels_A
del sorted_scores_listA
del sorted_pseudolabels_A_not_filtered
gc.collect()
# Compute data accumulation procedure
logger.info("Compute data accumulation procedure selected: {}".format(accumulation_mode))
if accumulation_mode is not None and len(accumulated_selection_imgA) > 0:
if accumulation_mode.lower() == 'all':
accumulated_selection_imgA = merge_txts_and_save(os.path.join(dataset_A_path,'dataset_img.txt'),
accumulated_selection_imgA, images_txt_A)
accumulated_selection_pseudoA = merge_txts_and_save(os.path.join(dataset_A_path,'dataset_pseudolabels.txt'),
accumulated_selection_pseudoA, psedolabels_txt_A)
accumulated_scores_A = merge_txts_and_save(os.path.join(dataset_A_path,'filenames_and_scores.txt'),
accumulated_scores_A, filenames_and_scoresA)
if accumulation_mode.lower() == 'update_best_score':
accumulated_selection_imgA, accumulated_selection_pseudoA, accumulated_scores_A = update_best_score_txts_and_save(
accumulated_scores_A, accumulated_selection_imgA, accumulated_selection_pseudoA,
filenames_and_scoresA, images_txt_A, psedolabels_txt_A,
os.path.join(dataset_A_path,'dataset_img.txt'),
os.path.join(dataset_A_path,'dataset_pseudolabels.txt'),
os.path.join(dataset_A_path,'filenames_and_scores.txt'), cfg.PSEUDOLABELING.SORTING)
else:
#No accumulation, only training with new pseudolabels
accumulated_selection_imgA = merge_txts_and_save(os.path.join(dataset_A_path,'dataset_img.txt'),
images_txt_A)
accumulated_selection_pseudoA = merge_txts_and_save(os.path.join(dataset_A_path,'dataset_pseudolabels.txt'),
psedolabels_txt_A)
accumulated_scores_A = merge_txts_and_save(os.path.join(dataset_A_path,'filenames_and_scores.txt'),
filenames_and_scoresA)
if cfg.SOLVER.ALTERNATE_SOURCE_PSEUDOLABELS:
# create one dataloader for the source data and another for target pseudolabels
dataset_A_source = cfg.DATASETS.TRAIN_NAME + '_A_source' + str(epoch)
dataset_A_target = cfg.DATASETS.TRAIN_NAME + '_A_target' + str(epoch)
built_custom_dataset(cfg, cfg.DATASETS.TRAIN_IMG_TXT, cfg.DATASETS.TRAIN_GT_TXT, dataset_A_source)
built_custom_dataset(cfg, accumulated_selection_imgA, accumulated_selection_pseudoA, dataset_A_target)
# Train model A
logger.info("Training Model A")
results_A = do_train(cfg, model, weights_train_branchA, dataset_A_source, cfg.DATASETS.TEST_NAME,'a', checkpoints_A_path, epoch, args.continue_epoch,
resume=False, dataset_pseudolabels=dataset_A_target)
DatasetCatalog.remove(dataset_A_source)
MetadataCatalog.remove(dataset_A_source)
DatasetCatalog.remove(dataset_A_target)
MetadataCatalog.remove(dataset_A_target)
else:
            # create a dataloader adding the pseudolabels to the source dataset
dataset_A_name = cfg.DATASETS.TRAIN_NAME + '_A_' + str(epoch)
built_custom_dataset(cfg, cfg.DATASETS.TRAIN_IMG_TXT, cfg.DATASETS.TRAIN_GT_TXT,
dataset_A_name, True, accumulated_selection_imgA, accumulated_selection_pseudoA)
# Train model A
logger.info("Training Model A")
results_A = do_train(cfg, model, weights_train_branchA, dataset_A_name, cfg.DATASETS.TEST_NAME,'a', checkpoints_A_path, epoch, args.continue_epoch,
resume=False)
# delete all datasets registered during epoch
DatasetCatalog.remove(dataset_A_name)
MetadataCatalog.remove(dataset_A_name)
DatasetCatalog.remove(args.unlabeled_dataset_A_name)
MetadataCatalog.remove(args.unlabeled_dataset_A_name)
        # refresh the weight file pointers after the epoch so that the next inference uses the best/latest model
if args.best_model:
# Assign best model obtained until now to generate the pseudolabels in the next cycle
# Model only used for inference
for score, iteration in results_A:
if score > best_score_A:
best_score_A = score
weights_inference_branchA = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch),'checkpoints/model_%s.pth' % (str(iteration).zfill(7)))
if not args.scratch_training:
weights_inference_branchA = weights_train_branchA
logger.info("Best model A until now: {}".format(weights_inference_branchA))
logger.info("Best mIoU: {}".format(best_score_A))
else:
# The model for the next inference and training cycle is the last one obtained
weights_train_branchA = os.path.join(cfg.OUTPUT_DIR,'model_A',str(epoch),'checkpoints/model_final.pth')
weights_inference_branchA = weights_train_branchA
if __name__ == "__main__":
default_parser = default_argument_parser()
args = cotraining_argument_parser(default_parser).parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
``` |
{
"source": "JOSELIN-CONDORI/Tarea-01-Sistemas",
"score": 4
} |
#### File: JOSELIN-CONDORI/Tarea-01-Sistemas/tarea4.py
```python
def estacionamiento1():
    # define variables and other values
estacionamiento=0
    # input data
cantidadx=int(input("cuanto tiempo se quedara:"))
    # process
    # use if/elif so that only one pricing bracket applies to the given time
    if cantidadx<=2:
        estacionamiento=5*cantidadx
    elif cantidadx<=5:
        estacionamiento=(-4*(2-cantidadx))+10
    elif cantidadx<=10:
        estacionamiento=(3*(cantidadx-5))+22
    elif cantidadx>=11:
        estacionamiento=(2*(cantidadx-10))+37
    # output data
print("pagara en total:",estacionamiento)
estacionamiento1()
```
#### File: JOSELIN-CONDORI/Tarea-01-Sistemas/tarea5.py
```python
def muestraMenorEdad():
    # define variables and other values
pnombre=""
pedad=0
    # input data
p1nombre=input("Ingrese Nombre 1ra Persona:")
p1edad=int(input("Ingrese edad 1ra Persona:"))
p2nombre=input("Ingrese Nombre 2da Persona:")
p2edad=int(input("Ingrese edad 2da Persona:"))
p3nombre=input("Ingrese Nombre 3ra Persona:")
p3edad=int(input("Ingrese edad 3ra Persona:"))
    # process
if p1edad<p2edad and p1edad<p3edad:
pnombre=p1nombre
pedad=p1edad
elif p2edad<p1edad and p2edad<p3edad:
pnombre=p2nombre
pedad=p2edad
elif p3edad<p1edad and p3edad<p2edad:
pnombre=p3nombre
pedad=p3edad
elif p1edad==p2edad and p2edad==p3edad:
pnombre=p1nombre+", "+p2nombre+" y "+p3nombre
pedad=p1edad
elif p1edad==p2edad:
pnombre=p1nombre+" y "+p2nombre
pedad=p1edad
elif p2edad==p3edad:
pnombre=p2nombre+" y "+p3nombre
pedad=p2edad
else:
pnombre=p1nombre+" y "+p3nombre
pedad=p3edad
    # output data
print("La(a) persona(s)", pnombre, " tiene(n):", pedad)
muestraMenorEdad()
``` |
{
"source": "JoselineHouwman/helpdesk_klimaat",
"score": 2
} |
#### File: klimaat_helpdesk/core/views.py
```python
from django.urls import reverse_lazy
from django.views.generic import TemplateView, FormView
from klimaat_helpdesk.cms.models import Answer, AnswerCategory
from klimaat_helpdesk.core.forms import AskQuestion
from klimaat_helpdesk.core.models import Question
from klimaat_helpdesk.experts.models import Expert
class HomePage(TemplateView):
template_name = 'core/home_page.html'
def get_context_data(self, **kwargs):
latest_answers = Answer.objects.live()[:10]
categories = AnswerCategory.objects.all()
expert_profile = Expert.objects.first()
context = super(HomePage, self).get_context_data(**kwargs)
context.update({
'answers': latest_answers,
'categories': categories,
'expert_profile': expert_profile,
})
return context
home_page = HomePage.as_view()
class NewQuestion(FormView):
form_class = AskQuestion
template_name = 'core/new_question.html'
success_url = reverse_lazy('core:new-question-thanks')
def form_valid(self, form):
Question.objects.create(
question=form.cleaned_data['question'],
user_email=form.cleaned_data.get('user_email', None),
asked_by_ip=self.request.META.get('REMOTE_ADDR')
)
return super(NewQuestion, self).form_valid(form)
new_question = NewQuestion.as_view()
``` |
{
"source": "joselito-coder/Hacktoberfest2021",
"score": 4
} |
#### File: Program's_Contributed_By_Contributors/Python_Programs/factorial_generator.py
```python
def factorial(n):
if n == 0: return 1
return n*factorial(n-1)
``` |
{
"source": "joselitomedi/Introduction-to-Algorithms",
"score": 4
} |
#### File: Insertion Sort/Python/insertion_sort.py
```python
data = [5, 2, 4, 6, 1, 3]  # renamed from 'list' to avoid shadowing the built-in
def insertion_sort(sequence,steps=False):
for j in range(1,len(sequence)):
item = sequence[j]
i = j - 1
# Insert item (element j) in the sorted sequence sequence[0...j-1]
while i >= 0 and sequence[i] > item:
sequence[i + 1] = sequence[i]
i = i - 1
sequence[i + 1] = item
# sorting steps (optional parameter)
if steps == True:
print("step ",j,sequence)
return sequence
print(insertion_sort(data, steps=True))
``` |
{
"source": "JoseLlarena/autoclean",
"score": 2
} |
#### File: autoclean/filtering/cli.py
```python
from click import argument, group
from autoclean import global_config
from autoclean.filtering.api import filter_out, evaluate
@group()
def cli():
pass
@cli.command(help='Filters the text at IN_PATH using the given score THRESHOLD and writes the result to OUT_PATH')
@argument('in_path', type=str)
@argument('out_path', type=str)
@argument('threshold', type=float)
def filter(in_path: str, out_path: str, threshold: float):
filter_out(in_path, out_path, threshold)
@cli.command(help='Evaluates filtering quality on the given in-domain and out-of-domain text files')
@argument('in_domain_path', type=str)
@argument('out_domain_path', type=str)
def eval(in_domain_path: str, out_domain_path: str):
evaluate(in_domain_path, out_domain_path)
if __name__ == '__main__':
global_config()
cli()
```
#### File: autoclean/segmentation/segmenters.py
```python
from functools import lru_cache
from typing import Callable as Fn
from autoclean import Seq
from autoclean.segmentation import CACHE
@lru_cache(maxsize=CACHE)
def viterbi(seq: Seq, cost_of: Fn, splits_of: Fn) -> Seq:
"""
Segments the given sequence using the Viterbi algorithm, parameterised by a splitting function to generate the
splits, and a cost function to score those splits. Based on <NAME>'s chapter 14 of "Beautiful Data" by
<NAME> Hammerbacher.
    It uses recursion heavily and so it can only handle sentences up to a moderate length. It may require increasing
    the default Python recursion depth limit.
The algorithm proceeds by splitting the input sequence into a head of starting words and a tail of joined up end
words. This is done recursively for each head until the only head left is the empty sequence,
at which point, the recursion ends and then the cost of each segmentation of a given head is evaluated and the
one with the lowest cost is returned, the rest discarded and not recursed into. The segmentations are created
by joining the segmentation of a head with its tail, for each head-tail split.
:param seq: the sequence to segment
:param cost_of: the cost function to score segmentations
:param splits_of: the function to generate segmentations
:return: a segmented sequence
"""
if len(seq) < 2:
return seq
segmentations = (viterbi(head, cost_of, splits_of) + tail for head, tail in splits_of(seq))
return min(segmentations, key=cost_of, default=seq)
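# Illustrative usage (mirrors the unit tests), with a toy cost function that favours
# in-vocabulary chunks and string_splits from autoclean.segmentation.splitters:
#
#   cost = lambda chunk: 0 if chunk in {('fun',), ('day',), ('fun', 'day')} else 1
#   viterbi(tuple('funday'), cost, string_splits)  # -> ('fun', 'day')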
```
#### File: autoclean/segmentation/splitters.py
```python
from functools import lru_cache
from typing import Tuple, Callable as Fn, Iterable
from autoclean import Seq, require
from autoclean.segmentation import CACHE
@lru_cache(maxsize=CACHE)
def string_splits(seq: Seq, is_valid: Fn[[str], bool] = lambda split: True) -> Iterable[Tuple[Seq, Seq]]:
"""
Returns all possible splits of the given sequence that are allowed according to the given predicate
:param seq: the sequence to generate splits for
:param is_valid: whether the tail split is a valid sequence
:return: a collection of splits, in the form of a head and a joined-up tail of tokens
"""
require(len(seq) > 0, 'sequence should have at least 1 element')
return tuple((head := seq[:idx], tail := (tail_split,))
for idx in reversed(range(len(seq)))
if is_valid(tail_split := ''.join(seq[idx:])))
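# Illustrative example: string_splits(('a', 'b')) returns
# ((('a',), ('b',)), ((), ('ab',))), i.e. every head prefix paired with the joined-up tail.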
```
#### File: autoclean/unittests/test_segmentation.py
```python
from autoclean.segmentation.segmenters import viterbi
from autoclean.segmentation.splitters import string_splits
def test_splitting():
output = list(string_splits(tuple('funday')))
assert output == [(('f', 'u', 'n', 'd', 'a'), ('y',)),
(('f', 'u', 'n', 'd'), ('ay',)),
(('f', 'u', 'n'), ('day',)),
(('f', 'u'), ('nday',)),
(('f',), ('unday',)),
((), ('funday',))]
def test_segmentation():
def cost_fn(chunk):
return 0 if chunk in {('fun',), ('fun', 'day'), ('day',)} else 1
assert tuple(viterbi(tuple('funday'), cost_fn, string_splits)) == ('fun', 'day')
``` |
{
"source": "JoseLlarena/pypey",
"score": 4
} |
#### File: pypey/pypey/func.py
```python
from functools import partial
from typing import Any, Callable as Fn, TypeVar, Iterable, Type, Tuple
__all__ = ['Fn', 'H', 'I', 'K', 'T', 'V', 'X', 'Y', 'px', 'ident', 'pipe', 'require', 'require_val', 'throw']
H = TypeVar('H', Any, Any)
I = TypeVar('I', bound=Iterable)
K = TypeVar('K')
T = TypeVar('T')
V = TypeVar('V')
X = TypeVar('X')
Y = TypeVar('Y')
#: Concise alias of ``functools.partial``
px: Fn[..., Fn] = partial
def ident(item: T) -> T:
"""
Identity function, returns the argument passed to it.
:param item: any argument
:return: the argument passed in
"""
return item
def pipe(*functions: Fn) -> Fn:
"""
Chains given functions.
::
>>> from pypey import pipe
>>> from math import sqrt
>>> [pipe(len, sqrt)(w) for w in ('a', 'fun','day')]
[1.0, 1.7320508075688772, 1.7320508075688772]
For functions taking multiple arguments, the return of the previous function in the chain
will be unpacked only if it's a ``tuple``:
::
>>> from pypey import pipe
>>> pipe(divmod, lambda quotient, remainder: quotient + remainder)(10, 3)
4
    If a function returns an ``Iterable`` that is not a tuple but unpacking in the next function is still needed,
built-in ``tuple`` can be inserted in between to achieve the desired effect:
::
>>> from pypey import pipe
>>> pipe(range, tuple, lambda _1, _2_, _3: sum([_1, _3]))(3)
2
Conversely, if a function returns a ``tuple`` but unpacking is not required in the next function, built-in ``list``
can be used to achieve the desired effect:
::
>>> from pypey import pipe
>>> pipe(divmod, list, sum)(10, 3)
4
Note that ``list`` is the only exception to the rule that ``tuple`` returns will be unpacked.
:param functions: a variable number of functions
:return: a combined function
"""
if len(functions) == 1:
return functions[0]
return px(_pipe_functions, functions=functions)
def require(cond: bool, message: str, exception: Type[Exception] = TypeError):
"""
Guard clause, useful for implementing exception-raising checks concisely, especially useful in lambdas.
>>> from pypey import require, pype
>>> pype([1,2,'3']).do(lambda n: require(isinstance(n, int), 'not an int'), now=True)
Traceback (most recent call last):
...
TypeError: not an int
:param cond: if ``False`` the given exception will be thrown, otherwise this function is a no-op
:param message: exception message
:param exception: exception to throw if ``cond`` is ``False``, defaults to ``TypeError``
:return: nothing
"""
if not cond:
raise exception(message)
def require_val(cond: bool, message: str):
"""
Throws ``ValueError`` exception if ``cond`` is ``False``, equivalent to :func:`require` with
``exception=ValueError``.
>>> from pypey import require_val, pype
>>> pype([1,2,-3]).do(lambda n: require_val(n>0, 'not a positive number'), now=True)
Traceback (most recent call last):
...
ValueError: not a positive number
    :param cond: if ``False`` a ``ValueError`` will be thrown, otherwise this function is a no-op
:param message: the exception message
:return: nothing
"""
require(cond, message, ValueError)
def throw(exception: Type[Exception], message: str):
"""
Throws given exception with given message, equivalent to built-in ``raise``. This function is useful for raising
exceptions inside lambdas as ``raise`` is syntactically invalid in them.
>>> from pypey import throw, pype
>>> pype([1,2,3]).do(lambda n: throw(ValueError, 'test'), now=True)
Traceback (most recent call last):
...
ValueError: test
:param exception: the exception to throw
:param message: the exception message
:return: nothing
"""
raise exception(message)
def _pipe_functions(*arg: Any, functions: Tuple[Fn[..., Any], Any]) -> Any:
# created as global function to avoid issues with multiprocessing
result = arg
for idx, fn in enumerate(functions):
result = fn(*result) if idx == 0 or (idx > 0 and fn != list and isinstance(result, tuple)) else fn(result)
return result
```
#### File: pypey/unittests/__init__.py
```python
from typing import Tuple
from pypey import Pype, Fn
from random import seed, setstate, getstate
_123 = 1, 2, 3
_23 = _123[1:]
_654 = 6, 5, 4
_aba = 'a', 'b', 'a'
_ab = _aba[:-1]
_a_fun_day = 'a', 'fun', 'day'
_fun_day = _a_fun_day[1:]
_112233 = (1, 1), (2, 2), (3, 3)
_2233 = _112233[1:]
_aAfunFUNdayDAY = ('a', 'A'), ('fun', 'FUN'), ('day', 'DAY')
def _empty_pype() -> Pype:
return Pype(iter(()))
def _123_pype() -> Pype[int]:
return Pype(iter(_123))
def _654_pype() -> Pype[int]:
return Pype(iter(_654))
def _a_fun_day_pype() -> Pype[str]:
return Pype(iter(_a_fun_day))
def _112233_pype() -> Pype[Tuple[int, int]]:
return Pype((n, n) for n in _123)
def _aAfunFUNdayDAY_pype() -> Pype[Tuple[str, str]]:
return Pype((w, w.upper()) for w in _a_fun_day)
def _aba_pype() -> Pype[str]:
return Pype(iter('aba'))
def with_seed(seed_: int) -> Fn:
def decorator(function: Fn) -> Fn:
def wrapper(*args, **kwargs):
s = getstate()
seed(seed_)
ret = function(*args, **kwargs)
setstate(s)
return ret
return wrapper
return decorator
```
#### File: pypey/unittests/test_execution_laziness.py
```python
from collections import namedtuple
from operator import add
from os.path import join
from pytest import raises, mark
from pypey import Pype
from pypey.pype import SPLIT_MODES
from unittests import _23, _123, _fun_day, _aba_pype, _112233_pype, _112233, _123_pype, _a_fun_day_pype, \
_aAfunFUNdayDAY_pype, _654_pype, _654
def test_accumulation_does_not_consume_whole_pipe():
pipe = _123_pype()
next(iter(pipe.accum(add)))
assert tuple(pipe) == _23
def test_accumulation_with_initial_value_does_not_consume_whole_pipe():
pipe = _123_pype()
next(iter(pipe.accum(add, 0)))
assert tuple(pipe) == _123
def test_broadcasting_does_not_consume_whole_pipe():
pipe = _123_pype()
next(iter(pipe.broadcast(range)))
assert tuple(pipe) == _23
def test_concatenation_does_not_consume_either_iterable():
pipe = _123_pype()
other = _654_pype()
next(iter(pipe.cat(other)))
assert tuple(pipe) == _23
assert tuple(other) == _654
def test_chunking_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.chunk(2)))
assert tuple(pipe) == _23
def test_chunking_with_multiple_sizes_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.chunk([1, 2])))
assert tuple(pipe) == _23
def test_cloning_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.clone()))
assert tuple(pipe) == _123
def test_finite_cycling_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.cycle(2)))
assert tuple(pipe) == _23
def test_infinite_cycling_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.cycle()))
assert tuple(pipe) == _23
def test_distributing_items_does_not_consume_pipe():
pipe = _123_pype()
next(iter(next(iter(pipe.dist(2)))))
assert tuple(pipe) == _23
def test_dividing_pipe_consumes_it():
pipe = _123_pype()
next(iter(pipe.divide(2)))
assert tuple(pipe) == ()
def test_side_effect_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.do(print)))
assert tuple(pipe) == _23
def test_side_effect_fully_consumes_pipe_if_immediate():
pipe = _123_pype()
with raises(StopIteration):
next(iter(pipe.do(print, now=True)))
def test_parallel_lazy_side_effect_consumes_pipe():
pipe = _123_pype()
next(iter(pipe.do(print, workers=2)))
assert tuple(pipe) == ()
def test_parallel_eager_side_effect_fully_consumes_pipe():
pipe = _123_pype()
with raises(StopIteration):
next(iter(pipe.do(print, workers=2, now=True)))
def test_dropping_the_first_items_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.drop(1)))
assert tuple(pipe) == (3,)
def test_dropping_the_last_items_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.drop(-1)))
assert tuple(pipe) == (3,)
def test_dropping_items_while_predicate_is_false_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.drop_while(lambda n: n > 2)))
assert tuple(pipe) == _23
def test_eager_making_consumes_pipe():
pipe = _123_pype()
next(iter(pipe.eager()))
assert tuple(pipe) == ()
def test_enumeration_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.enum()))
assert tuple(pipe) == _23
def test_enumeration_with_swapped_index_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.enum(swap=True)))
assert tuple(pipe) == _23
def test_flattening_does_not_consume_pipe():
pipe = _a_fun_day_pype()
next(iter(pipe.flat()))
assert tuple(pipe) == _fun_day
def test_flatmapping_does_not_consume_pipe():
pipe = _a_fun_day_pype()
next(iter(pipe.flatmap(str.upper)))
assert tuple(pipe) == _fun_day
def test_computing_item_frequencies_consumes_pipe():
pipe = _a_fun_day_pype()
next(iter(pipe.freqs()))
assert tuple(pipe) == ()
def test_grouping_by_key_consumes_pipe():
pipe = _a_fun_day_pype()
next(iter(pipe.group_by(len)))
assert tuple(pipe) == ()
def test_interleaves_items_with_other_iterable_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.interleave(_a_fun_day_pype())))
assert tuple(pipe) == _23
def test_concise_iteration_does_not_consume_pipe():
pipe = _123_pype()
next(pipe.it())
assert tuple(pipe) == _23
def test_mapping_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.map(lambda n: n * 2)))
assert tuple(pipe) == _23
def test_parallel_mapping_consumes_pipe():
pipe = _123_pype()
next(iter(pipe.map(x2, workers=2)))
assert tuple(pipe) == ()
def test_partitioning_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.partition(lambda n: n > 2)[0]))
assert tuple(pipe) == _23
def test_picking_a_property_does_not_consume_pipe():
Person = namedtuple('Person', ['age'])
pipe = Pype((Person(n) for n in _123))
next(iter(pipe.pick(Person.age)))
assert tuple(pipe) == (Person(2), Person(3))
def test_picking_a_key_does_not_consume_pipe():
pipe = Pype(str(n) for n in _123)
next(iter(pipe.pick(0)))
assert tuple(pipe) == ('2', '3')
def test_eager_printing_fully_consumes_pipe():
pipe = _123_pype()
with raises(StopIteration):
next(iter(pipe.print()))
def test_lazy_printing_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.print(now=False)))
assert tuple(pipe) == _23
def test_reducing_consumes_pipe():
pipe = _123_pype()
pipe.reduce(add)
assert tuple(pipe) == ()
def test_rejecting_items_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.reject(lambda n: n > 1)))
assert tuple(pipe) == _23
def test_reversing_consumes_pipe():
pipe = _123_pype()
next(iter(pipe.reverse()))
assert tuple(pipe) == ()
def test_roundrobin_distribution_consumes_pipe():
pipe = _a_fun_day_pype()
next(iter(pipe.roundrobin()))
assert tuple(pipe) == ()
def test_sampling_consumes_pipe():
pipe = _123_pype()
next(iter(pipe.sample(2)))
assert tuple(pipe) == ()
def test_selecting_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.select(lambda n: n > 0)))
assert tuple(pipe) == _23
def test_shuffling_consumes_pipe():
pipe = _123_pype()
next(iter(pipe.shuffle()))
assert tuple(pipe) == ()
def test_asking_for_size_consumes_pipe():
pipe = _123_pype()
pipe.size()
assert tuple(pipe) == ()
def test_slicing_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.slice(0, 3)))
assert tuple(pipe) == _23
def test_sorting_consumes_pipe():
pipe = _123_pype()
next(iter(pipe.sort()))
assert tuple(pipe) == ()
@mark.parametrize('mode', SPLIT_MODES)
def test_splitting_does_not_consume_pipe(mode):
pipe = _123_pype()
next(iter(pipe.split(lambda n: n == 2, mode=mode)))
assert tuple(pipe) == (3,)
def test_asking_for_first_n_items_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.take(1)))
assert tuple(pipe) == _23
def test_asking_for_last_n_items_consumes_pipe():
pipe = _123_pype()
next(iter(pipe.take(-3)))
assert tuple(pipe) == ()
def test_selecting_items_while_condition_is_met_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.take_while(lambda n: n < 2)))
assert tuple(pipe) == _23
def test_teeing_does_not_consume_pipe():
pipe = _123_pype()
next(iter(next(iter(pipe.tee(3)))))
assert tuple(pipe) == _23
def test_applying_function_to_pipe_consumes_pipe_when_function_is_eager():
pipe = _123_pype()
next(iter(pipe.to(list)))
assert tuple(pipe) == ()
def test_applying_function_to_pipe_does_not_consume_pipe_when_function_is_lazy():
pipe = _123_pype()
next(pipe.to(enumerate))
assert tuple(pipe) == _23
def test_applying_functions_to_pipe_consumes_pipe_when_at_least_one_function_is_eager():
pipe = _123_pype()
next(iter(pipe.to(enumerate, zip, list)))
assert tuple(pipe) == ()
def test_eager_writing_to_file_fully_consumes_pipe(tmpdir):
pipe = _123_pype()
with raises(StopIteration):
next(iter(pipe.to_file(join(tmpdir, 'zip.txt'))))
def test_lazy_writing_to_file_does_not_consume_pipe(tmpdir):
pipe = _123_pype()
next(iter(pipe.to_file(join(tmpdir, 'zip.txt'), now=False)))
assert tuple(pipe) == _23
def test_writing_to_json_file_consumes_pipe(tmpdir):
target = join(tmpdir, 'object.json')
pipe = Pype((k, v) for k, v in [('a', 1), ('fun', 2), ('day', 3)])
with raises(StopIteration):
next(iter(pipe.to_json(target)))
def test_asking_for_top_n_items_consumes_pipe():
pipe = _123_pype()
next(iter(pipe.top(2)))
assert tuple(pipe) == ()
def test_asking_for_the_unique_items_does_not_consume_pipe():
pipe = _aba_pype()
next(iter(pipe.uniq()))
assert tuple(pipe) == ('b', 'a')
def test_unzipping_does_not_consume_pipe():
pipe = _112233_pype()
next(iter(tuple(pipe.unzip())[0]))
assert tuple(pipe) == _112233[1:]
def test_sliding_a_window_over_items_does_not_consume_pipe():
pipe = _123_pype()
next(iter(pipe.window(1)))
assert tuple(pipe) == (2, 3)
def test_zipping_does_not_consume_pipe():
pipe = _123_pype()
other = _123_pype()
next(iter(pipe.zip(other)))
assert tuple(pipe) == _23
assert tuple(other) == _23
def test_self_zipping_consumes_pipe():
pipe = _aAfunFUNdayDAY_pype()
next(iter(pipe.zip()))
assert tuple(pipe) == ()
def test_zipping_with_a_function_does_not_consume_pipe():
pipe = _a_fun_day_pype()
next(iter(pipe.zip_with(len)))
assert tuple(pipe) == _fun_day
def x2(n: float) -> float:
return n * 2
```
#### File: pypey/unittests/test_input_function_invocation.py
```python
from pytest import mark
from pypey import px
from pypey.pype import _unpack_fn, UNARY_WITHOUT_SIGNATURE, N_ARY_WITHOUT_SIGNATURE
class TestClass:
def __init__(self, param='param'):
self.param = param
def an_instance_method(self, first: str = 'first', *rest: str):
return first, rest
@classmethod
def a_class_method(cls, first: str = 'first', *rest: str):
return first, rest
@staticmethod
def a_static_method(first: str = 'first', *rest: str):
return first, rest
def __eq__(self, other):
return isinstance(other, type(self)) and self.param == other.param
unary_funcs = (lambda n, /: n,
lambda n=4, /: n,
lambda n: n,
lambda n=4: n)
keyword_only_unary_funcs = (lambda *, n: n, lambda *, n=4: n)
dyadic_funcs = (lambda n, m, /: [n, m], lambda n, m: [n, m])
keyword_only_dyadic_funcs = (lambda *, n, m: (n, m), lambda *, n=4, m=5: (n, m))
effectively_unary_funcs = (
lambda n=4, m=5, /: [n, m],
lambda n=4, m=5: [n, m],
lambda n, **kwargs: [n, kwargs],
lambda n=8, **kwargs: [n, kwargs],
px(lambda n, m, /: [n, m], 6),
px(lambda n=4, m=5, /: [n, m], 6),
px(lambda n, m: [n, m], 6),
px(lambda n, m: [n, m], m=7),
px(lambda n=4, m=5: [n, m], 6),
px(lambda n=4, m=5: [n, m], m=7)
)
variadic_funcs = (
lambda n, *args: [n, args],
lambda n=8, *args: [n, args],
lambda *args, n=8: [args, n],
lambda *args: args,
lambda n, *args, **kwargs: [n, args, kwargs],
lambda n=8, *args, **kwargs: [n, args, kwargs],
lambda *args, n=8, **kwargs: [args, n, kwargs],
lambda *args, **kwargs: [args, kwargs],
px(lambda n, *args: [n, args], 6),
px(lambda n, *args: [n, args], 6, 7),
px(lambda n=8, *args: [n, args], 6),
px(lambda n=8, *args: [n, args], 6, 7),
px(lambda *args: args, 4),
px(lambda *args: args, 4, 5),
px(lambda *args, n: [args, n], n=6),
px(lambda *args, n: [args, n], 6, n=7),
px(lambda *args, n=8: [args, n], 6),
px(lambda *args, n=8: [args, n], n=6),
px(lambda *args, n=8: [args, n], 6, 7),
px(lambda *args, n=8: [args, n], 6, n=7),
px(lambda *args, **kwargs: [args, kwargs], 4),
px(lambda *args, **kwargs: [args, kwargs], n=4),
px(lambda *args, **kwargs: [args, kwargs], 4, 5),
px(lambda *args, **kwargs: [args, kwargs], 4, m=5),
px(lambda *args, **kwargs: [args, kwargs], n=4, m=5),
px(lambda *args, n, **kwargs: [args, n, kwargs], n=4),
px(lambda *args, n, **kwargs: [args, n, kwargs], n=4, m=5),
px(lambda *args, n=8, **kwargs: [args, n, kwargs], 4),
px(lambda *args, n=8, **kwargs: [args, n, kwargs], n=4),
px(lambda *args, n=8, **kwargs: [args, n, kwargs], 4, 5),
px(lambda *args, n=8, **kwargs: [args, n, kwargs], 4, m=5),
px(lambda *args, n=8, **kwargs: [args, n, kwargs], n=4, m=5))
keyword_only_variadic_funcs = (lambda **kwargs: kwargs,
px(lambda **kwargs: kwargs, n=4),
px(lambda **kwargs: kwargs, n=4, m=5),
lambda *args, n: [args, n],
lambda *args, n, **kwargs: [args, n, kwargs],
lambda *args, n, **kwargs: [args, n, kwargs],
px(lambda *args, n: [args, n], 6),
px(lambda *args, n: [args, n], 6, 7))
methods = (TestClass.an_instance_method,
TestClass().an_instance_method,
TestClass.a_class_method,
TestClass.a_static_method,
TestClass)
all_fns = (unary_funcs +
keyword_only_unary_funcs +
dyadic_funcs +
keyword_only_dyadic_funcs +
effectively_unary_funcs +
variadic_funcs +
keyword_only_variadic_funcs +
methods +
tuple(UNARY_WITHOUT_SIGNATURE) +
tuple(N_ARY_WITHOUT_SIGNATURE - {breakpoint}))
non_spreading_fns = (unary_funcs +
keyword_only_unary_funcs +
keyword_only_dyadic_funcs +
effectively_unary_funcs +
keyword_only_variadic_funcs +
methods[:1] + methods[-1:] +
tuple(UNARY_WITHOUT_SIGNATURE) +
tuple(N_ARY_WITHOUT_SIGNATURE - {filter, range, slice, zip, breakpoint}))
spreading_fns = (dyadic_funcs + variadic_funcs + methods[1:-1] + (filter, range, slice, zip))
@mark.parametrize('fn', all_fns)
def test_function_is_invoked_normally_with_non_iterable_item(fn):
iterable = [1, 2]
unpacked_fn = _unpack_fn(fn, iterable[0])
item = iterable[-1]
fn_output = '<NO FN OUTPUT>'
fn_exception = '<NO FN EXCEPTION>'
try:
fn_output = fn(item)
except Exception as python_exception:
fn_exception = python_exception
unpacked_fn_output = '<NO UNPACKED FN OUTPUT>'
unpacked_fn_exception = '<NO UNPACKED FN EXCEPTION>'
try:
unpacked_fn_output = unpacked_fn(item)
except Exception as unpacked_exception:
unpacked_fn_exception = unpacked_exception
if fn_output != '<NO FN OUTPUT>':
if isinstance(fn_output, staticmethod) and isinstance(unpacked_fn_output, staticmethod) or \
isinstance(fn_output, classmethod) and isinstance(unpacked_fn_output, classmethod):
assert type(unpacked_fn_output) == type(fn_output)
else:
assert unpacked_fn_output == fn_output
else:
assert unpacked_fn_exception.__str__() == fn_exception.__str__()
@mark.parametrize('fn', non_spreading_fns)
def test_function_is_invoked_normally_with_iterable_item_when_it_is_effectively_unary(fn):
iterable = [[1, 2], [3, 4]]
unpacked_fn = _unpack_fn(fn, iterable[0])
item = iterable[-1]
fn_output = '<NO FN OUTPUT>'
fn_exception = '<NO FN EXCEPTION>'
try:
fn_output = fn(item)
except Exception as python_exception:
fn_exception = python_exception
unpacked_fn_output = '<NO UNPACKED FN OUTPUT>'
unpacked_fn_exception = '<NO UNPACKED FN EXCEPTION>'
try:
unpacked_fn_output = unpacked_fn(item)
except Exception as unpacked_exception:
unpacked_fn_exception = unpacked_exception
if fn_output != '<NO FN OUTPUT>':
if isinstance(fn_output, staticmethod) and isinstance(unpacked_fn_output, staticmethod) or \
isinstance(fn_output, classmethod) and isinstance(unpacked_fn_output, classmethod) or \
isinstance(fn_output, type(iter([]))) and isinstance(unpacked_fn_output, type(iter([]))):
assert type(unpacked_fn_output) == type(fn_output)
else:
assert unpacked_fn_output == fn_output, f'{unpacked_fn_exception}|{fn} -> {unpacked_fn}'
else:
assert unpacked_fn_exception.__str__() == fn_exception.__str__(), f'{unpacked_fn_output} |{fn} -> {unpacked_fn}'
@mark.parametrize('fn', spreading_fns)
def test_function_is_invoked_with_argument_unpacking_with_iterable_item_when_it_is_nary(fn):
iterable = [[1, 2], [3, 4]]
unpacked_fn = _unpack_fn(fn, iterable[0])
item = iterable[-1]
fn_output = '<NO FN OUTPUT>'
fn_exception = '<NO FN EXCEPTION>'
try:
fn_output = fn(*item)
except Exception as python_exception:
fn_exception = python_exception
unpacked_fn_output = '<NO UNPACKED FN OUTPUT>'
unpacked_fn_exception = '<NO UNPACKED FN EXCEPTION>'
try:
unpacked_fn_output = unpacked_fn(item)
except Exception as unpacked_exception:
unpacked_fn_exception = unpacked_exception
if fn_output != '<NO FN OUTPUT>':
if isinstance(fn_output, staticmethod) and isinstance(unpacked_fn_output, staticmethod) or \
isinstance(fn_output, classmethod) and isinstance(unpacked_fn_output, classmethod) or \
isinstance(fn_output, zip) and isinstance(unpacked_fn_output, zip):
assert type(unpacked_fn_output) == type(fn_output)
else:
assert unpacked_fn_output == fn_output, f'{unpacked_fn_exception}|{fn} -> {unpacked_fn}'
else:
assert unpacked_fn_exception.__str__() == fn_exception.__str__(), f'{unpacked_fn_output} |{fn} -> {unpacked_fn}'
```
#### File: pypey/unittests/test_non_empty_pipe.py
```python
import json
from collections import namedtuple
from multiprocessing import Value
from operator import add, neg
from os.path import join
from unittest.mock import Mock, call, create_autospec
import sys
from pypey import Pype, px, pype, TOTAL
from unittests import _123_pype, _123, _654_pype, _654, _empty_pype, _a_fun_day_pype, _23, _aba_pype, _ab, \
_aAfunFUNdayDAY_pype, with_seed
def test_can_be_iterated_through():
pipe = iter(_123_pype())
assert next(pipe) == _123[0]
assert next(pipe) == _123[1]
assert next(pipe) == _123[2]
def test_accumulates_values_across_items():
assert tuple(_123_pype().accum(add)) == (1, 3, 6)
def test_accumulates_values_across_items_with_initial_value():
assert tuple(_123_pype().accum(add, -1)) == (-1, 0, 2, 5)
def test_broadcasts_items_to_iterables():
assert tuple(_123_pype().broadcast(range)) == ((1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2))
def test_concatenates_with_another_iterable():
assert tuple(_123_pype().cat(_654_pype())) == _123 + _654
def test_concatenation_with_an_empty_iterable_returns_this_pipe():
assert tuple(_123_pype().cat(_empty_pype())) == _123
def test_breaks_pipe_into_subpipes_of_at_most_the_given_number_of_items_if_pipe_is_no_smaller_than_number():
chunks = tuple(_123_pype().chunk(2))
assert tuple(map(tuple, chunks)) == ((1, 2), (3,))
def test_breaks_pipe_into_subpipe_of_the_same_size_as_the_pipe_if_given_number_is_larger_than_pipe():
chunks = tuple(_123_pype().chunk(4))
assert tuple(map(tuple, chunks)) == ((1, 2, 3),)
def test_breaks_pipe_into_subpipes_of_the_given_sizes():
chunks = tuple(_123_pype().chunk([1, 2]))
assert tuple(map(tuple, chunks)) == ((1,), (2, 3))
def test_breaks_pipe_into_subpipes_of_the_given_sizes_dropping_items_if_sum_of_sizes_is_smaller_than_length():
chunks = tuple(_123_pype().chunk([1, 1]))
assert tuple(map(tuple, chunks)) == ((1,), (2,))
def test_breaks_pipe_into_subpipes_of_the_given_sizes_or_smaller_if_sum_of_sizes_is_greater_than_length():
chunks = tuple(_123_pype().chunk([1, 2, 3]))
assert tuple(map(tuple, chunks)) == ((1,), (2, 3), ())
def test_clones_pipe():
pipe = _123_pype()
assert tuple(pipe.clone()) == tuple(pipe)
def test_cycles_through_pipe_for_the_given_number_of_times():
assert tuple(_123_pype().cycle(n=3)) == _123 + _123 + _123
def test_cycles_through_pipe_forever_if_not_given_number_of_times():
assert tuple(_123_pype().cycle().take(9)) == _123 + _123 + _123
def test_cycling_with_zero_number_of_times_returns_an_empty_pipe(): # FIXME SHOULD THIS BE ALLOWED?
assert tuple(_123_pype().cycle(n=0)) == ()
def test_distributes_items_in_pipe_into_n_subpipes():
segments = tuple(_123_pype().dist(2))
assert tuple(map(tuple, segments)) == ((1, 3), (2,))
def test_distributing_items_into_more_subpipes_than_there_are_items_returns_empty_subpipes():
segments = tuple(_123_pype().dist(4))
assert tuple(map(tuple, segments)) == ((1,), (2,), (3,), ())
def test_divides_pipe_into_n_equal_sized_subpipes_when_n_is_multiple_of_size():
segments = tuple(_123_pype().cat(_654_pype()).divide(3))
assert tuple(map(tuple, segments)) == ((1, 2), (3, 6), (5, 4))
def test_divides_pipe_into_as_single_item_subpipes_followed_by_empty_pipes_when_n_is_larger_than_size():
segments = tuple(_123_pype().divide(4))
assert tuple(map(tuple, segments)) == ((1,), (2,), (3,), ())
def test_divides_pipe_into_same_size_subpipes_plus_excess_subpipe_when_n_is_smaller_than_size_but_not_multiple():
segments = tuple(_123_pype().cat(_654_pype()).divide(4))
assert tuple(map(tuple, segments)) == ((1,), (2,), (3,), _654)
def test_produces_a_side_effect_per_item():
side_effect = create_autospec(lambda n: n)
assert tuple(_123_pype().do(side_effect)) == _123
side_effect.assert_has_calls([call(1), call(2), call(3)])
PARALLEL_SUM = Value('i', 0)
def test_produces_a_side_effect_per_item_in_parallel():
"""
Mocks can't be pickled and only memory-shared objects which are global can be used in multiprocessing
"""
_123_pype().do(side_effect, now=True, workers=2)
assert PARALLEL_SUM.value == sum(_123)
def side_effect(n: int):
with PARALLEL_SUM.get_lock():
PARALLEL_SUM.value += n
def test_drops_the_given_number_of_first_items():
assert tuple(_123_pype().drop(1)) == (2, 3)
def test_dropping_zero_items_returns_the_same_pipe(): # FIXME SHOULD THIS BE ALLOWED?
assert tuple(_123_pype().drop(0)) == (1, 2, 3)
def test_dropping_more_first_items_than_there_are_in_pipe_is_the_same_as_dropping_as_many_as_there_are_in_it():
assert tuple(_123_pype().drop(10)) == ()
def test_drops_the_given_number_of_last_items():
assert tuple(_123_pype().drop(-2)) == (1,)
def test_dropping_more_last_items_than_there_are_in_pipe_is_the_same_as_dropping_as_many_as_there_are_in_it():
assert tuple(_123_pype().drop(-10)) == ()
def test_rejects_items_until_condition_is_true():
assert tuple(_123_pype().drop_while(lambda n: n != 2)) == _23
def test_makes_pipe_eager():
pipe = _123_pype().eager()
pipe.size()
assert pipe.size() == 3
def test_enumerates_items():
assert tuple(_a_fun_day_pype().enum(start=1)) == ((1, 'a'), (2, 'fun'), (3, 'day'))
def test_enumerates_items_with_a_swapped_index():
assert tuple(_a_fun_day_pype().enum(start=1, swap=True)) == (('a', 1), ('fun', 2), ('day', 3))
def test_flattens_pipe_of_iterables_into_a_single_iterable():
assert tuple(_a_fun_day_pype().flat()) == ('a', 'f', 'u', 'n', 'd', 'a', 'y')
def test_transforms_iterable_items_and_flattens_them_into_a_pipe_of_elements():
assert tuple(_a_fun_day_pype().flatmap(str.upper)) == ('A', 'F', 'U', 'N', 'D', 'A', 'Y')
def test_computes_item_frequencies_with_total():
pipe = pype('AFUNDAY')
assert tuple(pipe.freqs()) == (('A', 2, 2 / 7),) + tuple((char, 1, 1 / 7) for char in 'FUNDY') + ((TOTAL, 7, 1.),)
def test_computes_item_frequencies_without_total():
pipe = pype('AFUNDAY')
assert tuple(pipe.freqs(total=False)) == (('A', 2, 2 / 7),) + tuple((char, 1, 1 / 7) for char in 'FUNDY')
def test_groups_items_by_given_key():
assert tuple(_a_fun_day_pype().group_by(len)) == ((1, ['a']), (3, ['fun', 'day']))
def test_interleaves_items_with_other_iterable():
assert tuple(_123_pype().interleave(_a_fun_day_pype())) == (1, 'a', 2, 'fun', 3, 'day')
def test_interleaves_items_with_other_iterable_truncating_to_shortest():
assert tuple(_123_pype().interleave(_a_fun_day_pype(), n=2)) == (1, 2, 'a', 3, 'fun')
def test_interleaves_items_with_other_iterable_skipping_items_if_other_pipe_is_exhausted():
assert tuple(_123_pype().interleave(_a_fun_day_pype(), n=2, trunc=False)) == (1, 2, 'a', 3, 'fun', 'day')
def test_interleaves_items_with_other_iterable_skipping_items_this_pipe_is_exhausted():
assert tuple(_123_pype().interleave(['a', 'fun', 'fun', 'day'], trunc=False)) == (1, 'a', 2, 'fun', 3, 'fun', 'day')
def test_interleaving_with_an_empty_iterable_skipping_items_returns_this_pipe():
assert tuple(_123_pype().interleave(_empty_pype(), trunc=False)) == _123
def test_interleaving_with_an_empty_iterable_with_truncation_returns_an_empty_pipe():
assert tuple(_123_pype().interleave(_empty_pype(), trunc=True)) == ()
def test_can_be_iterated_through_concisely():
pipe = _123_pype().it()
assert next(pipe) == _123[0]
assert next(pipe) == _123[1]
assert next(pipe) == _123[2]
def test_transforms_items():
assert tuple(_123_pype().map(px(pow, 2), round)) == (2, 4, 8)
def test_transforms_items_in_parallel():
assert tuple(_123_pype().map(px(pow, 2), workers=2)) == (2, 4, 8)
def test_partitions_items_according_to_predicate_into_a_tuple_of_pipes():
assert tuple(map(tuple, _123_pype().partition(lambda n: n < 2))) == ((2, 3), (1,))
def test_picks_items_property():
Person = namedtuple('Person', ['age'])
pipe = Pype((Person(11), Person(22), Person(33)))
assert tuple(pipe.pick(Person.age)) == (11, 22, 33)
def test_picks_items_elements_at_the_given_key():
pipe = Pype(str(n) for n in _123)
assert tuple(pipe.pick(0)) == ('1', '2', '3')
def test_prints_each_item_using_str():
mock_stdout = Mock(spec_set=sys.stdout)
_a_fun_day_pype().print(file=mock_stdout)
mock_stdout.write.assert_has_calls([call('a'), call('\n'), call('fun'), call('\n'), call('day'), call('\n')])
def test_prints_each_item_as_per_the_given_function():
mock_stdout = Mock(spec_set=sys.stdout)
_123_pype().print(lambda n: f'n:{n}', file=mock_stdout)
mock_stdout.write.assert_has_calls([call('n:1'), call('\n'), call('n:2'), call('\n'), call('n:3'), call('\n')])
def test_reduces_items_to_single_value():
assert _123_pype().reduce(lambda summation, n: summation + n) == sum(_123)
def test_reduces_items_to_single_value_starting_with_a_initial_item():
assert _123_pype().reduce(lambda summation, n: summation + n, init=-1) == sum(_23)
def test_rejects_items_that_fulfill_predicate():
assert tuple(_123_pype().reject(lambda n: n < 2)) == _23
def test_reverses_pipe():
assert tuple(_123_pype().reverse()) == (3, 2, 1)
def test_returns_iterable_items_elements_in_a_roundrobin_fashion():
assert tuple(_a_fun_day_pype().roundrobin()) == ('a', 'f', 'd', 'u', 'a', 'n', 'y')
@with_seed(42)
def test_samples_items_with_current_seed():
assert tuple(_123_pype().sample(2)) == (3, 1)
def test_samples_items_with_given_seed():
assert tuple(_123_pype().sample(2, seed_=42)) == (3, 1)
def test_selects_items_that_fulfill_predicate():
assert tuple(_123_pype().select(lambda n: n < 2)) == (1,)
@with_seed(42)
def test_shuffles_items_with_current_seed():
assert tuple(_123_pype().shuffle()) == (2, 1, 3)
def test_shuffles_items_with_given_seed():
assert tuple(_123_pype().shuffle(seed_=42)) == (2, 1, 3)
def test_returns_the_size_of_the_pipe():
assert _123_pype().size() == len(_123)
def test_produces_a_slice_of_the_pipe():
assert tuple(_123_pype().slice(1, 2)) == (2,)
def test_slicing_with_end_larger_than_the_size_is_the_same_as_slicing_with_end_equal_to_the_size():
assert tuple(_123_pype().slice(1, 3)) == tuple(_123_pype().slice(1, 4))
def test_slicing_with_start_larger_than_the_size_returns_an_empty_pipe():
assert tuple(_123_pype().slice(6, 7)) == ()
def test_sorts_items():
assert tuple(_a_fun_day_pype().sort()) == ('a', 'day', 'fun')
def test_sorts_items_in_reverse_order():
assert tuple(_a_fun_day_pype().sort(rev=True)) == ('fun', 'day', 'a')
def test_sorts_items_with_key():
assert tuple(_123_pype().sort(lambda n: -n)) == (3, 2, 1)
def test_splits_pipeline_after_predicate_is_true():
assert tuple(map(tuple, _123_pype().split(lambda n: n == 2))) == ((1, 2), (3,))
def test_splits_pipeline_before_predicate_is_true():
assert tuple(map(tuple, _123_pype().split(lambda n: n == 2, mode='before'))) == ((1,), (2, 3,))
def test_splits_pipeline_before_and_after_predicate_is_true_leaving_true_items_out():
assert tuple(map(tuple, _123_pype().split(lambda n: n == 2, mode='at'))) == ((1,), (3,))
def test_produces_the_first_n_items():
assert tuple(_123_pype().take(1)) == (1,)
def test_produces_empty_pipe_when_asked_for_first_0_items():
assert tuple(_123_pype().take(0)) == ()
def test_asking_for_more_first_items_than_the_size_is_the_same_as_asking_for_as_many_first_items_as_the_size():
assert tuple(_123_pype().take(10)) == tuple(_123_pype().take(3))
def test_produces_the_last_n_items():
assert tuple(_123_pype().take(-2)) == _23
def test_asking_for_more_last_items_than_the_size_is_the_same_as_asking_for_as_many_last_items_as_the_size():
assert tuple(_123_pype().take(-10)) == tuple(_123_pype().take(-3))
def test_selects_items_until_condition_is_true():
assert tuple(_123_pype().take_while(lambda n: n < 3)) == (1, 2)
def test_produces_multiple_copies_of_itself():
copy1, copy2, copy3 = _123_pype().tee(3)
assert (tuple(copy1), tuple(copy2), tuple(copy3)) == (_123, _123, _123)
def test_applies_function_to_itself():
assert _123_pype().to(tuple) == _123
def test_applies_several_functions_to_itself():
assert _123_pype().to(tuple, pype, Pype.size) == len(_123)
def test_lazily_writes_items_to_file(tmpdir):
target = join(tmpdir, '123.txt')
assert tuple(_123_pype().to_file(target, now=False)) == _123
with open(target) as target:
assert target.readlines() == ['1\n', '2\n', '3\n']
def test_eagerly_writes_items_to_file(tmpdir):
target = join(tmpdir, '123.txt')
pipe = _123_pype().to_file(target)
with open(target) as target:
assert target.readlines() == ['1\n', '2\n', '3\n']
assert tuple(pipe) == ()
def test_writes_items_to_file_without_line_terminator(tmpdir):
target = join(tmpdir, '123.txt')
_123_pype().map(str).to_file(target, eol=False, now=True)
with open(target) as target:
assert target.readlines() == ['123']
def test_writes_pairs_to_json_file_as_object(tmpdir):
target = join(tmpdir, 'object.json')
Pype([('a', 1), ('fun', 2), ('day', 3)]).to_json(target)
with open(target) as file:
assert json.load(file) == {'a': 1, 'fun': 2, 'day': 3}
def test_writes_pairs_to_json_file_as_list(tmpdir):
target = join(tmpdir, 'list.json')
Pype([('a', 1), ('fun', 2), ('day', 3)]).to_json(target, as_dict=False)
with open(target) as file:
assert json.load(file) == [['a', 1], ['fun', 2], ['day', 3]]
def test_writes_items_to_json_file_as_list(tmpdir):
target = join(tmpdir, 'list.json')
Pype(['a', 'fun', 'day']).to_json(target, as_dict=False)
with open(target) as file:
assert json.load(file) == ['a', 'fun', 'day']
def test_finds_top_items():
assert tuple(_123_pype().top(1)) == (3,)
def test_finds_top_items_with_key():
assert tuple(_123_pype().top(1, neg)) == (1,)
def test_produces_unique_items():
assert tuple(_aba_pype().uniq()) == _ab
def test_creates_multiple_pipes_from_iterable_items_own_items():
pairs = Pype(((1, -1), (2, -2), (3, -3)))
lefts = 1, 2, 3
rights = -1, -2, -3
assert tuple(map(tuple, pairs.unzip())) == (lefts, rights)
def test_sliding_window_of_size_0_returns_a_pipe_with_a_single_empty_window(): # FIXME SHOULD THIS BE ALLOWED?
assert tuple(map(tuple, _123_pype().window(0))) == ((),)
def test_produces_windows_slid_over_items():
assert tuple(map(tuple, _123_pype().window(2))) == ((1, 2), (2, 3))
def test_produces_windows_slid_over_items_with_given_shift_and_padding_value():
assert tuple(map(tuple, _123_pype().window(2, shift=2, pad='filler'))) == ((1, 2), (3, 'filler'))
def test_zipping_with_a_pipe_of_the_same_size_returns_a_pipe_of_the_same_size_with_paired_items():
assert tuple(_123_pype().zip(_654_pype())) == ((1, 6), (2, 5), (3, 4))
def test_zipping_with_a_pipe_of_different_size_returns_a_pipe_the_size_of_the_longer_one_with_missing_items_padded():
assert tuple(_123_pype().zip(Pype('funny'), trunc=False, pad=4)) \
== ((1, 'f'), (2, 'u'), (3, 'n'), (4, 'n'), (4, 'y'))
def test_self_zipping_when_items_have_the_same_size_returns_pipe_with_the_paired_items_elements():
assert tuple(_aAfunFUNdayDAY_pype().zip()) == (('a', 'fun', 'day'), ('A', 'FUN', 'DAY'))
def test_self_zipping_with_different_sized_items_gives_pipe_with_items_the_size_of_the_longest_one_with_padding():
assert tuple(_a_fun_day_pype().zip(trunc=False, pad='?')) == (('a', 'f', 'd'), ('?', 'u', 'a'), ('?', 'n', 'y'))
def test_self_zipping_with_a_function_pairs_items_with_output_of_said_function():
assert tuple(_a_fun_day_pype().zip_with(len)) == (('a', 1), ('fun', 3), ('day', 3))
``` |
{
"source": "joselluis7/virtual-store",
"score": 2
} |
#### File: virtual-store/app/models.py
```python
from datetime import datetime
from enum import unique
from operator import index
from sqlalchemy.orm import backref
from app.enums.EStatus import EStatus
from app.extension import db
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), nullable=False, unique=True, index=True)
password = db.Column(db.String(255), nullable=False)
item = db.relationship("Item", backref="user", uselist=True)
profile = db.relationship("Profile", backref="user", uselist=False)
def __repr__(self):
return self.email
class Profile(db.Model):
__tablename__ = "profiles"
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(30), nullable=False)
last_name = db.Column(db.String(30), nullable=False)
document_id = db.Column(db.String(20), nullable=False, unique=True, index=True)
phone = db.Column(db.String(20), nullable=False, unique=True, index=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
def __repr__(self):
return self.first_name + self.last_name
class Product(db.Model):
__tablename__ = "products"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(150), nullable=False)
slug = db.Column(db.String(150), nullable=False, unique=True, index=True)
price = db.Column(db.Float, nullable=False)
image = db.Column(db.LargeBinary, nullable=False)
quantity = db.Column(db.Integer, nullable=False)
description = db.Column(db.Text, nullable=False)
category = db.Column(db.Integer, db.ForeignKey("categories.id"))
    created_at = db.Column(db.DateTime, default=datetime.now)  # pass the callable so each row gets its own timestamp
item = db.relationship("Item", backref="product", uselist=True)
def __repr__(self):
return self.name
class Category(db.Model):
__tablename__ = "categories"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(150), nullable=False)
slug = db.Column(db.String(150), nullable=False, unique=True, index=True)
products = db.relationship("Product", backref="categories", uselist=True)
def __repr__(self):
return self.name
class Order(db.Model):
__tablename__ = "orders"
id = db.Column(db.Integer, primary_key=True)
order_number = db.Column(db.String(20), unique=True, nullable=False)
status = db.Column(db.String(30), default=EStatus.CREATED.value, nullable=False)
item = db.relationship("Item", backref="order", uselist=True)
    created_at = db.Column(db.DateTime, default=datetime.now)  # pass the callable so each row gets its own timestamp
def __repr__(self):
return f"order number: {self.order_number}"
class Item(db.Model):
__tablename__ = "items"
id = db.Column(db.Integer, primary_key=True)
product_id = db.Column(db.Integer, db.ForeignKey("products.id"), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
order_id = db.Column(db.Integer, db.ForeignKey("orders.id"), nullable=False)
quantity = db.Column(db.Integer, nullable=False)
price = db.Column(db.Float, nullable=False)
def __repr__(self):
return (
            f"order number: {self.order.order_number} quantity: {self.quantity} user: {self.user.profile.first_name} {self.user.profile.last_name}"
)
```
#### File: app/resources/auth.py
```python
import logging
import secrets
from base64 import b64decode
from datetime import timedelta
from flask import jsonify, request
from flask_jwt_extended import create_access_token
from flask_restful import Resource, reqparse
from werkzeug.security import check_password_hash, generate_password_hash
from app.extension import db
from app.models import User
from app.services.mail import send_mail
class Login(Resource):
def get(self):
if not request.headers.get("Authorization"):
return {"error": "authorization not found"}, 400
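        # Expects an 'Authorization: Basic base64(email:password)' header; the credentials are decoded below.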
basic, code = request.headers["Authorization"].split(" ")
print(code)
if not basic.lower() == "basic":
return {"error": "bad authorization"}, 400
email, password = b64decode(code).decode().split(":")
print("email", email)
print("password", password)
user = User.query.filter_by(email=email).first()
if not user or not check_password_hash(user.password, password):
return {
"error": "authentication fails! user or password are incorrect"
}, 400
token = create_access_token(
{"id": user.id}, expires_delta=timedelta(minutes=50)
)
return {"encoded_token": token}
class Signup(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument("email", required=True, help="email field is required")
parser.add_argument(
"password", required=True, help="password field is required"
)
args = parser.parse_args()
user = User.query.filter_by(email=args.email).first()
if not user:
user = User(
email=args.email, password=generate_password_hash(args.password)
)
db.session.add(user)
print(f"{user.id, user.password, user.email}")
try:
db.session.commit()
send_mail(
"virtual store",
"<EMAIL>",
"welcome",
email=user.email,
)
except Exception as e:
db.session.rollback()
logging.critical(str(e))
return {"error": "something went wrong"}, 500
return {"message": "user added successfully"}, 200
        return {"message": "user already exists"}, 201
class ForgotPassword(Resource):
def post(self):
parser = reqparse.RequestParser(trim=True)
parser.add_argument("email", required=True)
args = parser.parse_args()
user = User.query.filter_by(email=args.email).first()
if not user:
            return {"error": "user does not exist"}, 400
generate_password = secrets.token_hex(4)
        user.password = generate_password_hash(generate_password)
db.session.add(user)
db.session.commit()
send_mail(
"Account Recovery",
"<EMAIL>",
"forgot-password",
            generate_password=generate_password,
)
        return {"message": "Email successfully sent"}
```
#### File: app/resources/categories.py
```python
import logging
from flask_restful import Resource, marshal_with, reqparse
from app.response_template import categories_fields
from app.models import Category
from app.extension import db
class Create(Resource):
def post(self):
parser = reqparse.RequestParser(trim=True)
parser.add_argument("name", required=True, help="required field")
parser.add_argument("slug", required=True, help="required field")
args = parser.parse_args()
print("FFFF: ", args.name)
category = Category.query.filter_by(slug=args.slug).first()
print(category)
if not category:
category = Category(name=args.name, slug=args.slug)
db.session.add(category)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
logging.critical(str(e))
return {"error": "something went wrong"}, 500
logging.info(f"Category {args.name} created successfully")
return {"message": "category added successfully "}, 200
        return {"message": "category already exists"}, 201
class ListCategory(Resource):
@marshal_with(categories_fields, envelope="categories")
def get(self):
category = Category.query.all()
return category
```
#### File: app/resources/order.py
```python
import logging
from datetime import datetime
from flask_jwt_extended import get_jwt_identity, jwt_required
from flask_restful import Resource, marshal, reqparse
from app.extension import db
from app.models import Item, Order, Product
from app.response_template import order_fields
class Create(Resource):
@jwt_required
def post(self):
current_user = get_jwt_identity()
parser = reqparse.RequestParser()
parser.add_argument(
"product_id", type=int, required=True, help="required field"
)
parser.add_argument("quantity", type=int, required=True, help="required field")
args = parser.parse_args()
        product = Product.query.get(args.product_id)
        if not product:
            return {"error": "product does not exist"}, 400
        print(f"ID {product.id} NAME: {product.name} USER: {current_user['id']} EPERA ")
if args.quantity > product.quantity:
return {"error": "invalid quantity"}, 400
try:
order = Order()
# remove 4 digits from miliseconds
order.order_number = datetime.utcnow().strftime("%Y%m%d%H%M%S%f")[:-4]
db.session.add(order)
db.session.commit()
item = Item()
item.order_id = order.id
item.product_id = product.id
item.user_id = current_user["id"]
item.quantity = args.quantity
item.price = product.price * item.quantity
db.session.add(item)
db.session.commit()
except Exception as e:
logging.critical(str(e))
db.session.rollback()
return {"error": "unable to create order"}, 500
return marshal(order, order_fields, "order")
class OrderGet(Resource):
def get(self, number):
order = Order.query.filter_by(order_number=number).first()
return marshal(order, order_fields, "order")
class Pay(Resource):
pass
class Notification(Resource):
pass
``` |
{
"source": "joseloc300/AutoParSelector",
"score": 3
} |
#### File: AutoParSelector/python/tf_models.py
```python
import tensorflow as tf
import numpy as np
def run_tf_regression(x_train, x_test, y_train, y_test):
print("\ntensorflow regression")
# print(tf.__version__)
tf.keras.backend.set_floatx('float64')
tf_x_train = np.asarray(x_train[:])
tf_y_train = np.asarray(y_train[:])
tf_x_test = np.asarray(x_test[:])
tf_y_test = np.asarray(y_test[:])
tf_x_train = tf.keras.utils.normalize(tf_x_train, axis=1)
tf_x_test = tf.keras.utils.normalize(tf_x_test, axis=1)
model = tf.keras.Sequential([
# tf.keras.layers.Dense(6200, activation='relu', kernel_initializer='normal', input_shape=tf_x_test[0].shape),
tf.keras.layers.Dense(62, activation='relu', kernel_initializer='normal', input_shape=tf_x_test[0].shape),
tf.keras.layers.Dense(62, activation='relu', kernel_initializer='normal'),
tf.keras.layers.Dense(62, activation='relu', kernel_initializer='normal'),
tf.keras.layers.Dense(62, activation='relu', kernel_initializer='normal'),
tf.keras.layers.Dense(62, activation='relu', kernel_initializer='normal'),
tf.keras.layers.Dense(62, activation='relu', kernel_initializer='normal'),
tf.keras.layers.Dense(1, activation='linear', kernel_initializer='normal')
])
# optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer='adam',
metrics=['mae', 'mse'])
# model.fit(tf_x_train, tf_y_train, epochs=500, batch_size=8, validation_data=(tf_x_test, tf_y_test))
model.fit(tf_x_train, tf_y_train, epochs=10, batch_size=1)
model.evaluate(tf_x_test, tf_y_test, verbose=1)
def run_tf_classification(x_train, x_test, y_train, y_test):
print("\ntensorflow classification")
# print(tf.__version__)
tf.keras.backend.set_floatx('float64')
tf_x_train = np.asarray(x_train[:])
tf_y_train = np.asarray(y_train[:])
tf_x_test = np.asarray(x_test[:])
tf_y_test = np.asarray(y_test[:])
tf_x_train = tf.keras.utils.normalize(tf_x_train, axis=1)
tf_x_test = tf.keras.utils.normalize(tf_x_test, axis=1)
model = tf.keras.Sequential([
# tf.keras.layers.Dense(6200, activation='relu', kernel_initializer='normal', input_shape=tf_x_test[0].shape),
tf.keras.layers.Dense(200, activation='relu', kernel_initializer='normal', input_shape=tf_x_test[0].shape),
tf.keras.layers.Dense(200, activation='relu', kernel_initializer='normal'),
tf.keras.layers.Dense(1, activation='linear', kernel_initializer='normal')
])
# optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer='adam',
metrics=['mae', 'mse'])
# model.fit(tf_x_train, tf_y_train, epochs=500, batch_size=8, validation_data=(tf_x_test, tf_y_test))
model.fit(tf_x_train, tf_y_train, epochs=10, batch_size=1)
model.evaluate(tf_x_test, tf_y_test, verbose=1)
``` |
{
"source": "jose-log/playlistCreator",
"score": 3
} |
#### File: jose-log/playlistCreator/database.py
```python
import sqlite3
import youtube_dl
import json
import re
import os
import spotify
#******************************************************************************
# Table structure:
# 1. The whole title text of the video
# - id --> primary key
# - title
# - length
# - youtubeID
# - id_track --> foreign key
# 2. Track names
# - id --> primary key
# - name
# - spotifyID
# - id_artist --> foreign key
# 3. Artists
# - id --> primary key
# - name
# - spotifyID
#
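# Relationships implied above: videos.id_tracks -> tracks.id, tracks.id_artists -> artists.id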
class db(object):
name = None
def __init__(self, name):
self.name = name
if os.path.exists(self.name) is True:
print(' - Opening database file \"' + self.name + '\"')
else:
print(' - Creating database file \"' + self.name + '\"')
conn = sqlite3.connect(self.name) # Connect to the database. If non-existent, create it.
cur = conn.cursor() # database handler object
# Video Names table
x = cur.execute('''
CREATE TABLE IF NOT EXISTS videos (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
title TEXT UNIQUE,
length INTEGER,
youtubeID TEXT UNIQUE,
id_tracks INTEGER )''')
# Tracks table
cur.execute('''
CREATE TABLE IF NOT EXISTS tracks (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE,
spotifyID TEXT UNIQUE,
id_artists INTEGER )''')
# Artists table
cur.execute('''
CREATE TABLE IF NOT EXISTS artists (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE,
spotifyID TEXT UNIQUE )''')
conn.commit() # Save changes
conn.close() # Close database
#**************************************************************************
def insert_single_video(self, title, duration, youtubeID):
conn = sqlite3.connect(self.name) # Connect to the database.
cur = conn.cursor() # database handler object
new_entries = []
try:
cur.execute('INSERT OR IGNORE INTO videos (title, length, youtubeID) VALUES (?, ?, ?)',(title, duration, youtubeID))
new_entries.append(youtubeID)
except:
print(' - WARNING: Already exists: ' + title + ' | ' + youtubeID)
conn.commit() # Save changes
conn.close() # Close database
return new_entries
#**************************************************************************
def insert_multiple_videos(self, videos):
conn = sqlite3.connect(self.name) # Connect to the database.
cur = conn.cursor() # database handler object
new_entries = []
for i in videos:
try:
cur.execute('''
INSERT OR FAIL INTO videos (title, length, youtubeID)
VALUES (?, ?, ?)''',(i['title'], i['duration'], i['youtubeID'])
)
new_entries.append(i['youtubeID'])
except:
print(' - MESSAGE: Already exists: ' + i['title'] + ' | ' + i['youtubeID'])
conn.commit() # Save changes
conn.close() # Close database
# returns list of YoutubeIDs newly added to Database
return new_entries
#**************************************************************************
def filter_tracks(self, new_videos):
print(' - Filtering Songs Using YoutubeDL')
new_track_id = []
        if len(new_videos) == 0:
print(' - MESSAGE: No videos to search for')
return new_track_id
conn = sqlite3.connect(self.name) # Connect to the database.
cur = conn.cursor() # database handler object
for video_id in new_videos:
youtube_url = 'https://www.youtube.com/watch?v={}'.format(video_id)
try:
# use youtube_dl to collect the song name & artist name
info = youtube_dl.YoutubeDL({}).extract_info(youtube_url, download=False)
track_name = info.get('track', None)
artist_name = info.get('artist', None)
except:
print('WARNING: Video not available. Youtube ID:', video_id)
track_name = None
artist_name = None
# save all important info and skip any missing song and artist
if track_name is not None and artist_name is not None:
print(' > found: Artist: ' + artist_name + ', Song: ' + track_name)
try:
cur.execute('INSERT OR IGNORE INTO artists (name) VALUES (?)', (artist_name,))
cur.execute('SELECT id FROM artists WHERE name = ?', (artist_name,))
artist_id = cur.fetchone()
cur.execute('INSERT OR IGNORE INTO tracks (name, id_artists) VALUES (?, ?)', (track_name, artist_id[0]))
cur.execute('SELECT id FROM tracks WHERE name = ?', (track_name,))
track_id = cur.fetchone()
cur.execute('UPDATE videos SET id_tracks = ? WHERE youtubeID = ?', (track_id[0], video_id))
# Keep track of new tracks added
new_track_id.append(track_id[0])
except:
                    print('ERROR when adding to database: ' + track_name + ', ' + artist_name + ' NOT ADDED')
conn.commit() # Save changes
conn.close() # Close database
# Return the track_id (in database) of the newly added tracks
return new_track_id
#**************************************************************************
def filter_single_track(self, youtubeID):
if youtubeID is None:
return
conn = sqlite3.connect(self.name) # Connect to the database.
cur = conn.cursor() # database handler object
cur.execute('SELECT youtubeID FROM videos WHERE youtubeID = ?', (youtubeID,))
item = cur.fetchone() # returns tuples
item = item[0]
youtube_url = 'https://www.youtube.com/watch?v={}'.format(item)
try:
# use youtube_dl to collect the song name & artist name
info = youtube_dl.YoutubeDL({}).extract_info(youtube_url, download=False)
track_name = info['track']
artist_name = info['artist']
except:
print('ERROR: Video not available. Youtube ID:', item)
track_name = None
artist_name = None
if track_name is not None and artist_name is not None:
if (isinstance(track_name, str)):
print(' - Track Name:', track_name)
else:
print(' - WARNING: Track name not a string.')
print(track_name)
if (isinstance(artist_name, str)):
print(' - Artist Name:', artist_name)
else:
print(' - WARNING: Artist name not a string.')
print(artist_name)
else:
print(' - SORRY, no track & artist found')
conn.commit() # Save changes
conn.close() # Close database
#**************************************************************************
def search_spotify_catalog(self, new_tracks):
        if len(new_tracks) == 0:
print(' - MESSAGE: Nothing to search for')
return
conn = sqlite3.connect(self.name) # Connect to the database.
cur = conn.cursor() # database handler object
cur.execute('SELECT tracks.id, tracks.name, artists.name FROM tracks JOIN artists ON tracks.id_artists = artists.id')
items = cur.fetchall()
print('\n - Total # of NEW Tracks: ' + str(len(new_tracks)))
print(' - Searching in Spotify Catalog')
for i in items:
track_id = i[0]
track_name = i[1]
artist_name = i[2]
if track_id in new_tracks:
print(' > Search: ' + track_name + ', ' + artist_name)
uri = spotify.search_catalog(track_name, artist_name)
if uri is not None:
cur.execute('UPDATE tracks SET spotifyID = ? WHERE id = ?', (uri, track_id))
else:
print('WARNING: not found')
conn.commit() # Save changes
conn.close() # Close database
#**************************************************************************
def append_tracks_to_playlist(self, playlist_id, new_tracks):
conn = sqlite3.connect(self.name) # Connect to the database.
cur = conn.cursor() # database handler object
cur.execute('SELECT tracks.id, tracks.spotifyID FROM tracks WHERE spotifyID IS NOT NULL')
items = cur.fetchall()
conn.commit() # Save changes
conn.close() # Close database
tracks = []
for i in items:
# choose only the newly added ones
if i[0] in new_tracks:
tracks.append(i[1])
        if len(tracks) != 0:
spotify.insert_into_playlist(playlist_id, tracks)
else:
print(' - WARNING: No tracks to be added')
#******************************************************************************
def store_in_database(files, db_obj):
n = 0
new_videos = []
# Extract info from files
for f in files:
if os.path.exists(f):
fh = open(f)
js = json.load(fh)
fh.close()
else:
print('ERROR. File ' + f + ' not found')
exit()
try:
print('\n' + f + ': {} items\n'.format(len(js['items'])))
except:
print('ERROR. JSON file not properly formatted')
exit()
n += len(js['items'])
videos = []
item = {}
c = 1
try:
for i in js['items']:
item['title'] = i['snippet']['title']
item['duration'] = __get_time(i['contentDetails']['duration'])
item['youtubeID'] = i['id']
videos.append(item.copy())
print(' > Video {}: {} | id = {}'.format(c, i['snippet']['title'], i['id']))
c += 1
except:
print('ERROR. JSON file not properly formatted')
exit()
# Concatenate videos' IDs newly added to database
new_videos.extend(db_obj.insert_multiple_videos(videos))
print('\n >> TOTAL ITEMS: ' + str(n) + '\n')
return new_videos
#******************************************************************************
def __get_time(time):
# ISO8601 Time format conversion
RE_SEC = r'^P.*[TM](\d+)S$'
RE_MIN = r'^P.*[TH](\d+)M.*S$'
RE_H = r'^P.*[T](\d+)H.*M.*S$'
RE_DAY = r'^P(\d+)DT.*H.*M.*S$'
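    # e.g. 'PT3M20S' -> 3*60 + 20 = 200 seconds, 'PT1H2M3S' -> 3723 seconds (per the patterns above)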
seconds = re.search(RE_SEC, time)
minutes = re.search(RE_MIN, time)
hours = re.search(RE_H, time)
days = re.search(RE_DAY, time)
s = 0
if seconds is not None: s += int(seconds[1])
if minutes is not None: s += int(minutes[1]) * 60
if hours is not None: s += int(hours[1]) * 3600
if days is not None: s += int(days[1]) * 86400
return s
```
#### File: jose-log/playlistCreator/spotifyOAuth.py
```python
import requests
import urllib.request
import urllib.parse
# Browser and server modules
import webbrowser
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
# Utility modules
import json
import base64
import time
import random
# Secrets modules
from secrets_spotify import client_id
from secrets_spotify import client_secret
# Authorization endpoints:
AUTH_URL = 'https://accounts.spotify.com/authorize'
TOKEN_URL = 'https://accounts.spotify.com/api/token'
REDIRECT_URI = 'http://localhost:9090'
CACHE_PATH = './.cache-token'
SCOPES = 'user-read-private playlist-read-private playlist-modify-public playlist-modify-private'
#******************************************************************************
# RequestHandler
# Extended class from BaseHTTPRequestHandler used to handle the spotify
# authorization response and get the authorization code.
#
# https://docs.python.org/3/library/http.server.html#http.server.BaseHTTPRequestHandler.handle_one_request
#
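# The redirect arrives at e.g. http://localhost:9090/?code=... with a state parameter (or an error parameter); do_GET below parses it.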
class RequestHandler(BaseHTTPRequestHandler):
# GET method handler:
def do_GET(self):
query_s = urllib.parse.urlparse(self.path).query
form = dict(urllib.parse.parse_qsl(query_s))
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
if "code" in form:
self.server.auth_code = form["code"]
self.server.state = form["state"]
self.server.error = None
status = "successful"
elif "error" in form:
self.server.error = form["error"]
self.server.auth_code = None
status = "failed ({})".format(form["error"])
else:
self._write("<html><body><h1>Invalid request</h1></body></html>")
return
self._write(""" <html>
<body>
<h1>Authentication status: {}</h1>
This window can be closed.
<script>
window.close()
</script>
</body>
</html>""".format(status))
def _write(self, text):
return self.wfile.write(text.encode("utf-8"))
#******************************************************************************
# Request authorization to access data
# The requests module could be used to send a GET request to the authoriza-
# tion server, nevertheless, user authentication is required, thus, the
# web browser must be used to ask the user for login and authorization
#
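# Sketch of the flow below: start a local server, open the /authorize URL in the browser,
# the user logs in and grants access, Spotify redirects to localhost:9090 and the handler captures the code.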
def get_authorization_code():
code = None
redirect_port = 9090
print(' > Creating Local Server in port:', str(redirect_port))
server = start_local_http_server(redirect_port)
state = generate_random_string(20)
print(' - Random state string: ' + state)
url = build_authorize_url(state)
print(' > OAuth Authorization URL:', url)
try:
webbrowser.open(url)
print(' - Authentication URL opened in your browser:', AUTH_URL)
except webbrowser.Error:
print(' - Please navigate here:', url)
print(' >> Handling request')
server.handle_request() # wait for authorization endpoint response
if server.auth_code is not None:
code = server.auth_code
if server.state.strip() != state:
            print('ERROR: response state doesn\'t match')
print(server.state)
elif server.error is not None:
print(' - Received error from OAuth server: {}'.format(server.error))
exit()
else:
print(' - Server listening on localhost has not been accessed')
exit()
return code
#******************************************************************************
# Request refresh and access tokens
# This time, no user interaction through the browser is needed, thus, the
# POST request is handled using Requests module
#
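# The token endpoint typically returns access_token, refresh_token and expires_in (seconds); the code
# below converts expires_in into an absolute expires_at timestamp before caching the whole response.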
def request_access_token(code):
token = None
payload = {
'redirect_uri': REDIRECT_URI,
'code': code,
'grant_type': 'authorization_code',
}
headers = make_authorization_headers()
response = requests.post(TOKEN_URL, data=payload, headers=headers)
if response.status_code != 200:
print('ERROR. Token request failed')
print(response)
exit()
token_info = response.json()
# Compute time value for token expiration date
token_info['expires_at'] = int(time.time()) + token_info.get('expires_in')
save_token_info(token_info)
return token_info.get('access_token')
#******************************************************************************
def generate_random_string(length):
rand = ''
universe = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
for i in range(length):
rand += universe[random.randint(0, len(universe) - 1)]
return rand
#******************************************************************************
def start_local_http_server(port, handler=RequestHandler):
server = HTTPServer(("127.0.0.1", port), handler)
server.allow_reuse_address = True
server.auth_code = None
server.error = None
return server
#******************************************************************************
def build_authorize_url(state):
# Gets the URL to use to authorize this app
payload = {
'response_type':'code',
'client_id':client_id,
'state':state,
'scope':SCOPES,
'redirect_uri':REDIRECT_URI,
}
urlparams = urllib.parse.urlencode(payload)
return '{}?{}'.format(AUTH_URL, urlparams)
#******************************************************************************
def make_authorization_headers():
# This method encodes the header, nevertheless the API allows to send these
# parameters as part of the POST request body.
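    # Resulting header: Authorization: Basic base64(client_id + ':' + client_secret)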
auth_header = (client_id + ':' + client_secret).encode('ascii')
auth_header = base64.b64encode(auth_header)
return {'Authorization': 'Basic {}'.format(auth_header.decode('ascii'))}
#******************************************************************************
def save_token_info(token_info):
try:
f = open(CACHE_PATH, 'w')
f.write(json.dumps(token_info))
f.close()
return True
except:
print('Couldn\'t write token to cache at: {}'.format(CACHE_PATH))
return False
#******************************************************************************
def get_cached_token():
token_info = None
print(' - Extracting Spotify cached token')
try:
f = open(CACHE_PATH)
token_info_string = f.read()
f.close()
token_info = json.loads(token_info_string)
except:
print('ERROR. Opening {} failed'.format(CACHE_PATH))
return token_info
#******************************************************************************
def is_token_expired(token_info):
now = int(time.time())
# if expiration time is less than a minute
return token_info['expires_at'] - now < 60
#******************************************************************************
def refresh_access_token(refresh_token):
if refresh_token is None:
print(' - WARNING: Invalid refresh token')
return None
payload = {
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
}
headers = make_authorization_headers()
response = requests.post(TOKEN_URL, data=payload, headers=headers)
if response.status_code != 200:
print('ERROR. Refresh token request failed')
exit()
token_info = response.json()
# Compute time value for token expiration date
token_info['expires_at'] = int(time.time()) + token_info.get('expires_in')
if 'refresh_token' not in token_info:
token_info['refresh_token'] = refresh_token
save_token_info(token_info)
return token_info.get('access_token')
#******************************************************************************
def request_valid_token():
token = None
token_info = get_cached_token()
if token_info is None:
print('WARNING. No cached token was found')
else:
if is_token_expired(token_info):
print(' - Cached token expired. Refreshing access token')
token = refresh_access_token(token_info.get('refresh_token'))
else:
print(' - Cached token VALID')
token = token_info.get('access_token')
return token
```
#### File: playlistCreator/spotify/spotify_scamToken.py
```python
import requests
import json
from spotify_secrets import spotify_oauth_token as oauth_token
from spotify_secrets import spotify_user_id as usr_id
from spotify_secrets import client_app_id as app_id
from spotify_secrets import client_app_secret as app_secret
serviceurl = 'https://api.spotify.com/v1'
authurl = 'https://accounts.spotify.com/authorize'
###############################################################################
# POST REQUESTS
###############################################################################
def create_new_playlist(pl_name):
endpoint = 'https://api.spotify.com/v1/users/{}/playlists'.format(usr_id)
parameters = {
"name": pl_name,
"description": "Liked Youtube Videos",
"public": True
}
head = {
'Accept':'application/json',
"Content-Type": "application/json",
"Authorization": "Bearer {}".format(oauth_token)
}
payload = json.dumps(parameters)
print('Creating new playlist...')
response = requests.post(endpoint, data=payload, headers=head)
response_json = response.json()
try:
# playlist id
out = response_json['id']
print('>>New playlist \"' + pl_name + '\" created:')
except:
print(json.dumps(response_json, sort_keys=True, indent=4))
print('ERROR:', response_json['error']['status'])
print('message:', response_json['error']['message'])
out = response_json['error']['status']
return out
def insert_into_playlist(pl_id, song_uri):
endpoint = 'https://api.spotify.com/v1/playlists/{}/tracks'.format(pl_id)
parameters = {
'uris':['{}'.format(song_uri)]
}
head = {
'Accept':'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer {}'.format(oauth_token)
}
payload = json.dumps(parameters)
print('Adding item to playlist...')
response = requests.post(endpoint, data=payload, headers=head)
response_json = response.json()
try:
# playlist id
out = response_json['snapshot_id']
print('>>Track \"' + song_uri + '\" added to \"' + pl_id + '\" playlist')
except:
print(json.dumps(response_json, sort_keys=True, indent=4))
print('ERROR:', response_json['error']['status'])
print('message:', response_json['error']['message'])
out = response_json['error']['status']
    return out
###############################################################################
# GET REQUESTS
###############################################################################
def retrieve_user_playlists():
endpoint = 'https://api.spotify.com/v1/me/playlists'
parameters = {
'limit':'50',
'offset':'0'
}
head = {
'Accept':'application/json',
'Content-Type':'application/json',
'Authorization':'Bearer {}'.format(oauth_token)
}
print('Retrieving user playlists...')
response = requests.get(endpoint, params=parameters, headers=head)
response_json = response.json()
try:
print('>> Playlists Count: ', response_json['total'])
except:
# Dump JSON response to screen
print(json.dumps(response_json, sort_keys=True, indent=4))
print('ERROR:', response_json['error']['status'])
print('message:', response_json['error']['message'])
exit()
c = 0
pl = {}
for i in response_json['items']:
# Store data into the data base
print(' > Playlist {}: {} | Songs = {} | ID: {}'.format(c + 1, i['name'], i['tracks']['total'], i['id']))
pl[i['name']] = i['id']
c += 1
# Back up json response
fh = open('spotify_pl.json', 'w')
json.dump(response_json, fh, sort_keys=True, indent=4)
fh.close()
# Back up the filtered playlist names
fh = open('spotify_pl.txt', 'w')
for i,j in pl.items():
fh.write(i + ',' + j + '\n')
fh.close()
return pl
def search_catalog(track, artist):
endpoint = 'https://api.spotify.com/v1/search'
parameters = {
'q':'track:{} artist:{}'.format(track, artist),
'type':'track',
'limit':'3'
}
head = {
'Accept':'application/json',
'Content-Type':'application/json',
'Authorization':'Bearer {}'.format(oauth_token)
}
print('Searching in spotify catalog...')
response = requests.get(endpoint, params=parameters, headers=head)
response_json = response.json()
try:
songs = response_json["tracks"]["items"]
# only use the first song
out = songs[0]["uri"]
print('Search FINISHED!')
except:
print(json.dumps(response_json, sort_keys=True, indent=4))
print('ERROR:', response_json['error']['status'])
print('message:', response_json['error']['message'])
out = response_json['error']['status']
return out
###############################################################################
# MISC
###############################################################################
def read_user_playlists():
pl = {}
fh = open('spotify_pl.txt')
for line in fh:
line = line.rstrip('\n')
x = line.split(',')
pl[x[0]] = x[1]
fh.close()
return pl
###############################################################################
# MAIN PROGRAM
###############################################################################
if __name__ == '__main__':
# print(create_playlist())
#j = get_spotify_uri('hallelujah','hillsong')
#print(j)
print('\n\rHello World!\n\r< SPOTIFY API INTERACTION >')
download = None
while download is None:
x = input('Do you want to download your playlists? (Y/n)')
if x in 'yY ':
download = True
break
elif x in 'nN':
download = False
break
elif x in 'qQ':
exit()
else:
print('INVALID!')
if download is True:
pl = retrieve_user_playlists()
else:
pl = read_user_playlists()
new_playlist = 'Youtube Liked Vids'
if new_playlist not in pl.keys():
print('Creating new Youtube playlist')
pl_id = create_new_playlist(new_playlist)
else:
print('Youtube playlist already exists')
pl_id = pl[new_playlist]
track_uri = (search_catalog('Oceans','Hillsong'))
insert_into_playlist(pl_id, track_uri)
```
#### File: playlistCreator/spotify/spotipy_lib.py
```python
import json
# SPOTIPY-related
import spotipy
import spotipy.util as util
from spotipy.oauth2 import SpotifyClientCredentials
# Secrets-related
from spotify_secrets import client_app_id as client_id
from spotify_secrets import client_app_secret as client_secret
from spotify_secrets import spotify_user_id as username
#############################################################
#
# Steps:
# 1. Retrieve all user playlists: current_user_playlists(limit=50, offset=0)
# 2. Look for the Youtube like vids playlist
# 3. If non-existent, create it. If existent, get the ID.
# 4. Based on the videos_dump.json files, read them and look for the songs in spotify
# 5. If songs exist, add them to the playlist.
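# These steps map onto download_user_playlists(), create_new_playlist(), sp.search() and add_tracks_to_playlist() in the main block below.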
def get_authentication(scope=None):
redirect_uri = 'http://localhost:9090'
print('> Getting OAuth authentication')
token = util.prompt_for_user_token(redirect_uri=redirect_uri, scope=scope)
if token is None:
print('Can\'t get token for', username)
exit()
print(' Authentication successful')
sp = spotipy.Spotify(auth=token)
return sp
def download_user_playlists(sp):
print('> Downloading playlists')
playlists = sp.user_playlists(username)
usr_pl = {}
for playlist in playlists['items']:
print(playlist['name'], playlist['id'])
usr_pl[playlist['name']] = playlist['id']
print(username, 'playlists retrieved')
return usr_pl
def create_new_playlist(playlist):
scope = 'playlist-modify-public'
sp = get_authentication(scope)
sp.user_playlist_create(username, playlist)
def add_tracks_to_playlist(playlist, tracks):
scope = 'playlist-modify-public'
sp = get_authentication(scope)
sp.user_playlist_add_tracks(username, playlist, tracks)
###############################################################################
# M A I N P R O G R A M
###############################################################################
if __name__ == '__main__':
sp = get_authentication()
usr_pl = download_user_playlists(sp)
new_playlist = 'Youtube Liked Vids'
if new_playlist not in usr_pl.keys():
print('WARNING!: Youtube playlist non-existent')
print('> Creating Spotify playlist')
create_new_playlist(new_playlist)
        # Download the playlists again to get the new playlist URI
usr_pl = download_user_playlists(sp)
else:
print('WARNING!: Youtube playlist already exists')
print(' ', new_playlist, 'ID:', usr_pl[new_playlist])
test_array = []
test_array.append(('Hillsong','Another in the fire'))
test_array.append(('<NAME>','B Collision'))
test_array.append(('<NAME>','<NAME>'))
test_array.append(('Un Corazón','la calle'))
test_array.append(('Ecclesia','Excellence'))
track_ids = []
# shows related artists for the given seed artist
for item in test_array:
result = sp.search(q='track:' + item[1] + ' artist:' + item[0], type='track')
try:
name = result['tracks']['items'][0]['name']
uri = result['tracks']['items'][0]['uri']
artist = result['tracks']['items'][0]['artists'][0]['name']
print(name + ' | ' + artist + ' | ' + uri)
track_ids.append(uri)
except:
print("No results retrieved")
print(result)
exit()
print('> Adding songs to playlist')
add_tracks_to_playlist(usr_pl[new_playlist], track_ids)
print(' > FINISHED!')
```
#### File: jose-log/playlistCreator/youtube.py
```python
import json
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
# This file is downloaded from the Youtube API Dashboard, once the application
# is registered and activated
CLIENT_SECRETS_FILE = 'secrets_youtube.json'
# Scopse are the type of permissions that the application needs to access user
# data while interacting with the API
SCOPES = ['https://www.googleapis.com/auth/youtube.readonly']
# API-specific macros
API_SERVICE_NAME = 'youtube'
API_VERSION = 'v3'
# Files' Names
INDEX_FILE = 'videos_index.txt'
VIDEOS_BASENAME = 'videos_dump'
# Disable OAuthlib's HTTPS verification when running locally.
# *DO NOT* leave this option enabled in production.
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
def __get_authenticated_service():
print(' - Building the authenticated service...')
# Get OAuth credentials
# Google OAuth entirely handled by the Google Python Client libraries
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)
# Create local server to interact with OAuth server.
# User is required to grant access to the application.
credentials = flow.run_local_server(
host = 'localhost',
port = 8080,
authorization_prompt_message = 'Please visit this URL: \n\r{url}',
success_message = 'The auth flow is complete; you may close this window.',
open_browser = True)
# Build the Service object:
service = googleapiclient.discovery.build(API_SERVICE_NAME, API_VERSION, credentials=credentials)
return service
def __request_service(service, **kwargs):
collection = service.videos() # Extract collection of videos
request = collection.list(**kwargs) # HTTP Request object
return request.execute()
def request_liked_playlist(n_videos):
if os.path.exists(INDEX_FILE) is True:
os.remove(INDEX_FILE)
    service = __get_authenticated_service()
    # The number of results per query is limited, so multiple queries must be
    # performed to retrieve the entire list of videos. Page tokens returned in
    # each JSON response are used to request the next block of videos.
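    # Each request asks for at most 50 items; the nextPageToken of one response seeds the pageToken of the next.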
i = 1
files = []
nxtPage = None
while True:
if n_videos is None:
n = 50
elif n_videos >= 50:
n = 50
n_videos -= 50
else:
n = n_videos
n_videos = 0
try:
print(' - Requesting query No {} to Youtube Data API'.format(i))
response = __request_service(
service,
part = 'id,snippet,contentDetails',
myRating = 'like',
maxResults = n,
pageToken = nxtPage,
fields = 'items(id,snippet(title),contentDetails(duration)),pageInfo,nextPageToken'
)
        except Exception as err:
            print('ERROR Requesting Youtube Data API.')
            print(err)
            quit()
# save response to files
outfile = VIDEOS_BASENAME + str(i) + '.json'
fh = open(outfile, 'w')
json.dump(response, fh, sort_keys=True, indent=4)
fh.close()
files.append(outfile)
# index file
fh = open(INDEX_FILE, 'a')
fh.write(outfile + '\n')
fh.close()
if n_videos is None or n_videos > 0:
nxtPage = response.get('nextPageToken', None)
else:
nxtPage = None
if nxtPage is None:
try:
total = response['pageInfo']['totalResults']
except:
print('ERROR. JSON response not properly formatted')
quit()
print(' > Total No of results: {}'.format(total))
print(' > No of request iterations: {}'.format(i))
break
i += 1
return files
def request_youtube_files():
files = []
path = None
while True:
x = input('Enter the index file name: ')
        if len(x) == 0:
            path = 'videos_index.txt'
            if os.path.exists(path):
                break
            else:
                print('WARNING: No files added. Try Again')
        elif not os.path.exists(x):
            print('ERROR. File does NOT exist. Please try again')
        else:
            path = x
            break
fh = open(path)
print(' - Files Added:')
for line in fh:
files.append(line.strip())
print(' * ' + line.strip())
return files
```
#### File: playlistCreator/youtube/youtube_test1.py
```python
import os
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.errors
import json
scopes = ["https://www.googleapis.com/auth/youtube.readonly"]
def main():
# Disable OAuthlib's HTTPS verification when running locally.
# *DO NOT* leave this option enabled in production.
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "0"
# Clarification: https://github.com/singingwolfboy/flask-dance/issues/129
# Flask-Dance is built on top of oauthlib, a Python toolkit that implements
# the OAuth specifications. The specification for OAuth 2 requires that all
# communication must occur over secure HTTPS -- if any communication occurs
# over insecure HTTP, it's vulnerable to attack. As a result, oauthlib raises
# an error if you use insecure HTTP. However, most people don't want to spend
# the time and effort to configure secure HTTPS for doing local testing -- it's
# only a problem for production, not for development.
# As a result, oauthlib allows you to disable this check. If you set the
# OAUTHLIB_INSECURE_TRANSPORT environment variable, it will not check for secure
# HTTPS, and allow the OAuth dance to continue whether or not it is secure.
# Disabling this check is intended only for development, not for production --
# in production, you must configure secure HTTPS to make sure communication happen
# securely.
api_service_name = "youtube"
api_version = "v3"
client_secrets_file = "youtube_oauth.json"
#*******************************************
# Get credentials and create an API client
#*******************************************
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(client_secrets_file, scopes)
credentials = flow.run_local_server(
host='localhost',
port=8080,
authorization_prompt_message='Please visit this URL: {url}',
success_message='The auth flow is complete; you may close this window.',
open_browser=True)
# Build the Service object:
youtube = googleapiclient.discovery.build(api_service_name, api_version, credentials=credentials)
#*******************************************
# Request structure
#*******************************************
# channels(): function for the Channels collection
collection = youtube.channels()
# list(): method of the given collection
request = collection.list( part="id,status,contentDetails",
id="UCR9ay2jZLzQLw_E743NO-tQ") # HTTP Request object
# Execute the request and get a response
response = request.execute()
# Dump JSON response to screen
print(json.dumps(response, sort_keys=True, indent=4))
print('\n\n\n\n\n***********************************')
print('flow:')
print(flow)
print('credentials:')
print(credentials)
print('youtube:')
print(youtube)
print('collection:')
print(collection)
print('request:')
print(request)
print('response:')
print(response)
if __name__ == "__main__":
main()
'''
HTTP Requests: http://googleapis.github.io/google-api-python-client/docs/epy/googleapiclient.http.HttpRequest-class.html
Accessing the JSON data:
    print('Num 5 cent stamps: {}'.format(response['count']))
    print('First stamp name: {}'.format(response['items'][0]['name']))
'''
``` |
{
"source": "joselpadronc/CRUD_Persons",
"score": 2
} |
#### File: CRUD_Persons/persons/views.py
```python
import json
# Imports of framework
from django.shortcuts import render, redirect
from django.core.paginator import Paginator
from django.http import HttpRequest, HttpResponse
# Imports of local
from persons.forms import PersonForm
from persons.models import Person
from django.contrib.auth.decorators import login_required
# Create your views here.
def home_page(request):
persons = Person.objects.all()
data = {
'persons':persons
}
template_name = 'index.html'
return render(request, template_name, data)
def list_persons_view(request):
persons = Person.objects.all()
list_persons = []
for person in persons:
data_person = {}
data_person['id'] = person.id
data_person['name'] = person.name
data_person['id_card'] = person.id_card
data_person['nationality'] = person.nationality
data_person['age'] = person.age
list_persons.append(data_person)
data = json.dumps(list_persons)
print (type(data))
return HttpResponse(data, 'application/json')
@login_required
def register_person_view(request):
data = {
'form':PersonForm()
}
if request.method == 'POST':
form = PersonForm(request.POST)
if form.is_valid():
form.save()
data ['message'] = "Persona guardada correctamente"
return render(request, 'register.html', data)
@login_required
def edit_person_view(request, id):
person = Person.objects.get(id=id)
data = {
'form':PersonForm(instance=person)
}
if request.method == 'POST':
form = PersonForm(request.POST, instance=person)
if form.is_valid():
form.save()
data['message'] = 'Persona modificada correctamente'
data['form'] = form
return render(request, 'edit.html', data)
@login_required
def delete_person_view(request, id):
person = Person.objects.get(id=id)
person.delete()
return redirect(to='home_page')
``` |
{
"source": "joselrodrigues/TMC-app",
"score": 3
} |
#### File: TMC-app/tmc/forms.py
```python
from django import forms
from tmc.models import Tmc
class TmcForm(forms.ModelForm):
"""Formulario para la TMC
Arguments:
forms {Object}
"""
class Meta:
model = Tmc
fields = ('plazo', 'monto', 'fecha_tmc')
def __init__(self, *args, **kwargs):
super(TmcForm, self).__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control',
'placeholder': self.fields[field].label
})
self.fields[field].label = ''
```
#### File: TMC-app/tmc/utils.py
```python
def get_tmc(args, **kwargs):
plazo = kwargs.get('plazo')
uf = kwargs.get('monto')
tmc = ''
if plazo < 90:
if uf <= 5000:
tmc = over_tmc('26', args)
else:
tmc = over_tmc('25', args)
elif plazo >= 90:
if uf <= 50:
tmc = over_tmc('45', args)
elif 200 >= uf > 50:
tmc = over_tmc('44', args)
elif 5000 >= uf > 200:
tmc = over_tmc('35', args)
else:
tmc = over_tmc('34', args)
return tmc
def over_tmc(tipo, args):
tmc_values = ''
for tmc in args:
if tmc['Tipo'] == tipo:
tmc_values = tmc
return tmc_values
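# Illustrative usage sketch (added; not from the original app): get_tmc() expects an
# iterable of TMC rows that carry at least a 'Tipo' key, as over_tmc() shows.
# The 'Valor' field and the numbers below are made-up example data.
if __name__ == '__main__':
    sample_rows = [
        {'Tipo': '26', 'Valor': 35.2},
        {'Tipo': '25', 'Valor': 22.1},
    ]
    # plazo < 90 and monto <= 5000 UF selects the row with Tipo '26'
    print(get_tmc(sample_rows, plazo=30, monto=4000))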
``` |
{
"source": "joseluan/ifnaroca",
"score": 2
} |
#### File: ifnaroca/lista/views.py
```python
from django.contrib.auth.models import User
from django.shortcuts import render,render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth import authenticate, logout, login
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from lista.models import *
def lista(request):
cadastro = False
Nparticipantes = len(Participante.objects.all())
if request.method=='POST':
try:
Participante.objects.get(rg=request.POST.get("RG"))
        except Exception as e:
cadastro = True
Participante.objects.create(nome=request.POST.get("nome"),rg=request.POST.get("RG")).save()
Nparticipantes = len(Participante.objects.all())
return render(request,'lista.html',{'cadastro': True,'Nparticipantes':Nparticipantes})
return render(request,'lista.html',{'cadastro': True,'Nparticipantes':Nparticipantes})
else:
return render(request,'lista.html',{'cadastro': cadastro,'Nparticipantes':Nparticipantes})
def index(request):
return render(request,'index.html',{})
``` |
{
"source": "joselucas77/Python",
"score": 3
} |
#### File: intensivao_python/aula01/auto_sistema.py
```python
import pyautogui
import pyperclip
import time
import pandas as pd
def abrir_chrome():
pyautogui.press('win')
pyautogui.write('chrome')
pyautogui.press('enter')
def entrar_driver():
pyautogui.write("https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga")
pyautogui.press('enter')
def baixar_arquivo():
pyautogui.click(x=292, y=267, clicks=2)
time.sleep(3)
pyautogui.click(x=331, y=450, button='right')
time.sleep(3)
pyautogui.click(x=457, y=676)
time.sleep(3)
def abrir_email():
# Abrir o app do email
pyautogui.hotkey('win')
pyautogui.write('email')
pyautogui.press('enter')
def enviar_email():
# Base de dados
tabela = pd.read_excel(r'C:\Users\jose_\OneDrive\Documentos\Estudos\arquivos_pyton\Vendas - Dez.xlsx')
faturamento = tabela['Valor Final'].sum()
quantidade = tabela['Quantidade'].sum()
time.sleep(5)
# Abri a janela de escrever o email
pyautogui.click(x=81, y=106)
time.sleep(5)
# Escreve o destinatário
pyautogui.write('<EMAIL>')
pyautogui.press('enter')
time.sleep(2)
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('tab')
time.sleep(2)
# Escreve o assunto
pyautogui.write("Relatorio de vendas")
time.sleep(2)
pyautogui.press('tab')
time.sleep(2)
# Escrever o corpo do email
texto = f'''
Prezados, bom dia
O faturamento de ontem foi de: R$ {faturamento}
A quantidade de produtos foi de: R$ {quantidade}
Abs
JoseLucas
'''
pyperclip.copy(texto)
pyautogui.hotkey('ctrl', 'v')
time.sleep(2)
# Enviando o email
pyautogui.hotkey('ctrl', 'enter')
time.sleep(2)
pyautogui.PAUSE = 2
abrir_chrome()
time.sleep(5)
entrar_driver()
time.sleep(5)
baixar_arquivo()
time.sleep(5)
abrir_email()
time.sleep(5)
enviar_email()
time.sleep(5)
``` |
{
"source": "JoseLucasASilva/backend-train-station",
"score": 2
} |
#### File: backend-train-station/utils/train_requests.py
```python
import requests
from utils.config import OCP_API_KEY
BASE_ENDPOINT = "https://gateway.apiportal.ns.nl/reisinformatie-api/api/v2"
def get_headers():
headers = {
"content-type": "application/json",
"Ocp-Apim-Subscription-Key": OCP_API_KEY
}
return headers
def get_stations(headers=None):
if not headers:
headers = get_headers()
result = requests.get(f"{BASE_ENDPOINT}/stations", headers=headers)
return result
def get_departures(stations_code, headers=None):
if not headers:
headers = get_headers()
result = requests.get(f"{BASE_ENDPOINT}/departures?station={stations_code}", headers=headers)
return result
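# Minimal usage sketch (added for illustration). 'HTM' is an assumed station
# code and the layout of the NS API responses is not inspected here.
if __name__ == '__main__':
    stations = get_stations()
    print(stations.status_code)
    departures = get_departures('HTM')
    print(departures.status_code)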
``` |
{
"source": "joseluis031/Ejercicios-de-Clases-de-POO",
"score": 3
} |
#### File: Ejercicios-de-Clases-de-POO/Clases/puzzle.py
```python
class A:
#A continuación define los métodos de la clase
def z(self):
return self #Aquí devuelve una referencia al objeto de instancia que fue llamado.
def y(self, t):
return len(t)
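# Illustrative check (added; not part of the original exercise) of what the
# two methods return.
obj = A()
print(obj.z() is obj)  # True -- z() returns the instance it was called on
print(obj.y("hola"))   # 4 -- y() returns the length of its argument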
``` |
{
"source": "joseluis031/Introducci-n-a-la-algor-tmica",
"score": 4
} |
#### File: joseluis031/Introducci-n-a-la-algor-tmica/ej12.py
```python
cuenta = input("¿Quiere abrir una cuenta en el banco?:")
saldo = 0
class banco:
def __init__(self,cuenta,saldo): #defino el constructor
self.cuenta = cuenta
self.saldo = saldo
def respuesta(self):
if self.cuenta == "no":
print("De acuerdo, gracias")
if self.cuenta == "si":
self.saldo = 0
abono = int(input("Introduzca el dinero que desea:"))
self.saldo = abono
print("Tu saldo es:" , self.saldo)
def actividad(self):
act = input("¿Quiere consultar,retirar o abonar dinero?")
if act == "no":
print("De acuerdo, gracias")
if act == "consultar":
print("Su saldo es: ", self.saldo , "€")
if act == "retirar":
retiro = float(input("Dinero a retirar:"))
self.saldo = self.saldo - retiro
print("Su saldo actual es: ",self.saldo)
if act == "abonar":
ingreso = float(input("Dinero a ingresar"))
self.saldo = self.saldo + ingreso
print("Su saldo es: ", self.saldo, "€")
total = banco(cuenta,saldo)
print(total.respuesta())
print(total.actividad())
``` |
{
"source": "joseluis031/Iteracion",
"score": 4
} |
#### File: joseluis031/Iteracion/ejercicio11.py
```python
elegir = input("Elige: euclides o sumas y restas ")
x = int(input("Introduce el primer número: "))
y = int(input("Introduce el segundo número: "))
if elegir == "euclides":
def mcd(x, y):
if y == 0:
return x
return mcd(y, x%y)
final = mcd(x, y)
print(final)
elif elegir == "sumas y restas":
def mcd(x, y):
if x ==0 :
return y
elif y == 0:
return x
elif x == y:
return x
elif x > y:
return mcd(x-y, y)
return mcd(x, y-x)
if(mcd(x, y)):
final = mcd(x, y)
print(final)
else:
print("No existe m.c.d")
``` |
{
"source": "joseluis031/laberinto",
"score": 4
} |
#### File: joseluis031/laberinto/laberinto.py
```python
laberinto = [] #defino mi lista del laberinto
muro = ((0,1), (0,2), (0,3), (0,4), (1,1), (2,1), (2,3), (3,3), (4,0), (4,1), (4,2), (4,3)) #defino las casillas de los muros
for _ in range (5):
laberinto.append(([' ']*5))
for i in muro:
laberinto[int(i[0])][int(i[1])] = 'X'
laberinto [4] [4] = "Salida"
for i in range (5):
print (laberinto[i]) #Consigo crear el laberinto
def recorrido():
movimientos = []
a = 0 #eje Y
b = 0 #eje X
c = 0 #'contrario' de a
d = 0 #'contrario' de b
while laberinto [a][b] != "Salida": #Genero un bucle que me va a recorrer el laberinto mientras no este en la casilla de salida, es decir, hasta que llegue a la salida, el bucle ira desarrollandose
if d <= a and laberinto[a+1][b] != 'X': #Cuando esas condiciones den como resultado una casilla diferente de 'X', añado a los movimientos "Abajo"
movimientos.append("Abajo")
a += 1
elif c <= b < 4 and laberinto[a][b+1] != 'X': #Cuando esas condiciones den como resultado una casilla diferente de 'X', añado a los movimientos "Derecha"
movimientos.append("Derecha")
b += 1
c = b-1
d = a
elif b < c and laberinto[a][b-1] != 'X': #Cuando esas condiciones den como resultado una casilla diferente de 'X', añado a los movimientos "Izquierda"
movimientos.append("Izquierda")
b -= 1
else:
movimientos.append("Arriba") #Cuando no ocurra ninguna de las condiciones anteriores, añado a los movimientos "Arriba"
a -= 1
return movimientos
print("Los movimientos son:\n{}".format(recorrido())) #Le doy formato a la funcion
``` |
{
"source": "joseluis031/parcial",
"score": 4
} |
#### File: parcial/Clases/ejercicio2.py
```python
class animal:
def __init__(self, tipo):
self.tipo = tipo
class mamifero(animal): #utilizo la herencia multiple
def __init__(self,tipo,mamiferoo):
self.mamiferoo = mamiferoo
animal.__init__(self, tipo)
class oviparo(mamifero,animal):
def __init__(self,tipo,mamiferoo,oviparo):
self.oviparo = oviparo
mamifero.__init__(self, tipo, mamiferoo)
# The original left these assignments blank and printed attributes that do not
# exist; the instances and values below are illustrative assumptions so the
# snippet runs.
pollo = animal("pollo")
gato = mamifero("gato", True)
ornitorrinco = oviparo("ornitorrinco", True, True)
print(pollo.tipo)
print(gato.mamiferoo)
print(ornitorrinco.oviparo)
``` |
{
"source": "joseluis031/Ping-Pong",
"score": 3
} |
#### File: joseluis031/Ping-Pong/pingpong.py
```python
import random
import pygame
from pygame.locals import QUIT
# Constantes para la inicialización de la superficie de dibujo
VENTANA_HORI = 800 # Ancho de la ventana
VENTANA_VERT = 600 # Alto de la ventana
FPS = 60 # Fotogramas por segundo
BLANCO = (255, 255, 255) # Color del fondo de la ventana (RGB)
NEGRO = (0, 0, 0) # Color del texto (RGB)
class PelotaPong:
def __init__(self, fichero_imagen):
# --- Atributos de la Clase ---
# Imagen de la Pelota
self.imagen = pygame.image.load(fichero_imagen).convert_alpha()
# Dimensiones de la Pelota
self.ancho, self.alto = self.imagen.get_size()
# Posición de la Pelota
self.x = VENTANA_HORI / 2 - self.ancho / 2
self.y = VENTANA_VERT / 2 - self.alto / 2
# Dirección de movimiento de la Pelota
self.dir_x = random.choice([-5, 5])
self.dir_y = random.choice([-5, 5])
# Puntuación de la pelota
self.puntuacion = 0
self.puntuacion_ia = 0
def mover(self):
self.x += self.dir_x
self.y += self.dir_y
def rebotar(self):
if self.x <= -self.ancho:
self.reiniciar()
self.puntuacion_ia += 1
if self.x >= VENTANA_HORI:
self.reiniciar()
self.puntuacion += 1
if self.y <= 0:
self.dir_y = -self.dir_y
if self.y + self.alto >= VENTANA_VERT:
self.dir_y = -self.dir_y
def reiniciar(self):
self.x = VENTANA_HORI / 2 - self.ancho / 2
self.y = VENTANA_VERT / 2 - self.alto / 2
self.dir_x = -self.dir_x
self.dir_y = random.choice([-5, 5])
class RaquetaPong:
def __init__(self):
self.imagen = pygame.image.load("raqueta.png").convert_alpha()
# --- Atributos de la Clase ---
# Dimensiones de la Raqueta
self.ancho, self.alto = self.imagen.get_size()
# Posición de la Raqueta
self.x = 0
self.y = VENTANA_VERT / 2 - self.alto / 2
# Dirección de movimiento de la Raqueta
self.dir_y = 0
def mover(self):
self.y += self.dir_y
if self.y <= 0:
self.y = 0
if self.y + self.alto >= VENTANA_VERT:
self.y = VENTANA_VERT - self.alto
def mover_ia(self, pelota):
if self.y > pelota.y:
self.dir_y = -3
elif self.y < pelota.y:
self.dir_y = 3
else:
self.dir_y = 0
self.y += self.dir_y
def golpear(self, pelota):
if (
pelota.x < self.x + self.ancho
and pelota.x > self.x
and pelota.y + pelota.alto > self.y
and pelota.y < self.y + self.alto
):
pelota.dir_x = -pelota.dir_x
pelota.x = self.x + self.ancho
def golpear_ia(self, pelota):
if (
pelota.x + pelota.ancho > self.x
and pelota.x < self.x + self.ancho
and pelota.y + pelota.alto > self.y
and pelota.y < self.y + self.alto
):
pelota.dir_x = -pelota.dir_x
pelota.x = self.x - pelota.ancho
def main():
# Inicialización de Pygame
pygame.init()
# Inicialización de la superficie de dibujo (display surface)
ventana = pygame.display.set_mode((VENTANA_HORI, VENTANA_VERT))
pygame.display.set_caption("Pong 9")
# Inicialización de la fuente
fuente = pygame.font.Font(None, 60)
pelota = PelotaPong("bola_roja.png")
raqueta_1 = RaquetaPong()
raqueta_1.x = 60
raqueta_2 = RaquetaPong()
raqueta_2.x = VENTANA_HORI - 60 - raqueta_2.ancho
# Bucle principal
jugando = True
while jugando:
pelota.mover()
pelota.rebotar()
raqueta_1.mover()
raqueta_2.mover_ia(pelota)
raqueta_1.golpear(pelota)
raqueta_2.golpear_ia(pelota)
ventana.fill(BLANCO)
ventana.blit(pelota.imagen, (pelota.x, pelota.y))
ventana.blit(raqueta_1.imagen, (raqueta_1.x, raqueta_1.y))
ventana.blit(raqueta_2.imagen, (raqueta_2.x, raqueta_2.y))
texto = f"{pelota.puntuacion} : {pelota.puntuacion_ia}"
letrero = fuente.render(texto, False, NEGRO)
ventana.blit(letrero, (VENTANA_HORI / 2 - fuente.size(texto)[0] / 2, 50))
for event in pygame.event.get():
if event.type == QUIT:
jugando = False
# Detecta que se ha pulsado una tecla
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
raqueta_1.dir_y = -5
if event.key == pygame.K_s:
raqueta_1.dir_y = 5
# Detecta que se ha soltado la tecla
if event.type == pygame.KEYUP:
if event.key == pygame.K_w:
raqueta_1.dir_y = 0
if event.key == pygame.K_s:
raqueta_1.dir_y = 0
pygame.display.flip()
pygame.time.Clock().tick(FPS)
pygame.quit()
if __name__ == "__main__":
main()
``` |
{
"source": "joseluis031/Recursividad",
"score": 4
} |
#### File: joseluis031/Recursividad/ej5dicotonomia.py
```python
dicoto = input("¿Que numero quieres buscar en la tabla?")
def buscar(tabla,dicoto,indice):
if dicoto == str(tabla[indice]):
print("el numero se encuentra en la tabla")
else:
if indice < (len(tabla)-1):
indice += 1
buscar(tabla, dicoto, indice)
buscar([0,1,2,3,4,5,7,8,10,11,12,14,15,16,18,19,20], dicoto, 0)
```
#### File: joseluis031/Recursividad/ej7bandera.py
```python
bandera = ["N","B","B","N","A","B","B","N","B","N","N","A","N","N","B","A","A"]
print(bandera)
negro = []
blanco = []
azul = []
def ordenada(bandera):
if len(bandera) > 0:
color = bandera.pop(0)
if color =="N":
negro.append(color)
ordenada(bandera)
elif color == "B":
blanco.append(color)
ordenada(bandera)
elif color == "A":
azul.append(color)
ordenada(bandera)
        else:
            # use a different name here: rebinding "ordenada" would shadow the
            # recursive function and raise UnboundLocalError
            resultado = negro + blanco + azul
            print(resultado)
    else:
        resultado = negro + blanco + azul
        print(resultado)
ordenada(bandera)
``` |
{
"source": "joseluis031/tarea-grupal",
"score": 3
} |
#### File: joseluis031/tarea-grupal/ranalaberinto.py
```python
import math
import os
import random
import re
import sys
# Definimos coordenadas
class Coordenadas:
def __init__(self, x, y):
self.x = x
self.y = y
def comparate(self,x,y):
if(self.x==x and self.y==y):
return True
else:
return False
# Definimos las coordenadas del tunel
class Tunel:
def __init__(self, x1, y1, x2, y2):
self.extremo1 = Coordenadas(x1, y1)
self.extremo2 = Coordenadas(x2, y2)
# Le decimos a la rana que busque el tunel para poder llegar al final del laberinto
def buscaTunel(Casillax,Casillay,tuneles):
coordenadas = Coordenadas(Casillax, Casillay)
for t in tuneles:
if(t.extremo1.comparate(Casillax,Casillay)==True):
coordenadas.x=t.extremo2.x
coordenadas.y=t.extremo2.y
break
elif(t.extremo2.comparate(Casillax,Casillay)==True):
coordenadas.x=t.extremo1.x
coordenadas.y=t.extremo1.y
break
return coordenadas
# definimos la exploracion de la rana y por donde tiene que ir para llegar hasta el final, siempre sabiendo que tiene la misma probabilidad de hacer cualquier movimiento
def exploracion(Casillax, Casillay, laberinto, n , m, tuneles):
num=0
den=0
probabilidad=0.0
if(Casillax>0 and laberinto[Casillax-1][Casillay]!="#"):
den +=1
if(laberinto[Casillax-1][Casillay]=="%"):
num+=1
if(Casillax<n-1 and laberinto[Casillax+1][Casillay]!="#"):
den +=1
if(laberinto[Casillax+1][Casillay]=="%"):
num+=1
if(Casillay<m-1 and laberinto[Casillax][Casillay+1]!="#"):
den +=1
if(laberinto[Casillax][Casillay+1]=="%"):
num+=1
if(Casillay>0 and laberinto[Casillax][Casillay-1]!="#"):
den +=1
if(laberinto[Casillax][Casillay-1]=="%"):
num+=1
if(den==0):
return probabilidad
probabilidad=num/den
if(Casillax>0 and laberinto[Casillax-1][Casillay]=="$"):
laberintocopia=laberinto
coordenadas= buscaTunel(Casillax-1,Casillay,tuneles)
laberintocopia[Casillax][Casillay]="#"
probabilidad+=exploracion(coordenadas.x,coordenadas.y, laberintocopia, n , m, tuneles)/den
if(Casillax<n-1 and laberinto[Casillax+1][Casillay]=="$"):
laberintocopia=laberinto
coordenadas= buscaTunel(Casillax+1,Casillay,tuneles)
laberintocopia[Casillax][Casillay]="#"
probabilidad+=exploracion(coordenadas.x,coordenadas.y, laberintocopia, n , m, tuneles)/den
if(Casillay<m-1 and laberinto[Casillax][Casillay+1]=="$"):
laberintocopia=laberinto
coordenadas= buscaTunel(Casillax,Casillay+1,tuneles)
laberintocopia[Casillax][Casillay]="#"
probabilidad+=exploracion(coordenadas.x,coordenadas.y, laberintocopia, n , m, tuneles)/den
if(Casillay>0 and laberinto[Casillax][Casillay-1]=="$"):
laberintocopia=laberinto
coordenadas= buscaTunel(Casillax,Casillay-1,tuneles)
laberintocopia[Casillax][Casillay]="#"
probabilidad+=(exploracion(coordenadas.x,coordenadas.y, laberintocopia, n , m, tuneles)/den)
return probabilidad
# rellenamos lo que queda con nuestro código; lo otro ya estaba hecho
if __name__ == '__main__':
print("Dimensiones del laberinto y número de tuneles:(filas columnas tuneles)")
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
k = int(first_multiple_input[2])
laberinto=[]
for n_itr in range(n):
print("Fila " + str(n_itr) + " del laberinto:(# muro,porcentaje salida, * bomba, $ vacia o tunel")
row = input()
laberinto.append(list(row))
# Write your code here
tuneles=[]
for k_itr in range(k):
print("Coordenadas(i1 j1 i2 j2) del tunel " +str(k_itr))
second_multiple_input = input().rstrip().split()
i1 = int(second_multiple_input[0])
j1 = int(second_multiple_input[1])
i2 = int(second_multiple_input[2])
j2 = int(second_multiple_input[3])
tuneles.append(Tunel(i1,j1,i2,j2))
# Write your code here
# Write your code here
print("Coordenadas iniciales de la rana:")
third_multiple_input = input().rstrip().split()
pos1= int(third_multiple_input[0])
pos2= int(third_multiple_input[1])
probabilidad= exploracion(pos1,pos2,laberinto,n,m,tuneles)
print(probabilidad)
``` |
{
"source": "joseluis1061/neuralnilm",
"score": 3
} |
#### File: neuralnilm/data/activationssource.py
```python
from __future__ import print_function, division
from copy import copy
from datetime import timedelta
import numpy as np
import pandas as pd
from neuralnilm.data.source import Source
from neuralnilm.consts import DATA_FOLD_NAMES
import logging
logger = logging.getLogger(__name__)
class ActivationsSource(Source):
"""Abstract base class for holding common behaviour across subclasses.
Attributes
----------
activations: dict:
Structure example:
{<train | unseen_appliances | unseen_activations_of_seen_appliances>: {
<appliance>: {
<building_name>: [<activations>]
}}}
Each activation is a pd.Series with DatetimeIndex and the following
metadata attributes: building, appliance, fold.
"""
def __init__(self, **kwargs):
if not self.allow_incomplete_target:
self._remove_over_long_activations(self.target_appliance)
super(ActivationsSource, self).__init__(**kwargs)
def report(self):
report = super(ActivationsSource, self).report()
report['num_activations'] = self.get_num_activations()
return report
def get_num_activations(self):
num_activations = {}
for fold, appliances in self.activations.items():
for appliance, buildings in appliances.items():
for building_name, activations in buildings.items():
num_activations.setdefault(fold, {}).setdefault(
appliance, {})[building_name] = len(activations)
return num_activations
def get_sequence(self, fold='train', enable_all_appliances=False):
while True:
yield self._get_sequence(
fold=fold, enable_all_appliances=enable_all_appliances)
def _distractor_appliances(self, fold):
all_appliances = set(self.activations[fold].keys())
distractor_appliances = all_appliances - set([self.target_appliance])
return list(distractor_appliances)
def _remove_over_long_activations(self, appliance_to_filter):
new_activations = {}
for fold, appliances in self.activations.items():
new_activations[fold] = {}
for appliance, buildings in appliances.items():
new_activations[fold][appliance] = {}
if appliance == appliance_to_filter:
for building, activations in buildings.items():
new_activations[fold][appliance][building] = [
activation for activation in activations
if len(activation) < self.seq_length]
else:
new_activations[fold][appliance] = buildings
self.activations = new_activations
def _select_building(self, fold, appliance):
"""
Parameters
----------
fold : str
appliance : str
Returns
-------
building_name : str
"""
if fold not in DATA_FOLD_NAMES:
raise ValueError("`fold` must be one of '{}' not '{}'."
.format(DATA_FOLD_NAMES, fold))
activations_per_building = self.activations[fold][appliance]
# select `p` for np.random.choice
if self.uniform_prob_of_selecting_each_building:
p = None # uniform distribution over all buildings
else:
num_activations_per_building = np.array([
len(activations) for activations in
activations_per_building.values()])
p = (num_activations_per_building /
num_activations_per_building.sum())
num_buildings = len(activations_per_building)
building_i = self.rng.choice(num_buildings, p=p)
        # dict.keys() is not indexable in Python 3, so convert to a list first
        # and pick the selected building from it
        activations_per_building = list(activations_per_building.keys())
        building_name = activations_per_building[building_i]
return building_name
def _select_activation(self, activations):
num_activations = len(activations)
if num_activations == 0:
raise RuntimeError("No appliance activations.")
activation_i = self.rng.randint(low=0, high=num_activations)
return activation_i
def _position_activation(self, activation, is_target_appliance):
"""
Parameters
----------
activation : pd.Series
is_target_appliance : bool
Returns
-------
pd.Series
"""
if is_target_appliance:
allow_incomplete = self.allow_incomplete_target
else:
allow_incomplete = self.allow_incomplete_distractors
# Select a start index
if allow_incomplete:
earliest_start_i = -len(activation)
latest_start_i = self.seq_length
else:
if len(activation) > self.seq_length:
raise RuntimeError("Activation too long to fit into sequence"
" and incomplete activations not allowed.")
earliest_start_i = 0
latest_start_i = self.seq_length - len(activation)
start_i = self.rng.randint(low=earliest_start_i, high=latest_start_i)
# Clip or pad head of sequence
if start_i < 0:
positioned_activation = activation.values[-start_i:]
else:
positioned_activation = np.pad(
activation.values, pad_width=(start_i, 0), mode='constant')
# Clip or pad tail to produce a sequence which is seq_length long
if len(positioned_activation) <= self.seq_length:
n_zeros_to_pad = self.seq_length - len(positioned_activation)
positioned_activation = np.pad(
positioned_activation, pad_width=(0, n_zeros_to_pad),
mode='constant')
else:
positioned_activation = positioned_activation[:self.seq_length]
if len(activation) > self.seq_length:
is_complete = False
else:
space_after_activation = self.seq_length - len(activation)
is_complete = 0 <= start_i <= space_after_activation
seq_start_time = activation.index[0] - timedelta(
seconds=start_i * self.sample_period)
index = pd.date_range(seq_start_time, periods=self.seq_length,
freq="{:d}S".format(self.sample_period))
positioned_activation_series = pd.Series(
positioned_activation, index=index)
return positioned_activation_series, is_complete
```
#### File: neuralnilm/data/datapipeline.py
```python
from __future__ import print_function, division
from copy import copy
import numpy as np
from neuralnilm.utils import none_to_list
class DataPipeline(object):
def __init__(self, sources, num_seq_per_batch,
input_processing=None,
target_processing=None,
source_probabilities=None,
rng_seed=None):
self.sources = sources
self.num_seq_per_batch = num_seq_per_batch
self.input_processing = none_to_list(input_processing)
self.target_processing = none_to_list(target_processing)
num_sources = len(self.sources)
if source_probabilities is None:
self.source_probabilities = [1 / num_sources] * num_sources
else:
self.source_probabilities = source_probabilities
self.rng_seed = rng_seed
self.rng = np.random.RandomState(self.rng_seed)
self._source_iterators = [None] * num_sources
def get_batch(self, fold='train', enable_all_appliances=False,
source_id=None, reset_iterator=False,
validation=False):
"""
Returns
-------
A Batch object or None if source iterator has hit a StopIteration.
"""
if source_id is None:
n = len(self.sources)
source_id = self.rng.choice(n, p=self.source_probabilities)
if reset_iterator or self._source_iterators[source_id] is None:
self._source_iterators[source_id] = (
self.sources[source_id].get_batch(
num_seq_per_batch=self.num_seq_per_batch,
fold=fold,
enable_all_appliances=enable_all_appliances,
validation=validation))
try:
            batch = next(self._source_iterators[source_id])
except StopIteration:
self._source_iterators[source_id] = None
return None
else:
batch.after_processing.input, i_metadata = self.apply_processing(
batch.before_processing.input, 'input')
batch.after_processing.target, t_metadata = self.apply_processing(
batch.before_processing.target, 'target')
batch.metadata.update({
'source_id': source_id,
'processing': {
'input': i_metadata,
'target': t_metadata
}
})
return batch
def apply_processing(self, data, net_input_or_target):
"""Applies `<input, target>_processing` to `data`.
Parameters
----------
data : np.ndarray
shape = (num_seq_per_batch, seq_length, num_features)
net_input_or_target : {'target', 'input}
Returns
-------
processed_data, metadata
processed_data : np.ndarray
shape = (num_seq_per_batch, seq_length, num_features)
metadata : dict
"""
processing_steps = self._get_processing_steps(net_input_or_target)
metadata = {}
for step in processing_steps:
data = step(data)
if hasattr(step, 'metadata'):
metadata.update(step.metadata)
return data, metadata
def apply_inverse_processing(self, data, net_input_or_target):
"""Applies the inverse of `<input, target>_processing` to `data`.
Parameters
----------
data : np.ndarray
shape = (num_seq_per_batch, seq_length, num_features)
net_input_or_target : {'target', 'input}
Returns
-------
processed_data : np.ndarray
shape = (num_seq_per_batch, seq_length, num_features)
"""
processing_steps = self._get_processing_steps(net_input_or_target)
reversed_processing_steps = processing_steps[::-1]
for step in reversed_processing_steps:
try:
data = step.inverse(data)
except AttributeError:
pass
return data
def _get_processing_steps(self, net_input_or_target):
assert net_input_or_target in ['input', 'target']
attribute = net_input_or_target + '_processing'
processing_steps = getattr(self, attribute)
assert isinstance(processing_steps, list)
return processing_steps
def report(self):
report = copy(self.__dict__)
for attr in ['sources', 'rng', '_source_iterators']:
report.pop(attr)
report['sources'] = {
i: source.report() for i, source in enumerate(self.sources)}
report['input_processing'] = [
processor.report() for processor in self.input_processing]
report['target_processing'] = [
processor.report() for processor in self.target_processing]
return {'pipeline': report}
def _get_output_neurons(self, new_batch):
batch_size = new_batch.target.shape[0]
neural_net_output = np.empty((batch_size, 3))
for b in range(batch_size):
seq = new_batch.target[b]
# case 1 and 2: if the signal start at 0
if seq[0] > 0:
start = 0
stop_array = np.where(seq > 0)[0]
# case 2: signal stops after 1
# set stop to the last element
if len(stop_array) == 0:
stop = seq[-1]
# case 1: signal stops before 1
else:
stop = stop_array[-1]
# calculate avg power
avg_power = np.mean(seq[start:stop + 1])
# case 3: signal starts after 0 and before 1
else:
start_array = np.where(seq > 0)[0]
if len(start_array) == 0:
# case 5: there is no signal in the window
start = 0
stop = 0
avg_power = 0
else:
start = start_array[0]
# find stop
stop_array = np.where(seq > 0)[0]
# case 4: signal stops after 1
# set to the last element
if len(stop_array) == 0:
stop = seq[-1]
else:
stop = stop_array[-1]
avg_power = np.mean(seq[start:stop + 1])
start = start / float(new_batch.target.shape[1] - 1)
stop = stop / float(new_batch.target.shape[1] - 1)
if stop < start:
raise ValueError("start must be before stop in sequence {}".format(b))
neural_net_output[b, :] = np.array([start, stop, avg_power])
return neural_net_output
def train_generator(self, fold='train', enable_all_appliances=False,
source_id=None, reset_iterator=False,
validation=False ):
while 1:
batch_iter = self.get_batch(fold, enable_all_appliances, source_id, reset_iterator,validation)
X_train = batch_iter.input
input_dim = X_train.shape[1]
Y_train = self._get_output_neurons(batch_iter)
yield (np.reshape(X_train, [self.num_seq_per_batch, input_dim, 1]), Y_train.astype(np.float32))
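# Hedged illustration (added; not part of the original library): any object
# that is callable, optionally exposes `metadata`, and implements `inverse`
# and `report` can act as a processing step for DataPipeline above.
# The divisor value below is an arbitrary example.
class DivideBy(object):
    def __init__(self, divisor):
        self.divisor = divisor

    def __call__(self, data):
        # forward step applied by apply_processing()
        return data / self.divisor

    def inverse(self, data):
        # undoes the forward step; used by apply_inverse_processing()
        return data * self.divisor

    def report(self):
        return {'class': 'DivideBy', 'divisor': self.divisor}


if __name__ == '__main__':
    step = DivideBy(1000.0)
    x = np.arange(6, dtype=np.float32)
    assert np.allclose(step.inverse(step(x)), x)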
``` |
{
"source": "joseluis8906/acme_bank",
"score": 2
} |
#### File: acme_bank/src/application_test.py
```python
import unittest
from dto import Transaction, Step, Trigger
from infra import AccountInMemoryRepo, DollarFakeFetcher
from domain import Account
from application import Workflow, StepActionError
class TestWorkflow(unittest.TestCase):
def test_exec_without_errors(self):
trigger = Trigger({'user_id': '10', 'pin': '1234'}, None, None)
steps = [
Step(None, None, 'validate_account', None),
Step(None, None, 'get_account_balance', None),
Step(None, {'money': {
'value': 10000
}}, 'withdraw_in_pesos', None),
Step(None, {'money': {
'value': 3
}}, 'withdraw_in_dollars', None),
]
transaction = Transaction(steps, trigger)
account_repo = AccountInMemoryRepo()
account_repo.save(Account('10', '1234', 50000))
dollar_fetcher = DollarFakeFetcher()
workflow = Workflow(account_repo, account_repo, dollar_fetcher)
workflow.run(transaction)
account = account_repo.get_by_id('10')
self.assertEqual(account.balance(), 29623)
def test_exec_return_error(self):
with self.assertRaises(StepActionError):
trigger = Trigger({'user_id': '11', 'pin': '4321'}, None, None)
steps = [
Step(None, None, 'validate_account', None),
Step(None, None, 'get_account_balance', None),
Step(None, {'money': {
'value': 10000
}}, 'withdraw_in_pesos', None),
Step(None, {'money': {
'value': 3
}}, 'withdraw_in_euros', None),
]
transaction = Transaction(steps, trigger)
account_repo = AccountInMemoryRepo()
account_repo.save(Account('11', '4321', 50000))
dollar_fetcher = DollarFakeFetcher()
workflow = Workflow(account_repo, account_repo, dollar_fetcher)
workflow.run(transaction)
account = account_repo.get_by_id('10')
self.assertEqual(account.balance(), 29623)
```
#### File: acme_bank/src/domain.py
```python
class PinValidationError(Exception):
pass
class PinChangingError(Exception):
pass
class AccountWithoutBalanceError(Exception):
pass
class Account:
def __init__(self, id: str, pin: str, balance: float):
self.__id = id
self.__pin = pin
self.__balance = balance
def id(self) -> str:
return self.__id
def verify_pin(self, pin) -> bool:
if not isinstance(pin, str):
pin = str(pin)
if self.__pin != pin:
raise PinValidationError('invalid pin')
return True
def update_pin(self, pin: str) -> None:
if self.__pin == pin:
raise PinChangingError('old and new pin are the same')
self.__pin = pin
def balance(self) -> float:
return self.__balance
def add_balance(self, amount: float) -> None:
self.__balance += amount
def sub_balance(self, amount: float) -> None:
if self.__balance < amount:
raise AccountWithoutBalanceError('insufficient balance')
self.__balance -= amount
def to_dict(self):
return dict(user_id=self.__id, pin=self.__pin, balance=self.__balance)
@classmethod
def from_dict(cls, raw: dict):
return cls(raw['user_id'], raw['pin'], int(raw['balance']))
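# Quick illustrative sketch (added) of the Account lifecycle; the numbers are
# made up, and the exceptions noted in the comments are the ones defined above.
if __name__ == '__main__':
    account = Account('10', '1234', 50000)
    account.verify_pin('1234')      # raises PinValidationError on a wrong pin
    account.sub_balance(10000)      # raises AccountWithoutBalanceError if short
    account.add_balance(2500)
    print(account.balance())        # 42500
    print(Account.from_dict(account.to_dict()).balance())  # round-trips to 42500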
``` |
{
"source": "joseluis8906/fancychat",
"score": 2
} |
#### File: fancychat/bin/FancyChat.py
```python
import gi
gi.require_version("Gtk", "3.0")
import gi.repository.Gtk as Gtk
import gi.repository.Gdk as Gdk
from fancychat.TitleBar import TitleBar
from fancychat.ScrollView import ScrollView
from fancychat.MsgArrive import MsgArrive
from fancychat.MsgSend import MsgSend
from fancychat.Editor import Editor
class Restaurantic(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Fancy Chat")
self.set_resizable(False)
self.set_default_size(360, 512)
self._container = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.add(self._container)
self.title_bar = TitleBar()
self._container.pack_start(self.title_bar, False, True, 0)
self.scroll_view = ScrollView()
self._container.pack_start(self.scroll_view, True, True, 0)
self.scroll_view.append(MsgArrive("Message arrive..."))
self.scroll_view.append(MsgSend("Message send..."))
self.editor = Editor()
self._container.pack_start(self.editor, False, False, 0)
if __name__ == '__main__':
style_provider = Gtk.CssProvider()
style_provider.load_from_path("share/fancychat/styles.css")
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
window = Restaurantic()
window.connect("delete-event", Gtk.main_quit)
window.show_all()
Gtk.main()
``` |
{
"source": "joseluis8906/tests",
"score": 2
} |
#### File: python/arbol/arbol.py
```python
import gi
gi.require_version ("Gtk", "3.0")
from gi.repository import Gtk
from page1 import Page1
from page2 import Page2
from page3 import Page3
class ArbolDecision(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Árbol De Decisión")
self.set_default_size(256, 600)
self.notebook = Gtk.Notebook()
self.add(self.notebook)
self.page1 = Page1()
self.notebook.append_page(self.page1, Gtk.Label('Tablas'))
self.page2 = Page2()
self.notebook.append_page(self.page2, Gtk.Label('Gráfico'))
self.page3 = Page3()
self.notebook.append_page(self.page3, Gtk.Label('Solución'))
self.page1.btn_update.connect("clicked", self.Guardar)
def Guardar(self, ev):
val_list = self.page1.GetText()
t1c1 = val_list[0][0]+" ("+val_list[0][1]+")"
t2c1 = val_list[0][2]+" ("+val_list[0][3]+")"
t3c1 = val_list[0][4]+" ("+val_list[0][5]+")"
t1c2 = val_list[0][6]+" ("+val_list[0][7]+")"
t2c2 = val_list[0][8]+" ("+val_list[0][9]+")"
t3c2 = val_list[0][10]+" ("+val_list[0][11]+")"
t1c3 = val_list[0][6]+" ("+val_list[0][7]+")"
t2c3 = val_list[0][8]+" ("+val_list[0][9]+")"
t3c3 = val_list[0][10]+" ("+val_list[0][11]+")"
self.page2.UpdateData(t1c1,t2c1,t3c1,t1c2,t2c2,t3c2,t1c3,t2c3,t3c3)
value_list = self.page1.GetValue()
self.page3.UpdateData(value_list[0][0],value_list[0][1],value_list[0][2],value_list[0][3],value_list[0][4],value_list[0][5],value_list[0][6],value_list[0][7],value_list[0][8],value_list[0][9],value_list[0][10],value_list[0][11],value_list[1][0],value_list[1][1],value_list[1][2],value_list[1][3])
self.page3.Renderize()
def main():
win = ArbolDecision()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
if __name__ == "__main__":
main()
```
#### File: python/arbol/tblcifven.py
```python
import gi
gi.require_version ("Gtk", "3.0")
from gi.repository import Gtk, Gio, Gdk
from probentry import ProbEntry
from costoentry import CostoEntry
class TblCifVen(Gtk.Grid):
def __init__(self):
Gtk.Grid.__init__(self)
self.set_column_spacing (1)
self.set_row_spacing (1)
self.set_column_homogeneous (False)
self.insert_column(1)
self.insert_column(2)
self.insert_column(3)
self.insert_row(1)
self.insert_row(2)
self.insert_row(3)
self.insert_row(4)
self.lbl_cifra_de_ventas = Gtk.Label('Tabla Cifra De Ventas')
self.attach(self.lbl_cifra_de_ventas, 0, 0, 4, 1)
####### sin servicio
self.lbl_sin_servicio = Gtk.Label('Sin Servicio')
self.attach(self.lbl_sin_servicio, 0, 1, 2, 1)
self.lbl_ingresos1 = Gtk.Label('Ingresos')
self.attach(self.lbl_ingresos1, 0, 2, 1, 1)
self.lbl_prob1 = Gtk.Label('Probabilidad')
self.attach(self.lbl_prob1, 1, 2, 1, 1)
self.txt_ingreso1_sin = CostoEntry()
self.attach(self.txt_ingreso1_sin, 0, 3, 1, 1)
self.txt_ingreso2_sin = CostoEntry()
self.attach(self.txt_ingreso2_sin, 0, 4, 1, 1)
self.txt_ingreso3_sin = CostoEntry()
self.attach(self.txt_ingreso3_sin, 0, 5, 1, 1)
self.txt_prob1_sin = ProbEntry()
self.attach(self.txt_prob1_sin, 1, 3, 1, 1)
self.txt_prob2_sin = ProbEntry()
self.attach(self.txt_prob2_sin, 1, 4, 1, 1)
self.txt_prob3_sin = ProbEntry()
self.attach(self.txt_prob3_sin, 1, 5, 1, 1)
##### con servicio
self.lbl_con_servicio = Gtk.Label('Con Servicio')
self.attach(self.lbl_con_servicio, 2, 1, 2, 1)
self.lbl_ingresos2 = Gtk.Label('Ingresos')
self.attach(self.lbl_ingresos2, 2, 2, 1, 1)
self.lbl_prob2 = Gtk.Label('Probabilidad')
self.attach(self.lbl_prob2, 3, 2, 1, 1)
self.txt_ingreso1_con = CostoEntry()
self.attach(self.txt_ingreso1_con, 2, 3, 1, 1)
self.txt_ingreso2_con = CostoEntry()
self.attach(self.txt_ingreso2_con, 2, 4, 1, 1)
self.txt_ingreso3_con = CostoEntry()
self.attach(self.txt_ingreso3_con, 2, 5, 1, 1)
self.txt_prob1_con = ProbEntry()
self.attach(self.txt_prob1_con, 3, 3, 1, 1)
self.txt_prob2_con = ProbEntry()
self.attach(self.txt_prob2_con, 3, 4, 1, 1)
self.txt_prob3_con = ProbEntry()
self.attach(self.txt_prob3_con, 3, 5, 1, 1)
def GetText(self):
        return [self.txt_ingreso1_sin.get_text(), self.txt_prob1_sin.get_text(),
                self.txt_ingreso2_sin.get_text(), self.txt_prob2_sin.get_text(),
                self.txt_ingreso3_sin.get_text(), self.txt_prob3_sin.get_text(),
                self.txt_ingreso1_con.get_text(), self.txt_prob1_con.get_text(),
                self.txt_ingreso2_con.get_text(), self.txt_prob2_con.get_text(),
                self.txt_ingreso3_con.get_text(), self.txt_prob3_con.get_text()]
def GetValue(self):
        return [self.txt_ingreso1_sin.GetIntValue(), self.txt_prob1_sin.GetPercentValue(),
                self.txt_ingreso2_sin.GetIntValue(), self.txt_prob2_sin.GetPercentValue(),
                self.txt_ingreso3_sin.GetIntValue(), self.txt_prob3_sin.GetPercentValue(),
                self.txt_ingreso1_con.GetIntValue(), self.txt_prob1_con.GetPercentValue(),
                self.txt_ingreso2_con.GetIntValue(), self.txt_prob2_con.GetPercentValue(),
                self.txt_ingreso3_con.GetIntValue(), self.txt_prob3_con.GetPercentValue()]
```
#### File: pagord/controllers/certificado_disponibilidad_controller.py
```python
import datetime
from PyQt4 import QtGui
from models.models import session
from models.cerdisp_model import CerdispModel
def initData(self, query_orden):
self.text_num_certi_disp1.setText(QtGui.QApplication.translate('Orden', u"", None, QtGui.QApplication.UnicodeUTF8))
self.text_num_certi_disp2.setText(QtGui.QApplication.translate('Orden', u"", None, QtGui.QApplication.UnicodeUTF8))
self.text_num_certi_disp3.setText(QtGui.QApplication.translate('Orden', u"", None, QtGui.QApplication.UnicodeUTF8))
self.date_fecha_certi_disp1.setDate(datetime.date.today())
self.date_fecha_certi_disp2.setDate(datetime.date.today())
self.date_fecha_certi_disp3.setDate(datetime.date.today())
def translateView(self):
self.setTitle(QtGui.QApplication.translate('Orden', 'Certificados de disponibilidad', None, QtGui.QApplication.UnicodeUTF8))
self.label_num_certi_disp.setText(QtGui.QApplication.translate('Orden', 'Número', None, QtGui.QApplication.UnicodeUTF8))
self.label_fecha_certi_disp.setText(QtGui.QApplication.translate('Orden', 'Fecha', None, QtGui.QApplication.UnicodeUTF8))
self.text_num_certi_disp1.setMinimumWidth(120)
self.text_num_certi_disp2.setMinimumWidth(120)
self.text_num_certi_disp3.setMinimumWidth(120)
self.date_fecha_certi_disp1.setCalendarPopup(True)
self.date_fecha_certi_disp2.setCalendarPopup(True)
self.date_fecha_certi_disp3.setCalendarPopup(True)
def updateData(self, query_orden):
if(len(query_orden)==0):
self.init_data(query_orden)
else:
orden = query_orden[0]
try:
self.text_num_certi_disp1.setText(QtGui.QApplication.translate('Orden', u"%s"%(orden.cerdisp[0].get_numero()), None, QtGui.QApplication.UnicodeUTF8))
self.date_fecha_certi_disp1.setDate(datetime.date(orden.cerdisp[0].get_year(), orden.cerdisp[0].get_month(), orden.cerdisp[0].get_day()))
self.text_num_certi_disp2.setText(QtGui.QApplication.translate('Orden', u"%s"%(orden.cerdisp[1].get_numero()), None, QtGui.QApplication.UnicodeUTF8))
self.date_fecha_certi_disp2.setDate(datetime.date(orden.cerdisp[1].get_year(), orden.cerdisp[1].get_month(), orden.cerdisp[1].get_day()))
self.text_num_certi_disp3.setText(QtGui.QApplication.translate('Orden', u"%s"%(orden.cerdisp[2].get_numero()), None, QtGui.QApplication.UnicodeUTF8))
self.date_fecha_certi_disp3.setDate(datetime.date(orden.cerdisp[2].get_year(), orden.cerdisp[2].get_month(), orden.cerdisp[2].get_day()))
except:
pass
def checkValidacion(self):
if self.text_num_certi_disp1.validacion():
if self.text_num_certi_disp2.validacion():
if self.text_num_certi_disp3.validacion():
fechas_app_cerdisp = [self.date_fecha_certi_disp1, self.date_fecha_certi_disp2, self.date_fecha_certi_disp3]
for index, cerdisp in enumerate([self.text_num_certi_disp1, self.text_num_certi_disp2, self.text_num_certi_disp3]):
if len(cerdisp.text()) > 0:
fecha_app_cerdisp = datetime.date(fechas_app_cerdisp[index].date().year(), fechas_app_cerdisp[index].date().month(), fechas_app_cerdisp[index].date().day())
if len(self.parent().parent().OrdenModel.get_cerdisp()) > index:
fecha_query = self.parent().parent().OrdenModel.cerdisp[index].get_fecha()
if self.parent().parent().OrdenModel.cerdisp[index].get_numero() != u"%s"%(cerdisp.text()) or fecha_query != fecha_app_cerdisp:
self.parent().parent().OrdenModel.cerdisp[index].set_numero(u"%s"%(cerdisp.text()))
self.parent().parent().OrdenModel.cerdisp[index].set_fecha(fecha_app_cerdisp)
else:
self.parent().parent().OrdenModel.cerdisp.append(CerdispModel(u"%s"%(cerdisp.text()), fecha_app_cerdisp))
else:
if len(self.parent().parent().OrdenModel.get_cerdisp()) > index:
session.delete(self.parent().parent().OrdenModel.cerdisp[index])
return True
return False
def Save(self):
if self.check_validacion():
return True
return False
```
#### File: pagord/controllers/descuentos_adiciones_controller.py
```python
from PyQt4 import QtGui
from models.discriminacion_model import DiscriminacionModel
def initData(self, query_orden):
self.text_iva.setText(QtGui.QApplication.translate('Orden', u"", None, QtGui.QApplication.UnicodeUTF8))
self.text_ret_fuente.setText(QtGui.QApplication.translate('Orden', u"", None, QtGui.QApplication.UnicodeUTF8))
self.text_ret_iva.setText(QtGui.QApplication.translate('Orden', u"", None, QtGui.QApplication.UnicodeUTF8))
self.checkbox_imp_municipales.setChecked(False)
self.checkbox_auto_retenedor.setChecked(False)
self.text_otros_concepto.setText(QtGui.QApplication.translate('Orden', u"", None, QtGui.QApplication.UnicodeUTF8))
self.text_otros_valor.setText(QtGui.QApplication.translate('Orden', u"", None, QtGui.QApplication.UnicodeUTF8))
def translateView(self):
self.setTitle(QtGui.QApplication.translate('Orden', 'Descuentos y adiciones', None, QtGui.QApplication.UnicodeUTF8))
self.label_iva.setText(QtGui.QApplication.translate('Orden', 'Iva $', None, QtGui.QApplication.UnicodeUTF8))
self.label_ret_iva.setText(QtGui.QApplication.translate('Orden', 'Reteiva %', None, QtGui.QApplication.UnicodeUTF8))
self.label_auto_retenedor.setText(QtGui.QApplication.translate('Orden', 'Autorretenedor', None, QtGui.QApplication.UnicodeUTF8))
self.label_imp_municipales.setText(QtGui.QApplication.translate('Orden', 'Impuestos municipales', None, QtGui.QApplication.UnicodeUTF8))
self.label_ret_fuente.setText(QtGui.QApplication.translate('Orden', 'Retefuente % ', None, QtGui.QApplication.UnicodeUTF8))
self.label_otros_concepto.setText(QtGui.QApplication.translate('Orden', 'Concepto otros (item1,item2,...) ', None, QtGui.QApplication.UnicodeUTF8))
self.label_otros.setText(QtGui.QApplication.translate('Orden', 'Valor otros (item1+item2+...) ', None, QtGui.QApplication.UnicodeUTF8))
def updateData(self, query_orden):
if(len(query_orden)==0):
self.init_data(query_orden)
else:
discriminacion = query_orden[0].discriminacion[0]
self.text_iva.setText(QtGui.QApplication.translate('Orden', u"%s"%(discriminacion.get_iva()), None, QtGui.QApplication.UnicodeUTF8))
self.text_ret_fuente.setText(QtGui.QApplication.translate('Orden', u"%s"%(discriminacion.get_ret_fuente()), None, QtGui.QApplication.UnicodeUTF8))
self.text_ret_iva.setText(QtGui.QApplication.translate('Orden', u"%s"%(discriminacion.get_ret_iva()), None, QtGui.QApplication.UnicodeUTF8))
self.checkbox_auto_retenedor.setChecked(bool(discriminacion.get_auto_retenedor()))
self.checkbox_imp_municipales.setChecked(bool(discriminacion.get_imp_municipales()))
self.text_otros_concepto.setText(QtGui.QApplication.translate('Orden', u"%s"%(discriminacion.get_otros_concepto()), None, QtGui.QApplication.UnicodeUTF8))
self.text_otros_valor.setText(QtGui.QApplication.translate('Orden', u"%s"%(discriminacion.get_otros_valor()), None, QtGui.QApplication.UnicodeUTF8))
def checkValidacion(self):
if self.text_iva.validacion():
if self.text_ret_fuente.validacion():
if self.text_ret_iva.validacion():
if self.text_otros_concepto.validacion():
if self.text_otros_valor.validacion():
if len(self.parent().OrdenModel.get_discriminacion()) > 0:
self.parent().OrdenModel.discriminacion[0].set_iva(u"%s"%(self.text_iva.text()))
self.parent().OrdenModel.discriminacion[0].set_ret_fuente(u"%s"%(self.text_ret_fuente.text()))
self.parent().OrdenModel.discriminacion[0].set_ret_iva(u"%s"%(self.text_ret_iva.text()))
self.parent().OrdenModel.discriminacion[0].set_auto_retenedor(bool(self.checkbox_auto_retenedor.checkState()))
self.parent().OrdenModel.discriminacion[0].set_imp_municipales(bool(self.checkbox_imp_municipales.checkState()))
self.parent().OrdenModel.discriminacion[0].set_otros_concepto(u"%s"%(self.text_otros_concepto.text()))
self.parent().OrdenModel.discriminacion[0].set_otros_valor(u"%s"%(self.text_otros_valor.text()))
else:
self.parent().OrdenModel.discriminacion.append(DiscriminacionModel(u"%s"%(self.text_iva.text()), u"%s"%(self.text_ret_fuente.text()), u"%s"%(self.text_ret_iva.text()), bool(self.checkbox_auto_retenedor.checkState()), bool(self.checkbox_imp_municipales.checkState()), u"%s"%(self.text_otros_concepto.text()), u"%s"%(self.text_otros_valor.text())))
return True
return False
def Save(self):
if self.check_validacion():
return True
return False
```
#### File: pagord/views/barra_menu_view.py
```python
from PyQt4 import QtGui
class BarraMenuView(QtGui.QMenuBar):
def __init__(self, parent):
super(BarraMenuView, self).__init__(parent)
accion_salir = QtGui.QAction(QtGui.QIcon('exit.png'), '&Salir', self)
accion_salir.setStatusTip(u'Salir')
accion_salir.triggered.connect(QtGui.qApp.quit)
self.archivo = self.addMenu(u'&Archivo')
self.archivo.addAction(accion_salir)
accion_dialogo = QtGui.QAction(QtGui.QIcon('configuracion.png'), u'&Búsqueda', self)
accion_dialogo.setStatusTip(u'Búsqueda')
accion_dialogo.triggered.connect(self.parent().dialogo_busqueda)
self.opciones = self.addMenu(u'&Opciones')
self.opciones.addAction(accion_dialogo)
def configuracion(self):
print("llamando a configuración")
```
#### File: pagord/views/certificado_registro_presupuestal_view.py
```python
from PyQt4 import QtGui
from campos import CampoCad
from controllers.certificado_registro_presupuestal_controller import initData, translateView, updateData, checkValidacion, Save
class CertificadoRegistroPresupuestalView(QtGui.QGroupBox):
def __init__(self, parent=None):
super(CertificadoRegistroPresupuestalView, self).__init__(parent)
self.conten_colum_certi_reg = QtGui.QHBoxLayout()
self.columna_num_certi_reg = QtGui.QVBoxLayout()
self.columna_fechas_certi_reg = QtGui.QVBoxLayout()
self.label_num_certi_reg = QtGui.QLabel(self)
self.label_fecha_certi_reg = QtGui.QLabel(self)
self.text_num_certi_reg1 = CampoCad(self, u"Certificado Registro Presupuestal 1")
self.date_fecha_certi_reg1 = QtGui.QDateEdit(self)
self.text_num_certi_reg2 = CampoCad(self, u"Certificado Registro Presupuestal 2")
self.date_fecha_certi_reg2 = QtGui.QDateEdit(self)
self.text_num_certi_reg3 = CampoCad(self, u"Certificado Registro Presupuestal 3")
self.date_fecha_certi_reg3 = QtGui.QDateEdit(self)
self.conten_colum_certi_reg.addLayout(self.columna_num_certi_reg)
self.conten_colum_certi_reg.addLayout(self.columna_fechas_certi_reg)
self.columna_num_certi_reg.addWidget(self.label_num_certi_reg)
self.columna_fechas_certi_reg.addWidget(self.label_fecha_certi_reg)
self.columna_num_certi_reg.addWidget(self.text_num_certi_reg1)
self.columna_fechas_certi_reg.addWidget(self.date_fecha_certi_reg1)
self.columna_num_certi_reg.addWidget(self.text_num_certi_reg2)
self.columna_fechas_certi_reg.addWidget(self.date_fecha_certi_reg2)
self.columna_num_certi_reg.addWidget(self.text_num_certi_reg3)
self.columna_fechas_certi_reg.addWidget(self.date_fecha_certi_reg3)
self.setLayout(self.conten_colum_certi_reg)
self.translate_view()
init_data = initData
translate_view = translateView
update_data = updateData
check_validacion = checkValidacion
save = Save
```
#### File: pagord/views/descuentos_adiciones_view.py
```python
from PyQt4 import QtGui
from campos import CampoNum, CampoCad, CampoDec, ConceptoOtros, ValorOtros
from controllers.descuentos_adiciones_controller import initData, translateView, updateData, checkValidacion, Save
class DescuentosAdicionesView(QtGui.QGroupBox):
def __init__(self, parent=None):
super(DescuentosAdicionesView, self).__init__(parent)
self.contenedor_descuentos = QtGui.QHBoxLayout()
self.columna1 = QtGui.QVBoxLayout()
self.columna2 = QtGui.QVBoxLayout()
self.columna3 = QtGui.QVBoxLayout()
self.columna_auto_retenedor = QtGui.QVBoxLayout()
self.columna_imp_municipales = QtGui.QVBoxLayout()
self.fila_columnas_checkbox = QtGui.QHBoxLayout()
self.label_iva = QtGui.QLabel(self)
self.text_iva = CampoNum(self, u"Iva")
self.label_ret_iva = QtGui.QLabel(self)
self.text_ret_iva = CampoDec(self, u"Reteiva")
self.label_imp_municipales = QtGui.QLabel(self)
self.checkbox_imp_municipales = QtGui.QCheckBox(self)
self.label_auto_retenedor = QtGui.QLabel(self)
self.checkbox_auto_retenedor = QtGui.QCheckBox(self)
self.label_ret_fuente = QtGui.QLabel(self)
self.text_ret_fuente = CampoDec(self, u"Retefuente")
self.label_otros_concepto = QtGui.QLabel(self)
self.text_otros_concepto = ConceptoOtros(self, u"Concepto Otros")
self.label_otros = QtGui.QLabel(self)
self.text_otros_valor = ValorOtros(self, u"Valor Otros")
self.contenedor_descuentos.addLayout(self.columna1)
self.contenedor_descuentos.addLayout(self.columna2)
self.contenedor_descuentos.addLayout(self.columna3)
self.columna1.addWidget(self.label_iva)
self.columna1.addWidget(self.text_iva)
self.columna1.addWidget(self.label_ret_iva)
self.columna1.addWidget(self.text_ret_iva)
self.columna2.addWidget(self.label_ret_fuente)
self.columna2.addWidget(self.text_ret_fuente)
self.columna2.addLayout(self.fila_columnas_checkbox)
self.fila_columnas_checkbox.addLayout(self.columna_auto_retenedor)
self.fila_columnas_checkbox.addLayout(self.columna_imp_municipales)
self.columna_auto_retenedor.addWidget(self.label_auto_retenedor)
self.columna_auto_retenedor.addWidget(self.checkbox_auto_retenedor)
self.columna_imp_municipales.addWidget(self.label_imp_municipales)
self.columna_imp_municipales.addWidget(self.checkbox_imp_municipales)
self.columna3.addWidget(self.label_otros_concepto)
self.columna3.addWidget(self.text_otros_concepto)
self.columna3.addWidget(self.label_otros)
self.columna3.addWidget(self.text_otros_valor)
self.setLayout(self.contenedor_descuentos)
self.translate_view()
init_data = initData
translate_view = translateView
update_data = updateData
check_validacion = checkValidacion
save = Save
``` |
{
"source": "joseluisbezerra/recipe-api",
"score": 3
} |
#### File: core/tests/test_admin.py
```python
from rest_framework import status
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import (
TestCase,
Client
)
User = get_user_model()
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
admin_user = User.objects.create_superuser(
email='<EMAIL>',
password='<PASSWORD>'
)
self.client.force_login(admin_user)
self.user = User.objects.create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='Foo bar',
)
def test_users_listed(self):
"""Test that users are listed on the user page"""
url = reverse('admin:core_user_changelist')
response = self.client.get(url)
self.assertContains(response, self.user.name)
self.assertContains(response, self.user.email)
def test_user_page_change(self):
"""Test that the user edit page works"""
url = reverse(
'admin:core_user_change',
args=[self.user.id]
)
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
``` |
{
"source": "joseluisbn/PythonLearningProject",
"score": 4
} |
#### File: PythonLearningProject/src/class_create.py
```python
class Human:
# Attributes
def __init__(self, name, surname, genre): # __init__ is the constructor
self.name = name
self.surname = surname
self.genre = genre
# Methods
def develop(self, hours):
self.hours = 0
print(f"is developing for {hours} hours")
def sleep(self):
print("Zzzzzzzz")
# Object
my_human = Human("Hironobu", "Sakaguchi", "male")
print(my_human.name, my_human.surname)
```
#### File: PythonLearningProject/src/functions.py
```python
def user():
name = input("What's your name?")
age = input("How old are you?")
print(name + age)
# Functions with parameters
user()
def another_user(name, age):
print(f"Your name is {name} and your age is {age}")
another_user("Luis", 34)
# With return
def multiply(num1, num2):
total = num1 * num2
return total
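# Illustrative usage of the function above (example values chosen arbitrarily):
print(multiply(3, 4))  # prints 12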
``` |
{
"source": "joseluisdiaz/vamonostro-IoT",
"score": 3
} |
#### File: joseluisdiaz/vamonostro-IoT/mqtt_client.py
```python
import paho.mqtt.client as mqtt
import requests
IFTTT_URL = "https://maker.ifttt.com/trigger/"
IFTTT_EVENT = "motion_detected"
IFTTT_KEY = "<KEY>"
MQTT_URL = "iot.eclipse.org"
MQTT_TOPIC = "v2/zolertia/tutorialthings/66"
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
print("Subscribed to " + MQTT_TOPIC)
client.subscribe(MQTT_TOPIC)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
requests.post(IFTTT_URL + IFTTT_EVENT + "/with/key/" + IFTTT_KEY)
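    # Note (illustrative): the IFTTT Maker webhook expects URLs of the form
    # https://maker.ifttt.com/trigger/{event}/with/key/{key}, which is what the
    # concatenation above produces; IFTTT_KEY must hold a real webhook key.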
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
print("connecting to " + MQTT_URL)
client.connect(MQTT_URL, 1883, 60)
client.loop_forever()
``` |
{
"source": "joseluisGA/videojuegos",
"score": 3
} |
#### File: videojuegos/arcanoid/ball.py
```python
import pygame
from settings import *
from pygame import Vector2
import random
from brick import Brick
class Ball(pygame.sprite.Sprite):
def __init__(self, x, y, groups, ball_image, bounce_fx):
pygame.sprite.Sprite.__init__(self, groups)
#self.image = pygame.Surface((BALL_WIDTH, BALL_HEIGHT))
#self.image.fill(BLUISHGREY)
self.image = ball_image
self.rect = self.image.get_rect()
self.rect.center = Vector2(x, y)
self.velocity = Vector2(0, 0)
self.bounce_fx = bounce_fx
self.asleep = True
def update(self, bricks, pad, ball_lost_callback):
if self.asleep:
if pad.velocity != 0:
self.velocity = Vector2(pad.velocity, -5).normalize()
self.asleep = False
return
position = self.rect.center + self.velocity.normalize() * BALL_SPEED
self.rect.centerx = position.x
self.collide_with('x', bricks)
self.rect.centery = position.y
self.collide_with('y', bricks)
if self.rect.centerx < 0:
self.rect.centerx = 0
self.velocity.x *= -1
self.bounce_fx.play()
if self.rect.centerx > WIDTH-1:
self.rect.centerx = WIDTH-1
self.velocity.x *= -1
self.bounce_fx.play()
if self.rect.centery < 0:
self.rect.centery = 0
self.velocity.y *= -1
self.bounce_fx.play()
if self.rect.centery > HEIGHT-1:
self.kill()
ball_lost_callback()
def collide_with(self, dir, groups):
hits = pygame.sprite.spritecollide(self, groups, False)
if len(hits) == 0:
return
if dir == 'x':
if self.velocity.x > 0:
self.rect.right = hits[0].rect.left
if self.velocity.x < 0:
self.rect.left = hits[0].rect.right
self.velocity.x *= -1
if dir == 'y':
if self.velocity.y > 0:
self.rect.bottom = hits[0].rect.top
if self.velocity.y < 0:
self.rect.top = hits[0].rect.bottom
self.velocity.y *= -1
if type(hits[0]) == Brick:
hits[0].breakIt()
```
#### File: videojuegos/arcanoid/game.py
```python
import pygame
from settings import *
from pygame import Vector2
import math
from ball import Ball
from pad import Pad
from brick import Brick
import random
from os import path
# The player controls a PADDLE that moves horizontally, with inertia.
# They must keep the BALL(S) from falling off the bottom of the screen.
# The BALLS bounce around the play area against the side walls,
# the player's paddle and the BRICKS.
# Each LEVEL has a set of BRICKS that must be DESTROYED
# by making the ball collide with them.
# The level ends when all the BRICKS are DESTROYED.
# If a ball falls off the bottom of the screen it is removed from play.
# The game ends when there are no balls left on screen.
# ADDED: brick walls with different shapes.
class Game:
def __init__(self):
pygame.mixer.pre_init(44100, -16, 2, 1024)
pygame.init()
pygame.mixer.init()
self.screen = pygame.display.set_mode([WIDTH, HEIGHT])
pygame.display.set_caption(TITLE)
self.clock = pygame.time.Clock()
self.load_data()
def load_data(self):
root_folder = path.dirname(__file__)
sound_folder = path.join(root_folder, "sound")
img_folder = path.join(root_folder, "img")
self.load_images(img_folder)
self.load_sounds(sound_folder)
def load_images(self, img_folder):
self.ball_image = pygame.image.load(path.join(img_folder, "ballBlue.png")).convert_alpha()
self.pad_image = pygame.image.load(path.join(img_folder, "paddleRed.png")).convert_alpha()
brick_colors=["blue", "green", "grey", "purple", "yellow", "red"]
self.brick_images = []
for color in brick_colors:
filename= f"element_{color}_rectangle.png"
img = pygame.image.load(path.join(img_folder, filename))
self.brick_images.append(img)
def load_sounds(self, sound_folder):
self.bounce_fx = pygame.mixer.Sound(path.join(sound_folder, "bounce.wav"))
self.break_fx = pygame.mixer.Sound(path.join(sound_folder, "break.wav"))
self.break_fx.set_volume(0.1)
def start(self):
self.all_sprites = pygame.sprite.Group()
self.balls = pygame.sprite.Group()
self.bricks = pygame.sprite.Group()
self.lifes = 3
self.pad = Pad(WIDTH//2, HEIGHT - PAD_HEIGHT*2, (self.all_sprites), self.pad_image, self.bounce_fx)
#self.ball = Ball(self.pad.rect.centerx, self.pad.rect.top - PAD_HEIGHT*2, (self.balls), self.ball_image, self.bounce_fx)
self.ball = self.create_ball_at(self.pad.rect.centerx, self.pad.rect.top - PAD_HEIGHT * 2)
self.brick_wall()
self.lifes = 5
self.score = 0
self.run()
def brick_wall(self):
for x in range(10):
for y in range(7):
brick_x = BRICK_WIDTH + x * BRICK_WIDTH + x * 5
brick_y = BRICK_HEIGHT * 2 + y * BRICK_HEIGHT + y * 5
Brick(brick_x, brick_y, (self.all_sprites, self.bricks), self.brick_images, self.break_fx)
def create_ball_at(self, x, y):
return Ball(x, y , (self.balls), self.ball_image, self.bounce_fx)
def run(self):
self.playing = True
while (self.playing):
self.dt = self.clock.tick(FPS)
self.events()
self.update()
self.draw()
def events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
self.powerup_multiball()
def update(self):
self.pad.update()
self.balls.update(self.bricks, self.pad, self.ball_lost)
hits = pygame.sprite.spritecollide(self.pad, self.balls, False)
for ball in hits:
self.pad.hit(ball)
def ball_lost(self):
if len(self.balls.sprites())>0:
return
self.pad.velocity = 0
self.lifes-=1
if self.lifes >0:
self.create_ball_at(self.pad.rect.centerx, self.pad.rect.top - PAD_HEIGHT *2)
else:
print("Game Over")
def powerup_multiball(self):
for _ in range(5):
reference_ball = self.balls.sprites()[0]
ball = self.create_ball_at(reference_ball.rect.centerx, reference_ball.rect.centery)
ball.asleep = False
ball.velocity = Vector2(ball.velocity.x + random.uniform(-0.5, 0.5), reference_ball.velocity.y)
def draw(self):
self.screen.fill(BLACK)
self.all_sprites.draw(self.screen)
self.balls.draw(self.screen)
pygame.display.flip()
game = Game()
game.start()
```
#### File: videojuegos/Lunar Lander/moon.py
```python
import pygame
from settings import *
import random
import math
class Moon():
def __init__(self, max_heights, min_heights):
self.max_heights = math.floor(max_heights)
self.min_heights = math.floor(min_heights)
self.heights = []
self.landing_spot_x = 0
self.landing_spot_width = 0
def generate_terrain(self):
last_height = random.randrange(self.min_heights, self.max_heights)
mid_height = (self.min_heights + self.max_heights)//2
for _ in range(0, WIDTH):
rnd = random.randrange(self.min_heights, self.max_heights)
go_up = rnd > mid_height
if go_up:
last_height += random.randrange(1,5)
else:
last_height -= random.randrange(1,5)
last_height = max(self.min_heights,
min(self.max_heights, last_height))
self.heights.append(last_height)
        landing_spot_x = random.randrange(int(WIDTH * 0.15), int(WIDTH * 0.85))
landing_spot_width = random.randrange(20,30)
landing_spot_height = self.heights[landing_spot_x]
for x in range(landing_spot_x-landing_spot_width,
landing_spot_x+landing_spot_width):
self.heights[x] = landing_spot_height
self.landing_spot_x = landing_spot_x
self.landing_spot_width = landing_spot_width
def draw(self, surface):
for x in range(0, WIDTH):
pygame.draw.line(surface, RED, (x, HEIGHT),
(x, HEIGHT-self.heights[x]))
landing_spot_height = HEIGHT - self.heights[self.landing_spot_x]
start_pos = (self.landing_spot_x -
self.landing_spot_width, landing_spot_height)
end_pos = (self.landing_spot_x +
self.landing_spot_width, landing_spot_height)
pygame.draw.line(surface, LIGHTGREY,start_pos,end_pos, 3)
```
#### File: videojuegos/Lunar Lander/rocket.py
```python
import pygame
from settings import *
from pygame import Vector2
from moon import Moon
import random
class Rocket(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((ROCKET_WIDTH, ROCKET_HEIGHT))
self.image.fill(BLUISHGREY)
self.rect = self.image.get_rect()
self.rect.center = Vector2(x, y)
self.velocity = Vector2(0,0)
self.engine = ROCKET_ENGINE
self.fuel = 1
self.alive = True
self.landed = False
#nos permite ir moviendo cosas por el espacio
def update(self, moon):
if not self.alive:
return
if self.landed:
return
if self.fuel > 0:
self.keyboard_input()
self.velocity.y += GRAVITY
self.rect.center += self.velocity
if self.rect.centerx < 0:
self.rect.centerx = WIDTH-1 # pasa de un lado a otro de la pantalla
# self.rect.centerx = 0
# self.velocity.x *=-1
if self.rect.centerx > WIDTH-1:
self.rect.centerx = 0
# self.rect.centerx = WIDTH-1
# self.velocity.x *= -1
is_grounded = self.check_landing(moon)
if is_grounded:
landing_velocity_ok = self.velocity.magnitude() < 3
landing_left = moon.landing_spot_x - moon.landing_spot_width
landing_right = moon.landing_spot_x + moon.landing_spot_width
landing_on_platform_ok = landing_left < self.rect.centerx < landing_right
print(self.velocity.magnitude())
if landing_on_platform_ok and landing_velocity_ok:
self.landed = True
else:
self.alive = False
def check_landing(self, moon):
x, y = self.rect.midbottom
terrain_height = HEIGHT - moon.heights[x]
is_grounded = y > terrain_height
return is_grounded
def keyboard_input(self):
keystate = pygame.key.get_pressed()
delta = Vector2(0, 0)
if keystate[pygame.K_UP]:
delta.y -= self.engine
if keystate[pygame.K_LEFT]:
delta.x -= self.engine
if keystate[pygame.K_RIGHT]:
delta.x += self.engine
if not delta.magnitude() == 0:
self.fuel -= ROCKET_CONSUMPTION
self.fuel = max(0, min(1, self.fuel))
self.velocity += delta
def draw(self, surface):
surface.blit(self.image, self.rect)
if not self.alive:
pygame.draw.circle(surface, RED, self.rect.center,
random.randrange(5,25))
```
#### File: videojuegos/snake/fruit.py
```python
import pygame
from pygame.math import Vector2
from settings import *
import random
class Fruit(pygame.sprite.Sprite):
def __init__(self, groups):
pygame.sprite.Sprite.__init__(self, groups)
self.image = pygame.Surface((TILESIZE, TILESIZE))
self.colors = [RED, PINK, BLUE, YELLOW]
self.rect = self.image.get_rect()
self.teleport()
def teleport(self):
x = random.randrange(0, GRID_WIDTH)
y = random.randrange(0, GRID_HEIGHT)
self.rect.topleft = Vector2(x, y) * TILESIZE
self.image.fill(random.choice(self.colors))
``` |
{
"source": "joseluismarin/python-kw-ahref",
"score": 2
} |
#### File: joseluismarin/python-kw-ahref/Procesar KW Explorer Ahref.py
```python
import argparse
import sys
import pandas as pd
from nltk import SnowballStemmer
import spacy
import es_core_news_sm
from tqdm import tqdm
from unidecode import unidecode
import glob
import re
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--save', help='Nombre del archivo al guardar')
parser.add_argument('-c', '--clean', nargs='?', const=1, type=bool, default=True, help='Elimina todos los duplicados del listado')
parser.add_argument('-i', '--intent', nargs='?', const=1, type=bool, default=True, help='Activa el procesado de las intenciones de busqueda')
parser.add_argument('-l', '--location', nargs='?', const=1, type=bool, default=True, help='Nombre del archivo con la base de datos de las localizaciones')
args = parser.parse_args()
pd.options.mode.chained_assignment = None
nlp = es_core_news_sm.load()
spanishstemmer=SnowballStemmer('spanish')
def normalize(text):
text = unidecode(str(text))
doc = nlp(text)
words = [t.orth_ for t in doc if not t.is_punct | t.is_stop]
lexical_tokens = [t.lower() for t in words if len(t) > 3 and t.isalpha()]
return lexical_tokens
def raiz(kw):
#Calculamos la raiz semantica
stems = [spanishstemmer.stem(wd) for wd in kw]
raiz = " ".join(sorted(stems))
return raiz
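# Illustrative example (not executed): keyword variants such as
# "comprar zapatos rojos" and "zapato rojo comprar" normalize to the same
# filtered tokens and, after stemming, yield the same sorted-stem string,
# so they later fall into the same group. Exact stems depend on the
# spaCy/Snowball versions installed.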
# Abrimos todos los archivos CSV y los agregamos a un dataframe
archivos=[]
files = glob.glob("entrada/*.csv")
loop = tqdm(total = len(files), position = 0, leave = False)
for f in files:
loop.set_description("Unificando archivos...".format(f))
archivos.append(pd.read_csv(f))
loop.update(1)
df=pd.concat(archivos,ignore_index='True')
loop.close()
print('Archivos cargados... OK')
# Eliminamos duplicados
if args.clean:
df = df.drop_duplicates()
print('Duplicados eliminados... OK')
# Bucle principal de procesado
loop = tqdm(total = len(df.index), position = 0, leave = False)
df['Raiz semantica'] = ''
print(df)
for i in df.index:
loop.set_description("Calculando raices...".format(i))
kw_a = normalize(df.loc[i,'Keyword'])
#Calculamos la raiz semantica
df.loc[i,'Raiz semantica'] = raiz(kw_a)
loop.update(1)
#print('Kw ' + str(index) + ' de ' + str(len(df.index)))
loop.close()
print('Calculado raices semanticas... OK')
df = df.sort_values(by=['Raiz semantica', 'Volume'], ascending=[True,False])
df = df.reset_index(drop=True)
# Agrupamos las keywords segun su raiz semantica y el volumen de busquedas
loop = tqdm(total = len(df.index), position = 0, leave = False)
df['Grupo'] = ''
for i in df.index:
loop.set_description("Agrupando...".format(i))
if i == 0:
df.loc[i,'Grupo'] = df.loc[i,'Keyword']
elif df.loc[i,'Raiz semantica'] == df.loc[i-1,'Raiz semantica']:
df.loc[i,'Grupo'] = df.loc[i-1,'Grupo']
else:
df.loc[i,'Grupo'] = df.loc[i,'Keyword']
loop.update(1)
loop.close()
print('Agrupado... OK')
df.to_csv('kw_procesado.csv', index=False)
print('Archivo kw_procesado.csv creado... OK')
gdf = (df.groupby('Grupo', as_index=False)
.agg({'Volume':'sum','Clicks':'sum','Difficulty':'mean','CPC':'mean','CPS':'mean','Return Rate':'mean','Keyword':' | '.join}))
# Detectamos la intencion de busqueda de la kw: Informacional, transacional, navegacional
if args.intent:
intenciones = pd.read_csv('Data/intenciones.csv')
loop = tqdm(total = len(intenciones.index), position = 0, leave = False)
gdf['Intencion'] = ''
for i in intenciones.index:
loop.set_description("Detectando intenciones de busqueda...".format(i))
row = gdf[gdf['Grupo'].str.match(str(intenciones.loc[i,'Patron']))]
if row is not None:
gdf.loc[row.index,'Intencion'] = intenciones.loc[i,'Tipo']
loop.update(1)
loop.close()
print('Intenciones de busqueda... OK')
# Detectamos la ubicacion de la palabra clave.
if args.location:
ubicaciones = pd.read_csv('Data/ubicaciones.csv')
loop = tqdm(total = len(ubicaciones.index), position = 0, leave = False)
gdf['Ubicacion'] = ''
gdf['Tipo ubicacion'] = ''
for i in ubicaciones.index:
loop.set_description("Detectando ubicaciones...".format(i))
row = gdf[gdf['Grupo'].str.match(str(ubicaciones.loc[i,'Ubicacion']))]
if row is not None:
gdf.loc[row.index,'Ubicacion'] = ubicaciones.loc[i,'Ubicacion']
gdf.loc[row.index,'Tipo ubicacion'] = ubicaciones.loc[i,'Tipo']
loop.update(1)
loop.close()
print('Ubicaciones... OK')
gdf.to_csv('kw_agrupado.csv',index=False)
print('Archivo kw_agrupado.csv creado... OK')
print('Proceso finalizado... OK')
``` |
{
"source": "joseluis-max/AirBnB_clone",
"score": 3
} |
#### File: tests/test_models/test_amenity.py
```python
import unittest
import os
import models
from models.amenity import Amenity
from models.base_model import BaseModel
class TestAmenity(unittest.TestCase):
"""Amenity testing
"""
# testing types attributes
def test_01_name_type(self):
amenity = Amenity()
self.assertEqual(type(amenity.name), str)
# testing instance of amenity
def test_02_instace_amenity(self):
amenity = Amenity()
self.assertIsInstance(amenity, Amenity)
def test_class(self):
"""Test class"""
self.assertEqual(Amenity.name, "")
self.assertTrue(issubclass(Amenity, BaseModel))
def test_docstring(self):
""" function test_docstring """
msj = "Module doesnt have docstring"
obj = models.amenity.__doc__
self.assertIsNotNone(obj, msj)
msj = "Classes doesnt have docstring"
self.assertIsNotNone(obj, msj)
def test_executable_file(self):
""" function test_executable_file """
is_read_true = os.access("models/engine/file_storage.py", os.R_OK)
self.assertTrue(is_read_true)
is_write_true = os.access("models/engine/file_storage.py", os.W_OK)
self.assertTrue(is_write_true)
is_exec_true = os.access("models/engine/file_storage.py", os.X_OK)
self.assertTrue(is_exec_true)
```
#### File: tests/test_models/test_review.py
```python
import unittest
import os
import models
from models.review import Review
class TestReview(unittest.TestCase):
"""Review testing
"""
# testing types attributes
def test_01_place_id_type(self):
review = Review()
self.assertEqual(type(review.place_id), str)
def test_02_user_id_type(self):
review = Review()
self.assertEqual(type(review.user_id), str)
def test_03_text_type(self):
review = Review()
self.assertEqual(type(review.text), str)
# testing instance of review
def test_04_instace_amenity(self):
review = Review()
self.assertIsInstance(review, Review)
def test_docstring(self):
""" function test_docstring """
msj = "Module doesnt have docstring"
obj = models.review.__doc__
self.assertIsNotNone(obj, msj)
msj = "Classes doesnt have docstring"
self.assertIsNotNone(obj, msj)
def test_executable_file(self):
""" function test_executable_file """
is_read_true = os.access("models/engine/file_storage.py", os.R_OK)
self.assertTrue(is_read_true)
is_write_true = os.access("models/engine/file_storage.py", os.W_OK)
self.assertTrue(is_write_true)
is_exec_true = os.access("models/engine/file_storage.py", os.X_OK)
self.assertTrue(is_exec_true)
``` |
{
"source": "joseluis-max/AirBnB_clone_v2",
"score": 3
} |
#### File: joseluis-max/AirBnB_clone_v2/100-clean_web_static.py
```python
from fabric.api import run, local, env
env.hosts = ['192.168.3.11', '172.16.31.10']
def do_clean(number=0):
""" Clean files oldest
"""
if number == 0 or number == 1:
number = 2
else:
number = int(number) + 1
local("""ls -t versions | tail +{} |
sed 's/^/versions\//' | xargs -d '\n' rm -rf"""
.format(number))
run("""ls -t /data/web_static/releases | tail +{} |
sed 's/^/data/web_static/releases\//' | xargs -d '\n' rm -rf"""
.format(number))
```
#### File: AirBnB_clone_v2/models/base_model.py
```python
import uuid
from datetime import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, DateTime
from datetime import datetime
Base = declarative_base()
class BaseModel:
"""A base class for all hbnb models"""
id = Column(String(60), unique=True, nullable=False, primary_key=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
updated_at = Column(DateTime, default=datetime.utcnow, nullable=False)
def __init__(self, *args, **kwargs):
"""Instatntiates a new model"""
if not kwargs:
self.id = str(uuid.uuid4())
self.created_at = datetime.now()
self.updated_at = datetime.now()
else:
try:
kwargs['updated_at'] = datetime\
.strptime(kwargs['updated_at'],
'%Y-%m-%dT%H:%M:%S.%f')
kwargs['created_at'] = \
datetime.strptime(kwargs['created_at'],
'%Y-%m-%dT%H:%M:%S.%f')
del kwargs['__class__']
except KeyError:
kwargs['id'] = str(uuid.uuid4())
kwargs['updated_at'] = datetime.now()
kwargs['created_at'] = datetime.now()
self.__dict__.update(kwargs)
def __str__(self):
"""Returns a string representation of the instance"""
cls = (str(type(self)).split('.')[-1]).split('\'')[0]
return '[{}] ({}) {}'.format(cls, self.id, self.__dict__)
def save(self):
"""Updates updated_at with current time when instance is changed"""
from models import storage
self.updated_at = datetime.now()
storage.new(self)
storage.save()
def to_dict(self):
"""Convert instance into dict format"""
dictionary = {}
dictionary.update(self.__dict__)
dictionary.update({'__class__':
(str(type(self)).split('.')[-1]).split('\'')[0]})
dictionary['created_at'] = self.created_at.isoformat()
dictionary['updated_at'] = self.updated_at.isoformat()
if '_sa_instance_state' in dictionary:
del dictionary['_sa_instance_state']
return dictionary
def delete(self):
"""Delete the current instance from the storage by calling
the method delete
"""
from models import storage
storage.delete(self)
```
#### File: models/engine/file_storage.py
```python
import json
class FileStorage:
"""This class manages storage of hbnb models in JSON format"""
__file_path = 'file.json'
__objects = {}
def delete(self, obj=None):
"""Delete a object of __objects"""
if obj is not None:
for key, value in self.__objects.items():
if value == obj:
del self.__objects[key]
break
def all(self, cls=None):
"""Returns a dictionary of models currently in storage"""
if cls is not None:
instances = {}
for key, value in self.__objects.items():
if value.__class__ == cls:
instances[key] = value
return instances
return self.__objects
def new(self, obj):
"""Adds new object to storage dictionary"""
self.all().update({obj.to_dict()['__class__'] + '.' + obj.id: obj})
def save(self):
"""Saves storage dictionary to file"""
with open(FileStorage.__file_path, 'w') as f:
temp = {}
temp.update(FileStorage.__objects)
for key, val in temp.items():
temp[key] = val.to_dict()
json.dump(temp, f)
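    # Illustrative note: the saved JSON maps "<ClassName>.<id>" keys to each
    # object's to_dict() form, e.g.
    # {"BaseModel.89a1...": {"id": "89a1...", "__class__": "BaseModel", ...}}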
def reload(self):
"""Loads storage dictionary from file"""
from models.base_model import BaseModel
from models.user import User
from models.place import Place
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.review import Review
classes = {
'BaseModel': BaseModel, 'User': User, 'Place': Place,
'State': State, 'City': City, 'Amenity': Amenity,
'Review': Review
}
try:
temp = {}
with open(FileStorage.__file_path, 'r') as f:
temp = json.load(f)
for key, val in temp.items():
self.all()[key] = classes[val['__class__']](**val)
except FileNotFoundError:
pass
def close(self):
""" call reload() method for deserializing the JSON file to objects """
self.reload()
``` |
{
"source": "JoseLuisRojasAranda/coco-category-cpy",
"score": 3
} |
#### File: JoseLuisRojasAranda/coco-category-cpy/tests.py
```python
import sys
import cv2
import json
def main():
render_image("../copied_images/000000434201.jpg")
def render_image(img_path):
image = cv2.imread(img_path)
with open(img_path + ".json") as json_file:
data = json.load(json_file)
for box in data["bboxes"]:
x = int(box["center_x"])
y = int(box["center_y"])
w = int(box["width"])
h = int(box["height"])
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 1)
cv2.imshow("test_ssd", image)
cv2.waitKey(0)
main()
``` |
{
"source": "JoseLuisRojasAranda/Sorting-ComplexityGraph",
"score": 4
} |
#### File: Sorting-ComplexityGraph/code/Graficador.py
```python
import datetime as time
import numpy as np
from matplotlib import pyplot as plt
import AlgoritmosOrdenacion as sort
# Configuaracion
inicio = 0 # Tamano inicial del arreglo
aumento = 1 # Aumento del tamano del arreglo
tamMax = 1000001 # Tamano maximo del arreglo
#arr = [] # Arreglo generado aleatoriamente
bubbleT = [] # Tiempo del bubble sort
insertionT = [] # Tiempo del insertion sort
mergeT = [] # Tiempo del merge sort
tamX = [] # Valores de la grafica en X
# Prueba los algoritmos de ordenacion y regresa un arreglo con los tiempos de ejecucion
def ProbarOrdenacion(n):
res = []
arr = []
# Bubble sort
arr = np.random.randint(1, 1000, size=n)
a = time.datetime.now()
sort.BubbleSort(arr)
b = time.datetime.now()
res.append(int((b-a).total_seconds() * 1000000))
# Insertion sort
arr = np.random.randint(1, 1000, size=n)
a = time.datetime.now()
sort.InsertionSort(arr)
b = time.datetime.now()
res.append(int((b-a).total_seconds() * 1000000))
# Merge sort
arr = np.random.randint(1, 1000, size=n)
a = time.datetime.now()
sort.MergeSort(arr, 0, n-1)
b = time.datetime.now()
res.append(int((b-a).total_seconds() * 1000000))
return res
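# Expected behaviour (illustrative): bubble sort and insertion sort grow roughly
# as O(n^2) while merge sort grows as O(n log n), so on the plot the blue and
# red curves should rise much faster than the green one as the array size grows.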
# Dibuja la grafica
def dibujar():
# plt.scatter(i, y)
plt.plot(tamX, bubbleT, 'b')
plt.plot(tamX, insertionT, 'r')
plt.plot(tamX, mergeT, 'g')
plt.title("Algoritmos de ordenacion")
plt.xlabel("Tamano del arreglo")
plt.ylabel("Tiempo")
plt.legend(["bubble sort", "insertion sort", "merge sort"])
# Funcion main
def main():
tam = inicio
while tam < tamMax:
res = ProbarOrdenacion(tam)
bubbleT.append(res[0])
insertionT.append(res[1])
mergeT.append(res[2])
tamX.append(tam)
tam += aumento
dibujar()
plt.pause(0.05)
print("----------------------------------")
print("Tiempos:")
print(tamX)
print("Bubble Sort:")
print(bubbleT)
print("Insertion Sort:")
print(insertionT)
print("Merge Sort:")
print(mergeT)
main()
dibujar()
plt.show()
``` |
{
"source": "JoseLuisRojasAranda/tfmodels",
"score": 2
} |
#### File: datasets/coco/coco_for_SSD.py
```python
import tensorflow as tf
import cv2
# Regresar un directoria para poder acceder modulo de otra carpeta
import sys
sys.path.append("..")
from ops.SSD import SSD_data_pipeline, SSD_load_dataset
import mobilenetv2
sys.path.append("datasets/")
print(sys.path)
def main():
model = mobilenetv2.MobileNetV2_SSD
fmaps_arr = model.get_fmaps_array()
cats = ["orange", "apple", "banana"]
aspect_ratios = [1, 2, 3, 1/2, 1/3]
img_size = model.get_input_size()
process_for_ssd(fmaps_arr, cats, img_size, aspect_ratios)
def process_for_ssd(fmaps_array, categories_array, img_size, aspect_ratios):
pipeline = SSD_data_pipeline(feature_maps=fmaps_array,
categories_arr=categories_array, img_size=img_size,
aspect_ratios=aspect_ratios)
pipeline.preprocess_tfrecord_coco("cocofruits.tfrecord",
"ssd_preprocess.tfrecord")
dataset = SSD_load_dataset("ssd_preprocess.tfrecord")
main()
```
#### File: src/models/mobilenetv1.py
```python
import tensorflow as tf
from tensorflow.keras import Model
from ops.conv_ops import normal_conv
from ops.conv_blocks import D1x1Block
#
# Implementacion de MobilenetV1, suponiendo un input size de 224x224x3
#
class MobileNetV1(Model):
def __init__(self, classes, width_multiplier=1):
super(MobileNetV1, self).__init__()
self.m_layers = []
a = width_multiplier
self.m_layers.append(normal_conv(int(a*32), (3, 3), strides=[1, 2, 2, 1]))
self.m_layers.append(tf.keras.layers.BatchNormalization())
self.m_layers.append(tf.keras.layers.Activation("relu"))
self.m_layers.append(D1x1Block(int(a*64), 1))
self.m_layers.append(D1x1Block(int(a*128), 2))
self.m_layers.append(D1x1Block(int(a*128), 1))
self.m_layers.append(D1x1Block(int(a*256), 2))
self.m_layers.append(D1x1Block(int(a*256), 1))
self.m_layers.append(D1x1Block(int(a*512), 2))
for _ in range(5):
self.m_layers.append(D1x1Block(int(a*512), 1))
self.m_layers.append(D1x1Block(int(a*1024), 2))
self.m_layers.append(D1x1Block(int(a*1024), 1))
self.m_layers.append(tf.keras.layers.AveragePooling2D(pool_size=(7,7), strides=(1,1)))
self.m_layers.append(tf.keras.layers.Flatten())
self.m_layers.append(tf.keras.layers.Dense(1024))
self.m_layers.append(tf.keras.layers.Dropout(0.5, name="dropout"))
self.m_layers.append(tf.keras.layers.Dense(classes))
self.m_layers.append(tf.keras.layers.Activation("softmax"))
def call(self, inputs, training=False):
x = inputs
for l in self.m_layers:
# print(x.get_shape().as_list())
if (l.name == "dropout" and training == False) == False:
x = l(x)
return x
```
#### File: src/models/mobilenetv2.py
```python
import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras import layers
from models.ops.conv_ops import normal_conv, ReLU6, pointwise_conv
from models.ops.conv_blocks import BottleneckResidualBlock, basic_conv_block
from models.ops.conv_blocks import pwise_conv_block, separable_conv_block
from models.ops.model_layers import LayerList
from models.ops.SSD import SSD_layer
#
# Implementacion de MobilenetV2, suponiendo un input size de 224x224x3
#
class MobileNetV2(Model):
@staticmethod
def build_model(classes, width_multiplier=1):
a = width_multiplier
model = Sequential()
def crearBloques2(input_channels, t, c, n, s):
for i in range(n):
# Solo el primer bloque tiene stride 2
# a partir del segundo bottleneck el numero de input_channels es igual al output_channels
if i > 0:
s = 1
input_channels = c
                l_num = len(model.layers)
l_res = BottleneckResidualBlock(input_channels, int(c), stride=s, t=t,
name="layer_{}_BottleneckResidualBlock".format(l_num))
model.add(l_res)
l = basic_conv_block(int(a*32), (3, 3), stride=2, dropout=0.25, activation="ReLU6", name="layer_0")
model.add(l)
# los bloques de bottleneck intermedios
crearBloques2(32, 1, a*16, 1, 1)
crearBloques2(16, 6, a*24, 2, 2)
crearBloques2(24, 6, a*32, 3, 2)
crearBloques2(32, 6, a*64, 4, 2)
        crearBloques2(64, 6, a*96, 3, 1)
crearBloques2(96, 6, a*160, 3, 2)
crearBloques2(160, 6, a*320, 1, 1)
# ultima convolucion
l = pwise_conv_block(int(a*1280), dropout=0.25, activation="ReLU6",
name="layer_conv1x1")
model.add(l)
# Average Pooling y Fully Connected
model.add(layers.AveragePooling2D(pool_size=(7,7), strides=(1,1)))
model.add(layers.Flatten())
model.add(layers.Dense(1280))
model.add(layers.Dropout(0.5, name="dropout"))
model.add(layers.Dense(classes))
model.add(layers.Activation("softmax"))
return model
#
# Args:
# classes: el numero de classes que realizara predicciones
# width_multiplier: numero para controlar la complejidad del modelo
#
def __init__(self, classes, width_multiplier=1):
super(MobileNetV2, self).__init__()
a = width_multiplier
self.classes = classes
self.m_layers = LayerList()
# convolucion inicial
l = basic_conv_block(int(a*32), (3, 3), stride=2,
dropout=0.25, activation="ReLU6", name="layer_0")
self.m_layers.add(l)
# los bloques de bottleneck intermedios
self.crearBloques(32, 1, a*16, 1, 1)
self.crearBloques(16, 6, a*24, 2, 2)
self.crearBloques(24, 6, a*32, 3, 2)
self.crearBloques(32, 6, a*64, 4, 2)
        self.crearBloques(64, 6, a*96, 3, 1)
self.crearBloques(96, 6, a*160, 3, 2)
self.crearBloques(160, 6, a*320, 1, 1)
# ultima convolucion
l = pwise_conv_block(int(a*1280), dropout=0.25, activation="ReLU6",
name="layer_{}_conv1x1".format(len(self.m_layers)))
self.m_layers.add(l)
# Average Pooling y Fully Connected
self.m_layers.add(layers.AveragePooling2D(pool_size=(7,7),
strides=(1,1)), training_arg=False)
self.m_layers.add(layers.Flatten(), training_arg=False)
self.m_layers.add(layers.Dense(1280))
self.m_layers.add(layers.Dropout(0.5, name="dropout"), only_training=True)
self.m_layers.add(layers.Dense(classes))
self.m_layers.add(layers.Activation("softmax"))
# Crea BottleneckResidualBlock n veces
def crearBloques(self, input_channels, t, c, n, s):
for i in range(n):
# Solo el primer bloque tiene stride 2
# a partir del segundo bottleneck el numero de input_channels es igual al output_channels
if i > 0:
s = 1
input_channels = c
l_num = len(self.m_layers)
l = BottleneckResidualBlock(input_channels, int(c), stride=s, t=t,
name="layer_{}_BottleneckResidualBlock".format(l_num))
self.m_layers.add(l)
def call(self, inputs, training=False):
x = self.m_layers.feed_forward(inputs, training)
return x
@staticmethod
def get_input_size():
return 224
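# Usage sketch (illustrative, assumes the models.ops modules import cleanly):
#   model = MobileNetV2(classes=3)
#   probs = model(tf.zeros([1, 224, 224, 3]))   # expected output shape: (1, 3)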
# Implementacion de SSD framework para object detection con arquitectura
# de MobileNetV2, SSD esta configurado de la siguiente manera segun paper:
# - first SSD layer: expansion de layer 15 stride=16
# - second and rest SSD layer: ultima layer stride=32
class MobileNetV2_SSD(Model):
def __init__(self, classes, width_multiplier=1):
super(MobileNetV2_SSD, self).__init__()
#self.classes = classes
a = width_multiplier
self.classes = classes
self.m_layers = LayerList()
self.saved_block = 13 # output que guarda para ssd_lite
# convolucion inicial
l = basic_conv_block(int(a*32), (3, 3), stride=2,
dropout=0.25, activation="ReLU6", name="layer_0")
self.m_layers.add(l)
# los bloques de bottleneck intermedios
self.crearBloques(32, 1, a*16, 1, 1)
self.crearBloques(16, 6, a*24, 2, 2)
self.crearBloques(24, 6, a*32, 3, 2)
self.crearBloques(32, 6, a*64, 4, 2)
        self.crearBloques(64, 6, a*96, 3, 1)
self.crearBloques(96, 6, a*160, 3, 2)
self.crearBloques(160, 6, a*320, 1, 1)
# ultima convolucion
l_num = len(self.m_layers)
l = pwise_conv_block(int(a*1280), dropout=0.25, activation="ReLU6",
name="layer_{}_conv1x1".format(l_num))
self.m_layers.add(l, save_as="last_layer")
# SSD extra feature layers
l = separable_conv_block(512, 2, name="ssd_feature_layer_1")
self.m_layers.add(l, save_as=l.name)
l = separable_conv_block(256, 2, name="ssd_feature_layer_2")
self.m_layers.add(l, save_as=l.name)
l = separable_conv_block(256, 2, name="ssd_feature_layer_3")
self.m_layers.add(l, save_as=l.name)
l = separable_conv_block(128, 2, name="ssd_feature_layer_4")
self.m_layers.add(l, save_as=l.name)
# SSD classifier
l = SSD_layer(classes=self.classes, num_fmap=1, total_fmaps=5,
img_size=320, name="ssd_layer_1")
self.m_layers.add(l, save_as=l.name, custom_input="layer_13",
custom_input_index=0)
l = SSD_layer(classes=self.classes, num_fmap=2, total_fmaps=5,
img_size=320, name="ssd_layer_2")
self.m_layers.add(l, save_as=l.name, custom_input="last_layer")
l = SSD_layer(classes=self.classes, num_fmap=3, total_fmaps=5,
img_size=320, name="ssd_layer_3")
self.m_layers.add(l, save_as=l.name, custom_input="ssd_feature_layer_1")
l = SSD_layer(classes=self.classes, num_fmap=4, total_fmaps=5,
img_size=320, name="ssd_layer_4")
self.m_layers.add(l, save_as=l.name, custom_input="ssd_feature_layer_2")
l = SSD_layer(classes=self.classes, num_fmap=5, total_fmaps=5,
img_size=320, name="ssd_layer_5")
self.m_layers.add(l, save_as=l.name, custom_input="ssd_feature_layer_4")
# Crea BottleneckResidualBlock n veces
def crearBloques(self, input_channels, t, c, n, s):
for i in range(n):
# Solo el primer bloque tiene stride 2
# a partir del segundo bottleneck el numero de input_channels es igual al output_channels
if i > 0:
s = 1
input_channels = c
l_num = len(self.m_layers)
l = BottleneckResidualBlock(input_channels, int(c), stride=s, t=t,
name="layer_{}_BottleneckResidualBlock".format(l_num))
save_as = None
if l_num == self.saved_block:
save_as = "layer_{}".format(l_num)
self.m_layers.add(l, save_as=save_as)
def call(self, inputs, training=False):
x = self.m_layers.feed_forward(inputs, training)
return x
@staticmethod
def get_fmaps_array():
return [(20, 20), (10, 10), (5, 5), (3, 3), (1, 1)]
@staticmethod
def get_input_size():
return 320
```
#### File: models/ops/conv_blocks.py
```python
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.nn import relu6
from models.ops import conv_ops as ops
# Bloque comun de convolucion que consiste:
# > conv2d
# > batch normalization
# > activation
# > dropout
class basic_conv_block(layers.Layer):
# Args:
# filters: entero con el numero de filtros de la convolucion
# kernel: par de numeros con tamaño del kernel
# strides: arreglo de strides
# dropout: fraccion a la cual se le va aplicar dropout
# activation: el topo de activacion de la capa
def __init__(self,
filters,
kernel,
stride=1,
dropout=0.25,
activation="ReLU",
name="conv_block", **kwargs):
super(basic_conv_block, self).__init__(name=name, **kwargs)
self.filters = filters
self.kernel = kernel
self.strides = [1, stride, stride, 1]
self.dropout = dropout
self.activation = activation
self.conv = ops.normal_conv(filters, kernel, strides=self.strides, name=name+"_conv2d")
self.bn = layers.BatchNormalization(name=name+"_bn")
if self.activation == "ReLU":
self.activation = layers.Activation("relu", name=name+"_relu")
if self.activation == "ReLU6":
self.activation = ops.ReLU6(name=name+"_relu6")
self.dropout = layers.Dropout(dropout, name=name+"_dropout")
#
# serializa la configuracion de la capa
def get_config(self):
config = super(basic_conv_block, self).get_config()
config.update({
"filters": self.filters,
"kernel": self.kernel,
"stride": self.strides[1],
"dropout": self.dropout,
"activation": self.activation
})
return config
def call(self, inputs, training=None):
# Operacion depth wise
x = self.conv(inputs)
x = self.bn(x)
x = self.activation(x)
if training == True:
x = self.dropout(x)
return x
# Bloque de pointwise convolution
# > pointwise
# > batch normalization
# > activation
# > dropout
class pwise_conv_block(layers.Layer):
# Args:
# filters: entero con el numero de filtros de la convolucion
# strides: arreglo de strides
# dropout: fraccion a la cual se le va aplicar dropout
# activation: el topo de activacion de la capa
def __init__(self,
filters,
stride=1,
dropout=0.25,
activation="ReLU",
name="pwise_conv_block", **kwargs):
super(pwise_conv_block, self).__init__(name=name, **kwargs)
self.filters = filters
self.strides = [1, stride, stride, 1]
self.dropout = dropout
self.activation = activation
self.conv = ops.pointwise_conv(filters, strides=self.strides, name=name+"_pwise_conv")
self.bn = layers.BatchNormalization(name=name+"_bn")
if self.activation == "ReLU":
self.activation = layers.Activation("relu", name=name+"_relu")
if self.activation == "ReLU6":
self.activation = ops.ReLU6(name=name+"_relu6")
self.dropout = layers.Dropout(dropout, name=name+"_dropout")
#
# serializa la configuracion de la capa
def get_config(self):
config = super(pwise_conv_block, self).get_config()
config.update({
"filters": self.filters,
"stride": self.strides[1],
"dropout": self.dropout,
"activation": self.activation
})
return config
def call(self, inputs, training=None):
# Operacion depth wise
x = self.conv(inputs)
x = self.bn(x)
x = self.activation(x)
if training == True:
x = self.dropout(x)
return x
#
# Bloque de capas de mobilenetV1 que realiza lo siguiente
# > 3x3 Depthwise conv, stride=(1|2)
# > Batch Normalization
# > ReLU Activation
# > 1x1xfilters Conv (Pointwise conv)
# > Batch Normalization
# > ReLU Activations
#
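# Worked cost example (illustrative): with a 3x3 kernel, 64 input channels and
# 128 filters, a standard convolution needs 3*3*64*128 = 73,728 weights, while
# the depthwise (3*3*64 = 576) plus pointwise (64*128 = 8,192) pair needs only
# 8,768, roughly 8.4x fewer parameters for a comparable receptive field.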
class separable_conv_block(layers.Layer):
#
# Crea el bloque segun los argumentos
# Args:
# filters: numero de filtros que realizara la Pointwise Conv
# stride: stride de la layer Depthwise Conv, 1 o 2
# name: nombre del bloque
#
def __init__(self,
filters,
stride,
dropout=0.25,
name="separable_conv_block", **kwargs):
super(separable_conv_block, self).__init__(name=name, **kwargs)
self.filters = filters
self.stride = stride
self.dropout = dropout
# Asegura de que el filters sea un entero
if type(filters) is float:
filters = int(filters)
# deptwise operation
self.dwise = ops.depthwise_conv((3,3), strides=[1, stride, stride, 1])
self.dwbn = layers.BatchNormalization()
self.dwrelu = layers.Activation("relu")
self.dwdo = layers.Dropout(dropout)
#point wise operation
self.pwise = ops.pointwise_conv(filters)
self.pwbn = layers.BatchNormalization()
self.pwrelu = layers.Activation("relu")
self.pwdo = layers.Dropout(dropout)
#
# serializa la configuracion de la capa
def get_config(self):
config = super(separable_conv_block, self).get_config()
config.update({
"filters": self.filters,
"stride": self.stride,
"dropout": self.dropout
})
return config
def call(self, inputs, training=None):
# Operacion depth wise
x = self.dwise(inputs)
x = self.dwbn(x)
x = self.dwrelu(x)
if training == True:
x = self.dwdo(x)
# Luego point wise convolution
x = self.pwise(x)
x = self.pwbn(x)
x = self.pwrelu(x)
if training == True:
x = self.pwdo(x)
return x
#
# Bloque basico para MobileNetV2, realiza lo siguiente:
# > (1x1xinput_channels*t) conv
# > Batch Normalization
# > ReLU6
# > 3x3 Depthwise conv, stride=(1|2)
# > Batch Normalization
# > ReLU6
# > (1x1xoutput_channels) conv
# > Si stride == 1 entonces residual = output + input
#
class BottleneckResidualBlock(layers.Layer):
#
# Crea el bloque segun los argumentos
# Args:
# input_channels: numero de channels que entraran al bloque
# filters: numero de filtros del volumen final
# stride: stride de la layer Depthwise Conv, 1 o 2
# t: expansion factor, por defecto 6
# dropout: cantidad de dropout que se realizara
# name: nombre del bloque
#
def __init__(self,
input_channels,
filters,
stride=1,
t=6,
dropout=0.25,
store_output=False,
name="BottleneckResidualBlock", **kwargs):
super(BottleneckResidualBlock, self).__init__(name=name, **kwargs)
# Asegura de que el input_channels sea un entero
if type(input_channels) is float:
input_channels = int(input_channels)
# Asegura de que el filters sea un entero
if type(filters) is float:
filters = int(filters)
self.input_channels = input_channels
self.output_channels = filters
self.stride = stride
self.t = t
self.dropout = dropout
self.store_output = store_output
self.expansion_output = None
self.block_output = None
self.pw_exp = ops.pointwise_conv(input_channels * t, name=name + "_expansion_conv")
self.bn_exp = layers.BatchNormalization(name=name+"_expansion_bn")
self.do_exp = layers.Dropout(self.dropout, name=name+"_expansion_do")
self.dwise = ops.depthwise_conv((3,3), strides=[1, stride, stride, 1], name=name+"_depthwise_conv")
self.bn_dwise = layers.BatchNormalization(name=name+"_depthwise_bn")
self.do_dwise = layers.Dropout(self.dropout, name=name+"_depthwise_do")
self.pw_bottleneck = ops.pointwise_conv(self.output_channels, name=name+"_bottleneck_conv")
self.bn_bottleneck = layers.BatchNormalization(name=name+"_bottleneck_bn")
self.do_bottleneck = layers.Dropout(self.dropout, name=name+"_bottleneck_do")
# En caso de que el input y output no concuerden,
# se realiza un 1x1 conv para que concuerdes
# if self.input_channels != self.output_channels:
# self.pw_residual = ops.pointwise_conv(self.output_channels)
#
# serializa la configuracion de la capa
def get_config(self):
config = super(BottleneckResidualBlock, self).get_config()
config.update({
"input_channels": self.input_channels,
"filters": self.output_channels,
"stride": self.stride,
"t": self.t,
"dropout": self.dropout,
"store_output": self.store_output
})
return config
def call(self, inputs, training=None):
residual = inputs
# Expansion de los channels de entrada
x = self.pw_exp(inputs)
x = self.bn_exp(x)
x = relu6(x)
if training == True:
x = self.do_exp(x)
res_expansion = x
# Realisamos la depthwise convolution
x = self.dwise(x)
x = self.bn_dwise(x)
x = relu6(x)
if training == True:
x = self.do_dwise(x)
res_depthwise = x
# Bottleneck para reducir los channels de salida
x = self.pw_bottleneck(x)
x = self.bn_bottleneck(x)
# checa si hay que sumar el residual
if self.stride == 1:
if self.input_channels == self.output_channels:
x = x + residual
#residual = self.pw_residual(residual)
#x = x + residual
if training == True:
x = self.do_bottleneck(x)
res_bottleneck = x
return res_bottleneck
```
#### File: models/ops/conv_ops.py
```python
import tensorflow as tf
from tensorflow.nn import depthwise_conv2d, conv2d, bias_add, relu6
from tensorflow.keras import layers
from tensorflow.keras.initializers import GlorotNormal
from tensorflow.keras.regularizers import l2
#
# Layer que realiza activacion ReLU6
#
class ReLU6(layers.Layer):
def __init__(self, name="ReLU6", **kwargs):
super(ReLU6, self).__init__(name=name, **kwargs)
def call(self, inputs):
return relu6(inputs)
#
# Layer que realiza una convulucion estandar
#
class normal_conv(layers.Layer):
#
# Asigna los parametros de layer para realizar la convulucion
# Args:
# name: nombre de la layer
# f_kernel: tamaño del kernel que realiza la convulucion
# num_filters: el numero de filtros de la convulucion
# strides: el stride de la convolucion
# padding: el padding que se aplicara, por defecto 'SAME'
# intializer: para los pesos, por defecto GlorotNormal (Xavier)
# regularizer: para los pesos, por defecto L2
# use_bias: si se aplica bias despues de la convulucion
# weight_decay: hyperparametro para regularizacion
#
def __init__(self,
num_filters,
f_kernel,
name="normal_conv",
strides=[1,1,1,1],
padding="SAME",
initializer=None,
regularizer=None,
use_bias=False,
weight_decay=1e-4,
**kwargs):
super(normal_conv, self).__init__(name=name, **kwargs)
# Asegura de que el num_filters sea un entero
if type(num_filters) is float:
num_filters = int(num_filters)
self.f_kernel = f_kernel
self.num_filters = num_filters
self.strides = strides
self.padding = padding
self.use_bias = use_bias
if initializer == None:
self.w_initializer = GlorotNormal()
else:
self.w_initializer = initializer
if regularizer == None:
self.w_regularizer = l2(weight_decay)
else:
            self.w_regularizer = regularizer
#
# Serializa las propiedades de la capa
def get_config(self):
config = super(normal_conv, self).get_config()
config.update({
"num_filters": self.num_filters,
"f_kernel": self.f_kernel,
"strides": self.strides,
"padding": self.padding,
"use_bias": self.use_bias
})
return config
#
# Crea weights y biases dependiendo del input_shape de call()
#
def build(self, input_shape):
# dimensiones de la convulucion
nc_h, nc_w = self.f_kernel
in_channels = input_shape[-1]
self.w = self.add_weight(shape=[nc_h, nc_w, in_channels, self.num_filters],
initializer=self.w_initializer,
regularizer=self.w_regularizer,
trainable=True,
name=self.name + "_w")
if self.use_bias:
self.b = self.add_weight(shape=[self.num_filters],
initializer="zeros",
trainable=True,
name=self.name+"_b")
#
# Realiza la operacion al argumento inputs
# Args:
# inputs: tensor de shape (batch, heigh, width, channels)
#
def call(self, inputs, training=None):
conv = conv2d(inputs, self.w, self.strides, self.padding)
if self.use_bias:
return bias_add(conv, self.b)
return conv
#
# Layer que realiza una depthwise convolution, a la
# cual solo se le aplica un filtro a cada channel del input
#
class depthwise_conv(layers.Layer):
#
# Asigna los parametros de layer para realizar la depthwsie convulution
# Args:
# name: nombre de la layer
# f_kernel: tamaño del kernel que realiza la convulucion
# channel_multiplier: el numero de filtros por channel del input
# strides: el stride de la convolucion
# padding: el padding que se aplicara, por defecto 'SAME'
# intializer: para los pesos, por defecto GlorotNormal (Xavier)
# regularizer: para los pesos, por defecto L2
# use_bias: si se aplica bias despues de la convulucion
# weight_decay: hyperparametro para regularizacion
#
def __init__(self,
f_kernel,
name="depthwise_conv",
channel_multiplier=1,
strides=[1,1,1,1],
padding="SAME",
initializer=None,
regularizer=None,
use_bias=False,
weight_decay=1e-4,
**kwargs):
super(depthwise_conv, self).__init__(name=name, **kwargs)
self.f_kernel = f_kernel
self.channel_multiplier = channel_multiplier
self.strides = strides
self.padding = padding
self.use_bias = use_bias
self.weight_decay = weight_decay
if initializer == None:
self.w_initializer = GlorotNormal()
else:
self.w_initializer = initializer
if regularizer == None:
self.w_regularizer = l2(weight_decay)
else:
self.w_regularizer = regularizer
#
# Serializa las propiedades de la capa
def get_config(self):
config = super(depthwise_conv, self).get_config()
config.update({
"f_kernel": self.f_kernel,
"channel_multiplier": self.channel_multiplier,
"strides": self.strides,
"padding": self.padding,
"use_bias": self.use_bias,
"weight_decay": self.weight_decay
        })
        return config
#
# Crea weights y biases dependiendo del input_shape de call()
#
def build(self, input_shape):
# print("Input shape: {}".format(input_shape.as_list()))
in_channels = input_shape[-1]
nc_h, nc_w = self.f_kernel
self.w = self.add_weight(shape=[nc_h, nc_w, in_channels, self.channel_multiplier],
initializer=self.w_initializer,
regularizer=self.w_regularizer,
trainable=True,
name=self.name+"_w")
if self.use_bias:
self.b = self.add_weight(shape=[in_channels * self.channel_multiplier],
initializer="zeros",
trainable=True,
name=self.name+"_b")
#
# Realiza la operacion al argumento inputs
# Args:
# inputs: tensor de shape (batch, heigh, width, channels)
#
def call(self, inputs, training=None):
conv = depthwise_conv2d(inputs, self.w, self.strides, self.padding)
if self.use_bias:
return bias_add(conv, self.b)
return conv
#
# Layer que realiza una pointwise convolution, a la
# cual solo se le convolutions de 1x1
#
class pointwise_conv(layers.Layer):
#
# Asigna los parametros de layer para realizar la pointwise convulution
# Args:
# name: nombre de la layer
# num_filters: numero de filtros del volumen final
# strides: el stride de la convolucion
# padding: el padding que se aplicara, por defecto 'SAME'
# intializer: para los pesos, por defecto GlorotNormal (Xavier)
# regularizer: para los pesos, por defecto L2
# use_bias: si se aplica bias despues de la convulucion
# weight_decay: hyperparametro para regularizacion
#
def __init__(self,
num_filters,
name="pointwise_conv",
strides=[1,1,1,1],
padding="VALID",
initializer=None,
regularizer=None,
use_bias=False,
weight_decay=1e-4,
**kwargs):
super(pointwise_conv, self).__init__(name=name, **kwargs)
# Asegura de que el num_filters sea un entero
if type(num_filters) is float:
num_filters = int(num_filters)
self.f_kernel = (1, 1)
self.num_filters = num_filters
self.strides = strides
self.padding = padding
self.use_bias = use_bias
self.weight_decay = weight_decay
if initializer == None:
self.w_initializer = GlorotNormal()
else:
self.w_initializer = initializer
if regularizer == None:
self.w_regularizer = l2(weight_decay)
else:
self.w_regularizer = regularizer
#
# Serializa las propiedades de la capa
def get_config(self):
config = super(pointwise_conv, self).get_config()
config.update({
"num_filters": self.num_filters,
"strides": self.strides,
"padding": self.padding,
"use_bias": self.use_bias,
"weight_deacay": self.weight_decay
})
def build(self, input_shape):
in_channels = input_shape[-1]
nc_h, nc_w = self.f_kernel
self.w = self.add_weight(shape=[nc_h, nc_w, in_channels, self.num_filters],
initializer=self.w_initializer,
regularizer=self.w_regularizer,
trainable=True,
name=self.name+"_w")
if self.use_bias:
self.b = self.add_weight(shape=[self.num_filters],
initializer="zeros",
trainable=True,
name=self.name+"_b")
def call(self, inputs, training=None):
conv = conv2d(inputs, self.w, self.strides, self.padding)
if self.use_bias:
return bias_add(conv, self.b)
return conv
```
#### File: models/ops/SSD.py
```python
import math
import random
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.initializers import GlorotNormal
from tensorflow.keras.regularizers import l2
from models.ops.conv_ops import normal_conv, depthwise_conv, pointwise_conv, ReLU6
# Regresar un directoria para poder acceder modulo de otra carpeta
import sys
sys.path.append("..")
from datasets.coco import tfrecord_coco
from datasets.datasets_features import bytes_feature
from models.tests.test_bboxes import draw_bbox
sys.path.append("ops/")
"""
ATENCION:
Ahorita el desarrollo de SSD para FrutAI esta en standby, para poder mejor
probar la idea con un dataset mas simple, ya despues se continuara con el
desarrollo.
Los pendientes para terminar SSD:
- Agregar la categiria de fondo al preprocesamiento de img
- Corregir data augmentation cuando se realiza expand
- Generar dataset preprocesado
- Crear loss function de ssd
- Entrenar modelo
- Metodos para decodificacion de output, como:
- Non-max supression
- Resize de bboxes, etc.
"""
class ssd_lite_conv(layers.Layer):
# Args:
# filters: numero de filtros que se aplica en total
# kernel: tamaño del kernel
def __init__(self, filters, kernel=(3, 3), name="ssd_lite_conv", **kwargs):
super(ssd_lite_conv, self).__init__(name=name, **kwargs)
self.filters = filters
self.kernel = kernel
self.dwise = depthwise_conv(self.kernel, strides=[1, 1, 1, 1],
padding="SAME", name=name+"_dwise_conv")
self.dwbn = layers.BatchNormalization(name=name+"_dwise_bn")
self.dwrelu6 = ReLU6(name=name+"_dwise_relu6")
self.pwise = pointwise_conv(self.filters)
def get_config(self):
config = super(ssd_lite_conv, self).get_config()
config.update({
"filters": self.filters,
"kernel": self.kernel})
return config
def call(self, inputs, training=None):
        x = self.dwise(inputs)
        x = self.dwbn(x)
        x = self.dwrelu6(x)
        x = self.pwise(x)
return x
class SSD_layer(layers.Layer):
#
# Constructor de la layer
# Args:
# classes: cantidad de categorias a clasificar
# priors: el numero de priors de cada feature
def __init__(self,
classes=3,
aspect_ratios=[1, 2, 3, 1/2, 1/3],
num_fmap=1,
total_fmaps=3,
img_size=224,
initializer=None,
regularizer=None,
weight_decay=1e-4,
name="SSD_layer", **kwargs):
super(SSD_layer, self).__init__(name=name, **kwargs)
self.classes = classes
self.aspect_ratios = aspect_ratios
# calcula el numero de priors dependiendo de los aspect ratios
# siguiendo la implemetacion del paper
self.priors = compute_num_priors(aspect_ratios)
self.num_fmap = num_fmap
self.total_fmaps = total_fmaps
self.img_size = img_size
if initializer == None:
self.w_initializer = GlorotNormal()
else:
self.w_initializer = initializer
if regularizer == None:
self.w_regularizer = l2(weight_decay)
else:
            self.w_regularizer = regularizer
# Realiza la prediccion de la seguriada de la clase y del tipo
# de bounding box
self.conv_conf = ssd_lite_conv(self.priors*self.classes)
"""
self.conv_conf = normal_conv(self.priors*self.classes, (3, 3),
name=name+"_conv_conf",
padding="SAME")
"""
# Realiza la prediccion del offset de las default box,
# el numero de filtros es de num_priors * 4(dx,dy,dw,dh)
self.conv_loc = ssd_lite_conv(self.priors*4)
"""
self.conv_loc = normal_conv(self.priors*4, (3, 3),
name=name+"_conv_loc",
padding="SAME")
"""
def get_config(self):
config = super(SSD_layer, self).get_config()
config.update({
"classes": self.classes,
"aspect_ratios": self.aspect_ratios,
"num_fmap": self.num_fmap,
"total_fmaps": self.total_fmaps,
"img_size": self.img_size
})
return config
# Recive el feature map y calcula lo siguiente:
# conf: tensor shape (batch, features, features, priors, classes)
# loc: tensor shape (batch, features, features, priors, 4(dx,dy,dw,dh)
# priors: tensor shape (features, features, priors, 4(cx, cy, w, h))
# con eso se puede obtener, una version con todo junto para el loss
# shape[batch, features*features, priors, classes+4(dx, dy, dw, dh)+4(cx, cy, w h)]
def call(self, inputs, training=None):
b_size = inputs.get_shape().as_list()[0]
features = inputs.get_shape().as_list()[1]
conf = self.conv_conf(inputs)
loc = self.conv_loc(inputs)
bpriors = PriorsBoxes(batch_size=b_size, features=features, num_fmap=self.num_fmap,
total_fmaps=self.total_fmaps, aspect_ratios=self.aspect_ratios,
img_size=self.img_size)
# reshape clasification de las convoluciones
conf = tf.reshape(conf, [b_size, features*features,
self.priors, self.classes])
loc = tf.reshape(loc, [b_size, features*features,
self.priors, 4])
bpriors = tf.cast(bpriors, tf.float32)
prediction = tf.concat([conf, loc, bpriors], -1)
return prediction
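# Shape example for SSD_layer.call (illustrative): with batch size 8, a 10x10
# feature map, 3 classes and 6 priors per cell, conf is (8, 100, 6, 3), loc is
# (8, 100, 6, 4) and the prior boxes are (8, 100, 6, 4), so the concatenated
# prediction has shape (8, 100, 6, 11).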
#
# Metodo que calcula el numero de priors dependiendo de cuantos aspect ratios
# se usen
# Args:
# aspect_ratios: arreglo de proporciones
# Returns:
# priors: entero que representa el numero de default boxes
def compute_num_priors(aspect_ratios):
priors = 0
for ratio in aspect_ratios:
priors += 1
# en caso de ratio == 1, se agrega otro ratio
if ratio == 1:
priors += 1
return priors
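# Example (illustrative): the ratio == 1 case adds one extra prior, so the aspect
# ratios used throughout this file yield 6 default boxes per grid cell.
assert compute_num_priors([1, 2, 3, 1/2, 1/3]) == 6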
# Metodo que calcula los priorboxes de un feature map
# Args:
# features: entero mxm de un feature map
# num_fmap: number of feature map of m feature maps
# total_fmaps: the total number of feature maps of the network
# aspect_ratios: arreglo de proporciones
# img_size: tamaño de la imagen original
# Returns:
# Tensor with boxes loc of shape (features, features, priors, 4(cx, cy, w, h))
def PriorsBoxes(batch_size=None,
features=None,
num_fmap=None,
total_fmaps=None,
aspect_ratios=None,
img_size=None):
# metodo de calcula la escala de las cajas
def compute_scale(k, m):
s_min = 0.15
s_max = 0.9
s_k = s_min + (((s_max - s_min)/(m - 1))*(k - 1))
return s_k
# calcula el ancho y alto de una caja segun su escala y proporcion
def box_size(scale, aspect_ratio):
h = scale/math.sqrt(aspect_ratio)
w = scale*math.sqrt(aspect_ratio)
return h, w
s_k = compute_scale(num_fmap, total_fmaps)
priors = 0
heights = []
widths = []
# Calcula los tamaños de las bounding boxes
for ar in aspect_ratios:
priors += 1
bh, bw = box_size(s_k, ar)
heights.append(bh)
widths.append(bw)
        # cuando el ratio es 1, se calcula otro segun el paper
if ar == 1:
priors += 1
s_k_p = compute_scale(num_fmap+1, total_fmaps)
bh, bw = box_size(s_k_p, ar)
heights.append(bh)
widths.append(bw)
default_boxes = np.zeros((features, features, priors, 4))
cell_size = 1 / features
cell_center = cell_size / 2
for i in range(features):
for j in range(features):
for p in range(priors):
h, w = heights[p], widths[p]
x = j*cell_size + cell_center
y = i*cell_size + cell_center
default_boxes[i, j, p, 0] = x
default_boxes[i, j, p, 1] = y
default_boxes[i, j, p, 2] = w
default_boxes[i, j, p, 3] = h
default_boxes *= img_size
default_boxes = tf.convert_to_tensor(default_boxes)
# Checa si se especifico un batch_size en los parametros
# si si, agrega una dimension y duplica las otras mediante tiling
if batch_size == None:
return default_boxes
else:
default_boxes = tf.reshape(default_boxes, [features*features, priors, 4])
default_boxes = tf.expand_dims(default_boxes, 0)
default_boxes = tf.tile(default_boxes, [batch_size, 1, 1, 1])
return default_boxes
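# Minimal usage sketch (illustrative): default boxes for a single 8x8 feature map,
# using the same arguments SSD_layer passes in its call() above.
def _demo_priors_boxes():
    ratios = [1, 2, 3, 1/2, 1/3]
    single = PriorsBoxes(features=8, num_fmap=2, total_fmaps=3,
                         aspect_ratios=ratios, img_size=224)
    print(single.shape)  # (features, features, priors, 4) -> (8, 8, 6, 4)
    batched = PriorsBoxes(batch_size=4, features=8, num_fmap=2, total_fmaps=3,
                          aspect_ratios=ratios, img_size=224)
    print(batched.shape)  # tiled per batch -> (4, 64, 6, 4)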
#
# Convierte coordenadas (cx,cy,w,h) a (x,y,w,h)
# Args:
# loc: tensorf of shape [4]
# Returns:
# x, y, w, h: posicion y tamaño como enteros
def bbox_center_to_rect(loc):
w = loc[2]
h = loc[3]
x = loc[0] - (w/2)
y = loc[1] - (h/2)
return int(x), int(y), int(w), int(h)
#
# Convierte coordenadas (x, y, w, h) a (cx, cy, w, h)
# Args:
# loc: tensor of shape [4]
# Returns:
# cx, cy, w, h: posicion y tamaño como enteros
def bbox_rect_to_center(loc):
    w = loc[2]
    h = loc[3]
    cx = loc[0] + (w/2)
    cy = loc[1] + (h/2)
    return int(cx), int(cy), int(w), int(h)
#
# Convierte coordenadas (x, y, w, h) a (cx, cy, w, h)
# Args:
# loc: tensor of shape [4]
# Returns:
# tensor of shape [4] (cx, cy, w, h)
def tbbox_rect_to_center(loc):
w = loc[2]
h = loc[3]
cx = loc[0] + (w/2)
cy = loc[1] + (h/2)
return tf.convert_to_tensor(np.array([cx, cy, w, h]))
def rect_to_coord(box):
_box = np.copy(box)
box[0] = _box[0]
box[1] = _box[1]
box[2] = box[0] + _box[2]
box[3] = box[1] + _box[3]
return box
#
# Calcula el jaccard overlap o intesection over union IOU
# entre dos bounding boxes
# Args:
# t_boxA: tensor of shape [4] de (x, y, w, h)
# t_boxB: tensor of shape [4] de (x, y, w, h)
# Returns:
# iou = float de 0.0 a 1.0
def intersection_over_union(t_boxA, t_boxB):
# Se convierte los tensores a numpy arrays
boxA = np.array(t_boxA)
boxB = np.array(t_boxB)
boxA = rect_to_coord(boxA)
boxB = rect_to_coord(boxB)
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
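# Illustrative check: identical boxes give IoU 1.0, far-apart boxes give 0.0.
# Boxes are in (x, y, w, h) format, as expected by rect_to_coord above.
def _demo_iou():
    a = np.array([10.0, 10.0, 20.0, 20.0])
    b = np.array([100.0, 100.0, 20.0, 20.0])
    print(intersection_over_union(a, a))  # -> 1.0
    print(intersection_over_union(a, b))  # -> 0.0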
# Pipeline encargada de procesar un batch de training set, para
# realizar la estrategia de matching para crear la informacion de
# ground truth
class SSD_data_pipeline(object):
# Metodos del objeto
    # init: inicializacion con los parametros de la arquitectura
# process: procesa un batch de imagenes
# Inicializacion de los parametros acerca de la arquitectura de la red
# Argumentos:
# aspect_ratios: arreglo conteniendo los aspect ratios segun el paper
# feature_maps: arreglo conteniendo pares con los tamaños de los f maps
# categories_arr: arreglo de strings con los nombre de las categorias
# img_size: entero que contiene el numero de pixeles de un lado de la img
def __init__(self,
aspect_ratios=[1, 2, 3, 1/2, 1/3],
feature_maps=None,
categories_arr=None,
img_size=None):
self.aspect_ratios = aspect_ratios
self.feature_maps = feature_maps
self.categories_arr = categories_arr
self.num_categories = len(self.categories_arr)
self.img_size = img_size
self.num_priors = compute_num_priors(aspect_ratios)
self.categories_index = {}
# Creacion de indices de las categorias
for i in range(len(self.categories_arr)):
self.categories_index[self.categories_arr[i]] = i
# Procesa un batch de imagenes del data set de coco para convertirlos a training data
# Argumentos:
# path_to_tfrecord: string al dataset de coco en formato tfrecord
def preprocess_tfrecord_coco(self, path_to_tfrecord, res_path):
total_fmaps = len(self.feature_maps)
dataset_tfrecord_coco = tfrecord_coco.parse_dataset(path_to_tfrecord)
def gen_match(img_cats, img_locs, debugging=False, debug_image=None):
y_true = None
            num_bboxes = img_locs.get_shape().as_list()[0]
num_matches = 0
if debugging:
                for loc in img_locs:
draw_bbox(img=debug_image, bbox=loc)
for f in range(total_fmaps):
m = self.feature_maps[f][0]
priors = PriorsBoxes(features=m, num_fmap=f+1, total_fmaps=total_fmaps,
aspect_ratios=self.aspect_ratios, img_size=self.img_size)
feature_y = np.zeros((m, m, self.num_priors, 1 + self.num_categories + 4))
for i in range(m):
for j in range(m):
for p in range(self.num_priors):
prior = priors[i][j][p]
prior = bbox_center_to_rect(prior)
for b in range(num_bboxes):
iou = intersection_over_union(prior, img_locs[b])
if iou > 0.5:
num_matches += 1
match = tf.ones([1, 1])
# Se obtiene la categoria y se convierte a one hot
cat = img_cats[b].numpy().decode("UTF-8")
cat_one_hot = [self.categories_index[cat]]
cat_one_hot = tf.one_hot(cat_one_hot, self.num_categories)
# se calcula la diferencia del prior al ground truth
prior = tbbox_rect_to_center(prior)
loc = tbbox_rect_to_center(img_locs[b])
diff = tf.cast(tf.abs(prior - loc),
tf.float32)
diff = tf.expand_dims(diff, 0)
match_y = tf.concat([match, cat_one_hot, diff], -1)
feature_y[i][j][p] = match_y
if debugging:
draw_bbox(img=debug_image, bbox=prior, color=(255, 0, 0))
feature_y = tf.convert_to_tensor(feature_y)
if f == 0:
y_true = tf.identity(tf.reshape(feature_y, [m*m, self.num_priors, 1 +
self.num_categories + 4]))
else:
feature_y = tf.reshape(feature_y, [m*m, self.num_priors, 1 +
self.num_categories + 4])
y_true = tf.concat([y_true, tf.identity(feature_y)], 0)
if num_matches > 0:
y_true = tf.cast(y_true, tf.float32)
if num_matches == 0:
return None
return y_true
it = iter(dataset_tfrecord_coco)
writer = tf.io.TFRecordWriter(res_path)
def write_img_to_file(x_data, y_data):
x_data = tf.cast(x_data, tf.float32)
data = {
"x": bytes_feature(tf.io.serialize_tensor(x_data)),
"y": bytes_feature(tf.io.serialize_tensor(y_data))
}
example = tf.train.Example(features=tf.train.Features(feature=data))
writer.write(example.SerializeToString())
i = 0
for img_data in dataset_tfrecord_coco.take(1):
print("Processing image {}".format(i+1))
i += 1
# Decodificacion de imagen
image_string = np.frombuffer(img_data["img/str"].numpy(), np.uint8)
decoded_image = cv2.imdecode(image_string, cv2.IMREAD_COLOR)
# tamaños original de la imagen
y_, x_ = decoded_image.shape[0], decoded_image.shape[1]
# rescale de bbounding box
x_scalar = self.img_size / x_
y_scalar = self.img_size / y_
# Decodificacion de anotaciones
cats, locs = self.decode_bboxes(img_data["img/bboxes/category"],
img_data["img/bboxes/x"], img_data["img/bboxes/y"],
img_data["img/bboxes/width"],
img_data["img/bboxes/height"], x_scalar, y_scalar)
# Crea mask de los indices correctos
mask = self.mask_indices(img_data["img/bboxes/category"])
# Aplica mask
cats = tf.boolean_mask(cats, mask)
locs = tf.boolean_mask(locs, mask)
# Crea un patch para data augmentation
aug_image, locs, cats = ssd_sample_patch(decoded_image, locs, cats)
locs_cp = locs.copy()
# resize de la imagen y la convierte a un tensor
resized_img = cv2.resize(aug_image, (self.img_size, self.img_size))
aug_image_cp = resized_img.copy()
image_tensor = tf.convert_to_tensor(resized_img)
image_tensor /= 255 # normaliza entre 0-1
locs = tf.convert_to_tensor(locs)
cats = tf.convert_to_tensor(cats)
y = gen_match(cats, locs, debugging=True, debug_image=resized_img)
cv2.imshow("matching strategy", resized_img)
cv2.waitKey(0)
            if y is not None:
write_img_to_file(image_tensor, y)
# obtiene la imagen y localizaciones expandidas
ex_img, ex_locs = ssd_expand_image(aug_image_cp, locs_cp)
ex_img = cv2.resize(ex_img, (self.img_size, self.img_size))
image_tensor = tf.convert_to_tensor(ex_img)
image_tensor /= 255 # normaliza entre 0-1
ex_locs = tf.convert_to_tensor(ex_locs)
ex_y = gen_match(cats, ex_locs, debugging=True, debug_image=ex_img)
print("Expanded image")
cv2.imshow("matching strategy expanded", ex_img)
cv2.waitKey(0)
            if ex_y is not None:
write_img_to_file(image_tensor, ex_y)
writer.close()
    # procesa y cambia el formato de las anotaciones de las imagenes del tfrecord
# Args:
# cats: sparse tensor de strings con las categorias
# x: sparse tensor con las coordenadas x del bbox
# y: sparse tensor con las coordenadas y del bbox
# width: sparse tensor con en ancho del bbox
# height: sparse tensor con la altura del bbox
# x_scalar: scalar horizontal que se le aplica al bbox por el resize de la img
# y_scalar: scalar vertical que se le aplica al bbox por el resize de la img
def decode_bboxes(self, cats, x, y, width, height, x_scalar, y_scalar):
cats_tensor = []
loc_tensor = []
for i in cats.indices:
cat = cats.values[i[0]].numpy().decode("UTF-8")
_x = x.values[i[0]].numpy() * x_scalar
_y = y.values[i[0]].numpy() * y_scalar
_w = width.values[i[0]].numpy() * x_scalar
_h = height.values[i[0]].numpy() * y_scalar
cats_tensor.append(cat)
loc_tensor.append([_x, _y, _w, _h])
return tf.convert_to_tensor(cats_tensor), tf.convert_to_tensor(loc_tensor)
# Funcion que regresa un mask booleano de los bbox que se van usar para el
# modelo, segun las categirias a clasificar
# Args:
# sparse_tensor: sparse tensor con las cadenas de las categorias
def mask_indices(self, sparse_tensor):
indices = sparse_tensor.indices
mask = []
for i in indices:
index = i.numpy()[0]
cat = sparse_tensor.values[index]
cat = cat.numpy().decode("UTF-8")
mask.append(cat in self.categories_arr)
return mask
# Metodo que carga dataset ya preprocesado de un tfrecord
# Args:
# path_to_tfrecord: string con el path al archivo tfrecord
# Returns
# tensor_data: tensorflow Dataset object.
def SSD_load_dataset(path_to_tfrecord):
raw_data = tf.data.TFRecordDataset(path_to_tfrecord)
format_ = {
"x": tf.io.FixedLenFeature([],
tf.string),
"y": tf.io.FixedLenFeature([], tf.string)
}
def _parse_function(example):
return tf.io.parse_single_example(example, format_)
data = raw_data.map(_parse_function)
def _parse_tensors(example):
x = tf.io.parse_tensor(example["x"], tf.float32)
y_true = tf.io.parse_tensor(example["y"], tf.float32)
return x, y_true
tensor_data = data.map(_parse_tensors)
return tensor_data
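# Usage sketch (path is a placeholder): load a preprocessed record written by
# SSD_data_pipeline.preprocess_tfrecord_coco and batch it for training.
def _demo_load_dataset():
    ds = SSD_load_dataset("path/to/preprocessed.tfrecord")
    for x, y_true in ds.batch(8).take(1):
        print(x.shape, y_true.shape)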
#
# Metodos para SSD data augmentation
# Metodo que calcula IOU entre 2 batchs de bboxes
# Args:
# boxA: tensor of shape [?, 4] (x, y, w, h)
# boxB: tensor of shape [?, 4] (x, y, w, h)
def iou_batch(_boxA, _boxB):
boxA = np.copy(_boxA)
boxB = np.copy(_boxB)
# Convierte a (x, y, w+x, h+y)
boxA[:, 2:] = boxA[:, :2] + _boxA[:, 2:]
boxB[:, 2:] = boxB[:, :2] + _boxB[:, 2:]
# Calcula la interseccion
xA = tf.math.maximum(boxA[:, 0], boxB[:, 0])
yA = tf.math.maximum(boxA[:, 1], boxB[:, 1])
xB = tf.math.minimum(boxA[:, 2], boxB[:, 2])
yB = tf.math.minimum(boxA[:, 3], boxB[:, 3])
interArea = tf.math.maximum(0, xB - xA + 1) * tf.math.maximum(0, yB - yA + 1)
boxAArea = (boxA[:,2] - boxA[:,0] + 1) * (boxA[:,3] - boxA[:,1] + 1)
boxBArea = (boxB[:,2] - boxB[:,0] + 1) * (boxB[:,3] - boxB[:,1] + 1)
iou = interArea / (boxAArea + boxBArea - interArea)
return iou
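# Illustrative check for the batched IoU, mirroring how ssd_sample_patch below
# compares every ground-truth box against one candidate crop rectangle.
def _demo_iou_batch():
    locs = np.array([[10.0, 10.0, 20.0, 20.0],
                     [50.0, 50.0, 10.0, 10.0]])
    rect = np.array([[10.0, 10.0, 20.0, 20.0]])
    print(np.array(iou_batch(locs, rect)))  # -> [1.0, 0.0]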
# Metodo que genera un patch a la imagen, segun paper de ssd
# Args:
# image: tensor of shape (height, width, 3)
# locs: tensor con localizacion de bbox de shape [?, 4]
# cats: tensor con las categorias de los bbox, de shape [?]
# Returns:
# igual que args pero con el patch aplicado a la imagen
def ssd_sample_patch(image, locs, cats):
image = np.array(image)
locs = np.array(locs)
cats = np.array(cats)
sample_options = (
# Original input image
None,
# patch con min iou .1, .3, .7, .9
(0.1, None),
(0.3, None),
(0.7, None),
(0.9, None),
# random patch
(None, None)
)
height, width, _ = image.shape
while(True):
# Escoger un modo de forma aleatoria
mode = random.choice(sample_options)
if mode is None:
return image, locs, cats
min_iou, max_iou = mode
if min_iou is None:
min_iou = float("-inf")
if max_iou is None:
max_iou = float("inf")
# Maximos intentos (50)
for _ in range(50):
current_image = image
w = random.uniform(0.3*width, width)
h = random.uniform(0.3*height, height)
# aspcect ratio esta entre .5 y 2
if h / w < 0.5 or h / w > 2:
continue
left = random.uniform(0, width - w)
top = random.uniform(0, height - h)
# convert to rect
rect = np.array([int(left), int(top), int(w), int(h)])
# calcular iou
overlap = iou_batch(locs, np.array([rect]))
overlap = np.array(overlap)
# si se satisface las restricciones del iou
if overlap.min() < min_iou and max_iou > overlap.max():
continue
# Obtiene crop de la imagen
current_image = current_image[rect[1]:rect[1]+rect[3],
rect[0]:rect[0]+rect[2], :]
centers = locs[:, :2] + (locs[:, 2:] / 2.0)
# mask locs
m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])
m2 = (rect[0]+rect[2] > centers[:, 0]) * (rect[1]+rect[3] > centers[:, 1])
mask = m1 * m2
# si se tiene boxes validas
if not mask.any():
continue
# aplica mask a bboxes y cats
current_locs = locs[mask, :].copy()
current_cats = cats[mask].copy()
# cambia dataformat para corregir coordenadas de bboxes
rect[2:] = rect[:2] + rect[2:]
current_locs[:, 2:] = current_locs[:,:2] + current_locs[:, 2:]
# should we use the box left and top corner or the crop's
current_locs[:, :2] = np.maximum(current_locs[:, :2], rect[:2])
# adjust to crop (by substracting crop's left,top)
current_locs[:, :2] -= rect[:2]
current_locs[:, 2:] = np.minimum(current_locs[:, 2:], rect[2:])
# adjust to crop (by substracting crop's left,top)
current_locs[:, 2:] -= rect[:2]
            # regresa al formato correcto (x,y,w,h)
current_locs[:, 2:] = current_locs[:, 2:] - current_locs[:, :2]
return current_image, current_locs, current_cats
# Metodo que expande una imagen, rellena lo demas con el mean
# esto segun la implementacion descrita en el paper SSD
# Args:
# image: tensor con la imagen de shape [width, height, 3]
# locs: tensor con los bboxes de la img [?, 4]
# Returns:
# same as input, but expanded
def ssd_expand_image(image, locs):
image = np.array(image)
locs = np.array(locs)
height, width, depth = image.shape
ratio = random.uniform(2, 4)
left = random.uniform(0, width*ratio - width)
top = random.uniform(0, height*ratio - height)
expand_image = np.zeros((int(height*ratio), int(width*ratio), depth),
dtype=image.dtype)
expand_image[:, :, :] = np.mean(image)
expand_image[int(top):int(top+height), int(left):int(left+width)] = image
image = expand_image
locs = locs.copy()
locs[:, :2] += (int(left), int(top))
return image, locs
```
#### File: models/tests/test_ssd_dataaug.py
```python
import tensorflow as tf
import numpy as np
import cv2
import json
import random
# Regresar un directoria para poder acceder modulo de otra carpeta
import sys
sys.path.append("..")
from ops.SSD import iou_batch, intersection_over_union, ssd_sample_patch
from ops.SSD import ssd_expand_image
sys.path.append("tests/")
from test_bboxes import draw_bbox
def main():
path_to_image = "test_images/000000000670.jpg"
path_to_ann = "test_images/000000000670.jpg.json"
with open(path_to_ann) as json_text:
ann = json.loads(json_text.read())
image = cv2.imread(path_to_image)
bboxes_numpy = np.ones((len(ann["bboxes"]), 4))
cats = []
for i in range(len(ann["bboxes"])):
bbox = ann["bboxes"][i]
x = bbox["center_x"]
y = bbox["center_y"]
w = bbox["width"]
h = bbox["height"]
bboxes_numpy[i, :] = [x, y, w, h]
cats.append(bbox["category_id"])
#bboxes_tensor[i, 0] = x
#draw_bbox(img=image, bbox=(x, y, w, h))
aug_image, aug_bboxes = ssd_expand_image(image, bboxes_numpy)
aug_image, aug_bboxes, aug_cats = ssd_sample_patch(aug_image, aug_bboxes, cats)
for box in aug_bboxes:
draw_bbox(img=aug_image, bbox=(box[0], box[1], box[2], box[3]))
cv2.imshow("aug_image", aug_image)
cv2.waitKey(0)
main()
```
#### File: tfmodels/src/test_f360.py
```python
import os
import glob
import json
import cv2
import tensorflow as tf
import numpy as np
def main():
#path_to_model = "trained_models/f360_vgg_01/"
path_to_model = "trained_models/f360_MobileNetV2_04/"
path_to_imgs = "datasets/test-report/"
with open(path_to_model+"setup.json", "r") as data:
setup = json.load(data)
#w, h, _ = 100, 100, 3 #setup["input_shape"]
w, h, _ = setup["input_shape"]
classes = setup["classes"]
print("[INFO] Loading model...")
model = tf.keras.models.load_model(path_to_model+"model.h5")
_ = input("[INFO] Click to continue")
#model = tf.keras.models.load_model(path_to_model+"checkpoints/model_checkpoint_5.h5")
img_folders = glob.glob(path_to_imgs + "*")
img_paths = []
for folder in img_folders:
for i in glob.glob(folder + "/*"):
img_paths.append(i)
img_paths = glob.glob(path_to_imgs+"*")
img_paths = sorted(img_paths)
print(img_paths)
i = 0
for img_path in img_paths:
image = cv2.imread(img_path)
original_image = np.copy(image)
image = tf.convert_to_tensor(image)
image /= 255
image = tf.image.resize(image, [w, h])
image = tf.expand_dims(image, 0)
prediction = model.predict(image)
prediction *= 100.0
prediction = tf.cast(prediction, tf.int8)
print(prediction)
index = tf.math.argmax(prediction, axis=1)
index = tf.keras.backend.get_value(index)[0]
cat = classes[index]
print(cat)
cv2.putText(original_image, cat , (10, 45), cv2.FONT_HERSHEY_SIMPLEX,
1.8, (0, 255, 0), 4)
cv2.imshow("Test images", original_image)
cv2.waitKey(0)
cv2.imwrite(path_to_imgs+"res_{}.png".format(i), original_image)
i += 1
main()
```
#### File: tfmodels/src/train_f360.py
```python
import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # ERROR
import shutil
from os import path
import json
import tensorflow as tf
import cv2
import numpy as np
# Importa datasets
from datasets.Fruits360.f360_dataset import f360_load_dataset
from datasets.mnist.mnist_dataset import load_mnist_dataset_resize, load_mnist_dataset
# Importa modelos
from models.mobilenetv2 import MobileNetV2
from models.test_models import mnist_model
from models.smallervggnet import SmallerVGGNet
from tensorflow.keras.models import Sequential
# Import funciones para entrenameinto
from training_utils.training import continue_training
from training_utils.training import train_model
def main():
train_f360()
#train_mnist()
def f360_train_setup():
setup = {
"info": """Entrenando Fruits 360 dataset con MobileNetV2 con weights de imagnet y RMSprop,
input shape de [96, 96, 3], se esta entrenando el modelo completo. El
dataset es un subdataset del dataset completo""",
"path": "trained_models/f360_MobileNetV2_d15/",
"dataset_path": "datasets/Fruits360/F360-3-18-O0/",
"num_classes": 0,
"classes": [],
"input_shape": (96, 96, 3),
"epochs": 400,
"batch_size": 6,
"loss": "categorical_crossentropy",
"metrics": ["accuracy"],
"learning_rate": 0.0001,
"seed": 123321,
"dataset_info": " "
}
return setup
def train_f360():
setup = f360_train_setup()
tf.random.set_seed(setup["seed"])
np.random.seed(setup["seed"])
w, h, _ = setup["input_shape"]
train, test, info = f360_load_dataset(path=setup["dataset_path"], resize=w,
num_classes=setup["num_classes"])
train = train.shuffle(int(info["train_size"] / info["num_classes"])).batch(setup["batch_size"])
test = test.batch(setup["batch_size"])
setup["dataset_info"] = info
setup["classes"] = info["categories"]
setup["num_classes"] = info["num_classes"]
#model = SmallerVGGNet.build(input_shape=(100, 100, 3), classes=3)
#model = tf.keras.applications.MobileNetV2(include_top=True,
# weights="imagenet",classes=3, input_shape=(100, 100, 3))
model = mnv2_transfer_model(num_classes=setup["num_classes"],
input_shape=setup["input_shape"])
#model = mnv2_finetune_model(num_classes=3, input_shape=(96, 96, 3))
train_model(setup, model, (train, test))
def mnv2_transfer_model(num_classes=None, input_shape=None):
# Obtiene el modelo base que proporciona keras
# este no incluye el top, porque es custom
base_model = tf.keras.applications.MobileNetV2(include_top=False,
weights="imagenet", input_shape=input_shape)
base_model.trainable = True
# Agrega un classficador al final del modelo
global_average_layer = tf.keras.layers.GlobalAveragePooling2D(name="gap")
prediction_layer = tf.keras.layers.Dense(num_classes, name="dense")
activation_layer = tf.keras.layers.Activation("softmax", name="activation")
# Crea un nuevo modelo con el base_model y clasficador
model = tf.keras.Sequential([
base_model,
global_average_layer,
prediction_layer,
activation_layer])
return model
def mnv2_finetune_model(num_classes=None, input_shape=None):
    # Obtiene el modelo de MobileNetV2 con transfer learning
    model = mnv2_transfer_model(num_classes=num_classes, input_shape=input_shape)
    # El backbone MobileNetV2 es la primera layer del Sequential;
    # las layers que se congelan son las del backbone, no las del modelo completo
    base_model = model.layers[0]
    base_model.trainable = True
    # El numero de layers que se van a congelar
    fine_tune_at = 50
    # Congela las layers
    for layer in base_model.layers[:fine_tune_at]:
        layer.trainable = False
    return model
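# Optional sanity check (illustrative, not called during training): reports how
# many layers of the nested MobileNetV2 backbone stay frozen after fine tuning.
def _demo_frozen_layers():
    model = mnv2_finetune_model(num_classes=3, input_shape=(96, 96, 3))
    backbone = model.layers[0]  # the MobileNetV2 base inside the Sequential
    frozen = sum(1 for layer in backbone.layers if not layer.trainable)
    print("frozen layers:", frozen, "of", len(backbone.layers))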
main()
```
#### File: src/training_utils/training.py
```python
import os
from os import path
import json
import shutil
import tensorflow as tf
import numpy as np
# Importa cosas de Keras API
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
# Importa callbacks del modelo
from training_utils.callbacks import TrainingCheckPoints
from tensorflow.keras.callbacks import CSVLogger, TensorBoard
# Importa cosas para graficar el entrenameinto
from training_utils.training_graphs import graph_confusion_matrix
from training_utils.training_graphs import graph_model_metrics
# Function that continues the training of a model
# Args:
# path_to_model: path were to find the model and setup
# dataset: tuple of tensorflow dataset of (train, test)
def continue_training(path_to_model, dataset):
if not path.exists(path_to_model):
print("[ERROR] El path a la carpeta del modelo no existe")
return
# carga el setup del modelo
setup = None
with open(path_to_model+"setup.json", "r") as data:
setup = json.load(data)
# carga el estado de entrenamiento
state = None
with open(path_to_model+"checkpoints/"+"training_state.json", "r") as data:
state = json.load(data)
    print("[INFO] Continuando entrenamiento de modelo.")
# carga el modelo
model_name = "model_checkpoint_{}.h5".format(state["epoch"]-1)
model = tf.keras.models.load_model(path_to_model+"checkpoints/"+model_name)
# vuelve a compilar el modelo
opt = Adam(lr=state["learning_rate"])
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], initial_epoch=state["epoch"],
path=setup["path"], continue_train=True, classes=setup["classes"])
# Method that starts the model training
# Args:
# setup: Dictionary with the model setup
# model: the keras.Model architecture to train
# dataset: tuple of tensorflow dataset of (train, test)
def train_model(setup, model, dataset):
# Asegura que el path sea el correcto
if not path.exists(setup["path"]):
os.makedirs(setup["path"])
else:
# Borra las carpetas si ya existen
if path.exists(setup["path"]+"checkpoints"):
shutil.rmtree(setup["path"]+"checkpoints")
if path.exists(setup["path"]+"logs"):
shutil.rmtree(setup["path"]+"logs")
# crea carpeta donde se van a guardar los checkpoints
if not path.exists(setup["path"]+"checkpoints"):
os.mkdir(setup["path"] + "checkpoints")
# Escribe el setup del entrenamiento
with open(setup["path"]+"setup.json", "w") as writer:
json.dump(setup, writer, indent=4)
print("[INFO] Entrenando modelo.")
# Dibuja la arquitectura del modelo
plot_model(model, to_file=setup["path"]+"model_architecture.png",
show_shapes=True, show_layer_names=True, expand_nested=False)
# Crea optimizador, por defecto Adam
opt = Adam(lr=setup["learning_rate"])
#opt = RMSprop(lr=setup["learning_rate"])
# Compila el modelo
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], path=setup["path"], classes=setup["classes"])
# Metodo, que entrena un modelo ya compilado, implementa callbacks de
# tensorboard, log a un archivo CSV y creacion de checkpoints cuando ocurre
# mejoras en el loss, tambien grafica y crea matriz de confusion
# Args:
# compiled_model: keras.Model ya compilado
# dataset: tuple of tensorflow dataset of (train, test)
# opt: keras.Optimizer used in training
# epochs: The number of epochs to train
# initial_epoch: Epoch to start training, 0 for normal training
# continue_train: if the model is continuing training
# classes: array of classes that the model predict
def fit_model(compiled_model=None, # El modelo debe de estar compilado
dataset=None,
opt=None,
epochs=None,
initial_epoch=0,
path=None,
continue_train=False,
classes=None):
# obtiene el dataset
train, test = dataset
# Callbacks durante entrenamiento
relative = 0
if initial_epoch >= 1:
relative = initial_epoch
callbacks = [
#TrainingCheckPoints(path+"checkpoints/", relative_epoch=relative),
CSVLogger(path+"training_log.csv", append=continue_train),
TensorBoard(log_dir=path+"logs")
]
# Entrena el modelo
history = compiled_model.fit(train, initial_epoch=initial_epoch, epochs=epochs,
callbacks=callbacks, validation_data=test)
# Guarda el modelo
print("[INFO] Serializing model.")
compiled_model.save(path + "model.h5")
# Crea grafica del entrenamiento
graph_model_metrics(csv_path=path+"training_log.csv",
img_path=path+"metrics_graph.png")
# Crea confusion matrix
if test != None:
print("[INFO] Creando matriz de confusion")
graph_confusion_matrix(model=compiled_model, test_dataset=test,
classes=classes, path=path+"confusion_matrix.png")
def load_model(path):
model = tf.keras.models.load_model(path + "model.h5")
with open(path + "setup.json", "r") as data:
setup = json.load(data)
return model, setup["classes"]
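# Usage sketch (path is a placeholder): load a trained model directory written by
# train_model above and run a single dummy prediction; the input size must match
# the "input_shape" stored in that model's setup.json.
def _demo_load_model():
    model, classes = load_model("trained_models/example_model/")
    dummy = np.random.rand(1, 96, 96, 3).astype("float32")
    probs = model.predict(dummy)[0]
    print(classes[int(np.argmax(probs))])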
```
#### File: tfmodels/src/train.py
```python
import os
import shutil
from os import path
import json
import tensorflow as tf
import numpy as np
import cv2
# Importa datasets
from datasets.AOBDataset.aob_dataset import load_dataset
from datasets.data_aug import *
# Import model stuff
from models.mobilenetv2 import MobileNetV2
from models.custom_layers import ColorExtractor
from tensorflow.keras.layers import Lambda
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import CSVLogger, TensorBoard
# Import training utils
from training_utils.training_graphs import graph_confusion_matrix
from training_utils.training_graphs import graph_model_metrics
def main():
setup = train_setup()
train, test = dataset_pipeline(setup)
save_setup(setup)
train_model(setup=setup, dataset=(train, test))
def train_setup():
setup = {
"info": """
Training MobileNetV2 with ImageNet weights and RMSprop optimizer.
first training the last fully connected layer, then using
            fine tuning from the 100th layer.
""",
#"path": "trained_modelsMCPR/KMeans/MNV2_bag/",
"path": "trained_modelsMCPR/KMeans/MNV2_bag5/",
"include_bag": False,
"color_data": True,
"color_type": "KMeans", # KMeans, RGB, HIST
"dataset_path": "datasets/AOBDataset/",
"num_classes": 3,
"classes": [],
"input_shape": (224, 224, 3),
"epochs": 20,
"ft_epochs": 20,
"batch_size": 50,
"loss": "categorical_crossentropy",
"metrics": ["accuracy"],
"learning_rates": [
0.0001,
0.0001/10],
"fine_tune_at": 100,
"seed": 123321,
"dataset_info": " "
}
setup["dataset_path"] += "aob"
if setup["include_bag"] == True:
setup["dataset_path"] += "_bag"
else:
setup["dataset_path"] += "_nobag"
if setup["color_data"] == True:
if setup["color_type"] == "RGB":
setup["dataset_path"] += "_RGB/"
elif setup["color_type"] == "HIST":
setup["dataset_path"] += "_HIST/"
elif setup["color_type"] == "KMeans":
setup["dataset_path"] += "_IMG/"
else:
setup["dataset_path"] += "_IMG/"
return setup
# Creates the environment for training
# Args:
# setup: dictionary with the training setup of the model
def save_setup(setup):
# Creates the training directory
if not path.exists(setup["path"]):
os.makedirs(setup["path"])
else:
# Erase the logs dir
if path.exists(setup["path"]+"logs"):
shutil.rmtree(setup["path"]+"logs")
# Saves the setup in JSON file
with open(setup["path"]+"setup.json", "w") as writer:
json.dump(setup, writer, indent=4)
# Function for the dataset pipeline
# Args:
# setup: dictionary with the training setup of the model
def dataset_pipeline(setup):
# loads the dataset from AOB
train, test, info = load_dataset(path=setup["dataset_path"],
color_data=setup["color_data"], color_type=setup["color_type"])
    # adds information of the dataset to the training setup
setup["dataset_info"] = info
setup["classes"] = info["categories"]
setup["num_classes"] = info["num_classes"]
# Checks if there is color data to extract it
if setup["color_data"] == True and not setup["color_type"] == "KMeans":
def _join_inputs(x, c, y):
return (x, c), y
train = train.map(_join_inputs)
test = test.map(_join_inputs)
#train = train.map(color_aug)
train = train.shuffle(int(info["train_size"] / info["num_classes"])).batch(setup["batch_size"])
test = test.batch(setup["batch_size"])
return train, test
# Function that creates the multi input model
def k_model(setup):
input_img = tf.keras.Input(shape=setup["input_shape"])
base_model = tf.keras.applications.MobileNetV2(include_top=False,
alpha=1.0, weights="imagenet", input_shape=setup["input_shape"])
base_model.trainable = False
# Adds classifer head at the end of the model
global_average_layer = tf.keras.layers.GlobalAveragePooling2D(name="gap")
conv_dense = tf.keras.layers.Dense(64, activation="relu", name="conv_dense")
x = base_model(input_img)
x = global_average_layer(x)
x = conv_dense(x)
# Color data layers
resize_images = Lambda(lambda b: tf.image.resize(b, [100, 100]),
name="resize")
color_extractor = ColorExtractor(3, 20, trainable=False)
num_dense1 = tf.keras.layers.Dense(32, activation="relu", name="color_dense1")
num_dense2 = tf.keras.layers.Dense(64, activation="relu", name="color_dense2")
y = resize_images(input_img)
y = color_extractor(y)
y = num_dense1(y)
y = num_dense2(y)
combined = tf.keras.layers.Concatenate()([x, y])
prediction_layer = tf.keras.layers.Dense(setup["num_classes"],
activation="relu", name="dense")
activation_layer = tf.keras.layers.Activation("softmax", name="activation")
z = prediction_layer(combined)
z = activation_layer(z)
# Creates the new model
model = tf.keras.Model(inputs=[input_img], outputs=z)
# Creates layers dictionaire
layers_dict = {
"base_cnn": base_model,
"global_average": global_average_layer,
"conv_dense": conv_dense,
"color_extractor": color_extractor,
"num_dense1": num_dense1,
"concat": combined,
"prediction": prediction_layer,
"activation": activation_layer }
return model, layers_dict
# Function that creates the multi input model
def multi_input_model(setup):
input_img = tf.keras.Input(shape=setup["input_shape"])
if setup["color_type"] == "RGB":
input_col = tf.keras.Input(shape=(3,))
elif setup["color_type"] == "HIST":
input_col = tf.keras.Input(shape=(765,))
base_model = tf.keras.applications.MobileNetV2(include_top=False,
alpha=1.0, weights="imagenet", input_shape=setup["input_shape"])
base_model.trainable = False
# Adds classifer head at the end of the model
global_average_layer = tf.keras.layers.GlobalAveragePooling2D(name="gap")
#conv_dense = tf.keras.layers.Dense(3, activation="relu", name="conv_dense")
conv_dense = tf.keras.layers.Dense(64, activation="relu", name="conv_dense")
x = base_model(input_img)
x = global_average_layer(x)
x = conv_dense(x)
# Numerical data layers
#num_dense1 = tf.keras.layers.Dense(500, activation="relu", name="color_dense1")
num_dense1 = tf.keras.layers.Dense(256, activation="relu", name="color_dense1")
num_dense2 = tf.keras.layers.Dense(64, activation="relu", name="color_dense2")
#num_dense3 = tf.keras.layers.Dense(64, activation="relu", name="color_dense3")
y = num_dense1(input_col)
y = num_dense2(y)
#y = num_dense3(y)
combined = tf.keras.layers.Concatenate()([x, y])
prediction_layer = tf.keras.layers.Dense(setup["num_classes"], name="dense")
activation_layer = tf.keras.layers.Activation("softmax", name="activation")
z = prediction_layer(combined)
z = activation_layer(z)
# Creates the new model
model = tf.keras.Model(inputs=[input_img, input_col], outputs=z)
# Creates layers dictionaire
layers_dict = {
"base_cnn": base_model,
"global_average": global_average_layer,
"conv_dense": conv_dense,
"num_dense1": num_dense1,
#"num_dense2": num_dense2,
"concat": combined,
"prediction": prediction_layer,
"activation": activation_layer }
return model, layers_dict
# Function that creates the standard model
def std_model(setup):
base_model = tf.keras.applications.MobileNetV2(include_top=False,
alpha=1.0, weights="imagenet", input_shape=setup["input_shape"])
base_model.trainable = False
# Adds classifer head at the end of the model
global_average_layer = tf.keras.layers.GlobalAveragePooling2D(name="gap")
prediction_layer = tf.keras.layers.Dense(setup["num_classes"], name="dense")
activation_layer = tf.keras.layers.Activation("softmax", name="activation")
# Creates the new model
model = tf.keras.Sequential([
base_model,
global_average_layer,
prediction_layer,
activation_layer])
# Creates layer dictionaire
layers_dict = {
"base_cnn": base_model,
"global_average": global_average_layer,
"prediction": prediction_layer,
"activation": activation_layer }
return model, layers_dict
# Function that trains the model
def train_model(setup=None, dataset=None):
tf.random.set_seed(setup["seed"])
np.random.seed(setup["seed"])
train, test = dataset
if setup["color_data"] == True:
if setup["color_type"] == "KMeans":
model, l_dict = k_model(setup)
else:
model, l_dict = multi_input_model(setup)
else:
model, l_dict = std_model(setup)
base_model = l_dict["base_cnn"]
# Plots the model
plot_model(model, to_file=setup["path"]+"model_architecture.png",
show_shapes=True, show_layer_names=True, expand_nested=False)
# Compiles the model
opt = RMSprop(lr=setup["learning_rates"][0])
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
# Model callbacks
callbacks = [
CSVLogger(setup["path"]+"training_log.csv", append=False),
TensorBoard(log_dir=setup["path"]+"logs")
]
# Trains the model
print("[INFO] Training model")
_ = model.fit(train, epochs=setup["epochs"], callbacks=callbacks,
validation_data=test)
    # Fine tuning the model
base_model.trainable = True
# Num of layers in the base model: 155
fine_tune_at = setup["fine_tune_at"]
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
opt = RMSprop(lr=setup["learning_rates"][1])
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
# Model callbacks
callbacks = [
CSVLogger(setup["path"]+"training_log.csv", append=True),
TensorBoard(log_dir=setup["path"]+"logs")
]
# Trains the model
print("[INFO] Fine tune phase")
total_epochs = setup["epochs"] + setup["ft_epochs"]
_ = model.fit(train, initial_epoch=setup["epochs"], epochs=total_epochs,
callbacks=callbacks, validation_data=test)
# Saves model
"""
print("[INFO] Serializing model")
model.save(setup["path"] + "model.h5")
"""
# Graph model metrics
print("[INFO] Graphing metrics")
graph_model_metrics(csv_path=setup["path"] + "training_log.csv",
img_path=setup["path"]+"metrics_graph.png")
main()
``` |
{
"source": "joseluisvelasco/STELLOPT",
"score": 2
} |
#### File: STELLOPT/pySTEL/VMECplot.py
```python
import sys, os
os.environ['ETS_TOOLKIT'] = 'qt4'
import matplotlib
matplotlib.use("Qt4Agg")
import matplotlib.pyplot as _plt
import numpy as np #For Arrays
from math import pi
#QT4
from PyQt4 import uic, QtGui
from PyQt4.QtGui import QMainWindow, QApplication, qApp, QVBoxLayout, QSizePolicy,QIcon
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#QT5
#from PyQt5 import uic, QtGui, QtWidgets
#from PyQt5.QtWidgets import QMainWindow, QApplication, QVBoxLayout, QSizePolicy
#from PyQt5.QtGui import QIcon
#from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from libstell.libstell import read_vmec, cfunct, sfunct, torocont, isotoro, calc_jll
from matplotlib.figure import Figure
from mpl_toolkits import mplot3d
try:
qtCreatorPath=os.environ["STELLOPT_PATH"]
except KeyError:
print("Please set environment variable STELLOPT_PATH")
sys.exit(1)
#qtCreatorFile = "/u/slazerso/src/STELLOPT_GCC/pySTEL/VMECplot.ui" # Enter file here.
qtCreatorFile = qtCreatorPath+"/pySTEL/VMECplot.ui" # Enter file here.
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
class MyApp(QMainWindow):
def __init__(self):
super(MyApp, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.setStyleSheet("background-color: white;");
#self.ui.PlotButtons.setStyleSheet("background-color: white;");
self.statusBar().showMessage('Ready')
self.ui.plot_list = ['Summary','-----1D-----','Iota','q','Pressure',\
'<Buco>','<Bvco>','<jcuru>','<jcurv>','<j.B>', '-----3D------','|B|','sqrt(g)',\
'B^u','B^v','B_s','B_u','B_v','j^u','j^v', 'jll', 'j.B','---Special---','LPK']
files = sorted(os.listdir('.'))
for name in files:
if(name[0:4]=='wout'):
self.ui.FileName.addItem(name)
# Init
self.vmec_data=read_vmec(self.ui.FileName.currentText())
self.ui.PlotList.addItems(self.ui.plot_list)
self.ui.PlotButtons.setEnabled(0)
self.ns = self.vmec_data['ns']
self.nu = self.vmec_data['mpol']*4
self.nv = self.vmec_data['ntor']*4*self.vmec_data['nfp']
self.nv2 = self.vmec_data['ntor']*4
if self.nu < 128:
self.nu = 128
if self.nv < 64:
self.nv = 64
self.TransformVMEC(self)
self.s=0
self.u=0
self.v=0
self.ui.rhoslider.setMaximum(self.ns-1)
self.ui.uslider.setMaximum(self.nu-1)
        self.ui.vslider.setMaximum(int(self.nv/self.vmec_data['nfp']))
# Plot figure
self.fig = Figure(figsize=(2,2),dpi=100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.ui.plot_widget.addWidget(self.canvas)
#self.canvas.draw()
# Callbacks
self.ui.FileName.currentIndexChanged.connect(self.FileSelect)
self.ui.PlotList.currentIndexChanged.connect(self.PlotSelect)
self.ui.rho_button.toggled.connect(self.CutSelect)
self.ui.pol_button.toggled.connect(self.CutSelect)
self.ui.tor_button.toggled.connect(self.CutSelect)
self.ui.flux_button.toggled.connect(self.CutSelect)
self.ui.poltor_button.toggled.connect(self.CutSelect)
self.ui.RZ_button.toggled.connect(self.CutSelect)
self.ui.ThreeD_button.toggled.connect(self.CutSelect)
self.ui.rhoslider.valueChanged.connect(self.CutSelect)
self.ui.uslider.valueChanged.connect(self.CutSelect)
self.ui.vslider.valueChanged.connect(self.CutSelect)
self.ui.savebutton.clicked.connect(self.plot_to_file)
def FileSelect(self,i):
self.vmec_data=read_vmec(self.ui.FileName.currentText())
#self.ui.PlotList.addItems(self.ui.plot_list)
self.ns = self.vmec_data['ns']
self.nu = self.vmec_data['mpol']*4
self.nv = self.vmec_data['ntor']*4*self.vmec_data['nfp']
self.nv2 = self.vmec_data['ntor']*4
if self.nu < 32:
self.nu = 32
if self.nv < 16:
self.nv = 16
self.TransformVMEC(self)
self.s=0
self.u=0
self.v=0
self.ui.rhoslider.setMaximum(self.ns-1)
self.ui.uslider.setMaximum(self.nu-1)
self.ui.vslider.setMaximum(self.nv-1)
self.ui.PlotButtons.setEnabled(0)
self.ui.PlotList.setCurrentIndex(0)
self.update_plot(self)
def PlotSelect(self,i):
plot_name = self.ui.PlotList.currentText()
# Handle Enable/Disable
self.ui.PlotButtons.setEnabled(1)
self.ui.rho_button.setEnabled(1)
self.ui.pol_button.setEnabled(1)
self.ui.tor_button.setEnabled(1)
self.ui.flux_button.setEnabled(1)
self.ui.poltor_button.setEnabled(1)
self.ui.RZ_button.setEnabled(1)
self.ui.ThreeD_button.setEnabled(1)
self.ui.rhoslider.setEnabled(1)
self.ui.uslider.setEnabled(1)
self.ui.vslider.setEnabled(1)
if (i==0):
self.ui.PlotButtons.setEnabled(0)
self.ui.rhoslider.setEnabled(0)
self.ui.uslider.setEnabled(0)
self.ui.vslider.setEnabled(0)
elif (i<10):
self.ui.rho_button.setChecked(1)
self.ui.pol_button.setEnabled(0)
self.ui.tor_button.setEnabled(0)
self.ui.flux_button.setEnabled(0)
self.ui.poltor_button.setEnabled(0)
self.ui.RZ_button.setEnabled(0)
self.ui.ThreeD_button.setEnabled(0)
self.ui.rhoslider.setEnabled(0)
self.ui.uslider.setEnabled(0)
self.ui.vslider.setEnabled(0)
self.s=0; self.u=0; self.v=0;
self.update_plot(self)
else:
self.ui.rho_button.setChecked(1)
self.CutSelect(self)
def CutSelect(self,i):
self.ui.rhoslider.setEnabled(1)
self.ui.uslider.setEnabled(1)
self.ui.vslider.setEnabled(1)
#print(self.ui.rho_button.isChecked())
if (self.ui.rho_button.isChecked()):
self.ui.rhoslider.setEnabled(0)
self.u = self.ui.uslider.value()
self.v = self.ui.vslider.value()
elif (self.ui.pol_button.isChecked()):
self.ui.uslider.setEnabled(0)
self.s = self.ui.rhoslider.value()
self.v = self.ui.vslider.value()
elif (self.ui.tor_button.isChecked()):
self.ui.vslider.setEnabled(0)
self.s = self.ui.rhoslider.value()
self.u = self.ui.uslider.value()
elif (self.ui.flux_button.isChecked()):
self.ui.uslider.setEnabled(0)
self.ui.vslider.setEnabled(0)
self.s = self.ui.rhoslider.value()
elif (self.ui.poltor_button.isChecked()):
self.ui.rhoslider.setEnabled(0)
self.ui.vslider.setEnabled(0)
self.u = self.ui.uslider.value()
elif (self.ui.RZ_button.isChecked()):
self.ui.rhoslider.setEnabled(0)
self.ui.uslider.setEnabled(0)
self.v = self.ui.vslider.value()
elif (self.ui.ThreeD_button.isChecked()):
self.ui.uslider.setEnabled(0)
self.ui.vslider.setEnabled(0)
self.s = self.ui.rhoslider.value()
self.update_plot(self)
def update_plot(self,i):
#self.ui.plot_widget.addWidget(self.canvas)
plot_name = self.ui.PlotList.currentText();
self.fig.clf()
#self.fig.delaxes(self.ax)
self.ax = self.fig.add_subplot(111)
if (plot_name == 'Summary'):
print(plot_name)
elif (plot_name == 'Iota'):
self.ax.plot(self.nflux,self.vmec_data['iotaf'])
self.ax.set_xlabel('Normalized Flux')
self.ax.set_ylabel('iota')
self.ax.set_title('Rotational Transform')
#self.ax.set(xlabel='s',ylabel='iota',aspect='square')
elif (plot_name == 'q'):
self.ax.plot(self.nflux,1.0/self.vmec_data['iotaf'])
self.ax.set_xlabel('Normalized Flux')
self.ax.set_ylabel('q')
self.ax.set_title('Safety Factor')
elif (plot_name == 'Pressure'):
self.ax.plot(self.nflux,self.vmec_data['presf']/1000)
self.ax.set_xlabel('Normalized Flux')
self.ax.set_ylabel('Pressure [kPa]')
self.ax.set_title('Pressure Profile')
elif (plot_name == '<Buco>'):
self.ax.plot(self.nflux,self.vmec_data['buco'])
self.ax.set_xlabel('Normalized Flux')
self.ax.set_ylabel('<B^u> [T]')
self.ax.set_title('Flux surface Averaged B^u')
elif (plot_name == '<Bvco>'):
self.ax.plot(self.nflux,self.vmec_data['bvco'])
self.ax.set_xlabel('Normalized Flux')
self.ax.set_ylabel('<B^v> [T]')
self.ax.set_title('Flux surface Averaged B^v')
elif (plot_name == '<jcuru>'):
self.ax.plot(self.nflux,self.vmec_data['jcuru']/1000)
self.ax.set_xlabel('Normalized Flux')
self.ax.set_ylabel('<j^u> [kA/m^2]')
self.ax.set_title('Flux surface Averaged j^u')
elif (plot_name == '<jcurv>'):
self.ax.plot(self.nflux,self.vmec_data['jcurv']/1000)
self.ax.set_xlabel('Normalized Flux')
self.ax.set_ylabel('<j^v> [kA/m^2]')
self.ax.set_title('Flux surface Averaged j^v')
elif (plot_name == '<j.B>'):
self.ax.plot(self.nflux,self.vmec_data['jdotb']/1000)
self.ax.set_xlabel('Normalized Flux')
self.ax.set_ylabel('<j.B> [T*kA/m^2]')
self.ax.set_title('Flux surface Averaged j.B')
elif (plot_name == 'LPK'):
self.ax.plot(self.r[self.ns-1,:,0],self.z[self.ns-1,:,0],color='red')
self.ax.plot(self.r[0,0,0],self.z[0,0,0],'+',color='red')
self.ax.plot(self.r[self.ns-1,:,int(self.nv2/4)],self.z[self.ns-1,:,int(self.nv2/4)],color='green')
self.ax.plot(self.r[0,0,int(self.nv2/4)],self.z[0,0,int(self.nv2/4)],'+',color='green')
self.ax.plot(self.r[self.ns-1,:,int(self.nv2/2)],self.z[self.ns-1,:,int(self.nv2/2)],color='blue')
self.ax.plot(self.r[0,0,int(self.nv2/2)],self.z[0,0,int(self.nv2/2)],'+',color='blue')
self.ax.set_xlabel('R [m]')
self.ax.set_ylabel('Z [m]')
self.ax.set_title('LPK Plot')
self.ax.set_aspect('equal')
elif (plot_name[0] == '-'):
print(plot_name)
else:
# First load the value based on plot
if (plot_name=='|B|'):
val = self.b
elif (plot_name=='sqrt(g)'):
val = self.g
elif (plot_name=='B^u'):
val = self.bu
elif (plot_name=='B^v'):
val = self.bv
elif (plot_name=='B_s'):
val = self.b_s
elif (plot_name=='B_u'):
val = self.b_u
elif (plot_name=='B_v'):
val = self.b_v
elif (plot_name=='j^u'):
val = self.cu/self.g
elif (plot_name=='j^v'):
val = self.cv/self.g
elif (plot_name=='jll'):
val = calc_jll(self.vmec_data, self.theta, self.zeta)
elif (plot_name=='j.B'):
val = (self.cu*self.bu+self.cv*self.bv)/self.g
# Now handle the type of plot
if (self.ui.rho_button.isChecked()):
self.ax.plot(self.nflux,val[:,self.u,self.v])
self.ax.set_xlabel('Normalized Flux')
elif (self.ui.pol_button.isChecked()):
self.ax.plot(self.theta,val[self.s,:,self.v])
self.ax.set_xlabel('Poloidal Angle [rad]')
elif (self.ui.tor_button.isChecked()):
self.ax.plot(self.zeta2,val[self.s,self.u,0:self.nv2+1])
self.ax.set_xlabel('Toroidal Angle [rad]')
elif (self.ui.flux_button.isChecked()):
self.ax.pcolormesh(np.squeeze(val[self.s,:,0:self.nv2+1]),cmap='jet',shading='gouraud')
self.ax.set_xlabel('Toroidal Angle [rad]')
self.ax.set_ylabel('Poloidal Angle [rad]')
elif (self.ui.poltor_button.isChecked()):
self.ax.pcolormesh(np.squeeze(val[:,self.u,0:self.nv2+1]),cmap='jet',shading='gouraud')
self.ax.set_xlabel('Toroidal Angle [rad]')
self.ax.set_ylabel('Normalized Flux')
elif (self.ui.RZ_button.isChecked()):
#self.ax.pcolormesh(self.r[:,:,self.v],self.z[:,:,self.v],val[:,:,self.v],cmap='jet',shading='gouraud')
cax = self.ax.pcolor(self.r[:,:,self.v],self.z[:,:,self.v],val[:,:,self.v],cmap='jet')
self.fig.colorbar(cax)
self.ax.set_xlabel('R [m]')
self.ax.set_ylabel('Z [m]')
self.ax.set_aspect('equal')
elif (self.ui.ThreeD_button.isChecked()):
self.fig.delaxes(self.ax)
self.ax = isotoro(self.r,self.z,self.zeta,self.s,val,fig=self.fig)
self.ax.grid(False)
self.ax.set_axis_off()
self.canvas.draw()
def TransformVMEC(self, i):
self.nflux = np.ndarray((self.ns,1))
self.theta = np.ndarray((self.nu,1))
self.zeta = np.ndarray((self.nv,1))
self.zeta2 = np.ndarray((self.nv2+1,1))
for j in range(self.ns): self.nflux[j]=j/(self.ns-1)
for j in range(self.nu): self.theta[j]=2*pi*j/(self.nu-1)
for j in range(self.nv): self.zeta[j]=2*pi*j/((self.nv-1))
self.zeta2=self.zeta[0:self.nv2+1]
self.r=cfunct(self.theta,self.zeta,self.vmec_data['rmnc'],self.vmec_data['xm'],self.vmec_data['xn'])
self.z=sfunct(self.theta,self.zeta,self.vmec_data['zmns'],self.vmec_data['xm'],self.vmec_data['xn'])
self.b=cfunct(self.theta,self.zeta,self.vmec_data['bmnc'],self.vmec_data['xm_nyq'],self.vmec_data['xn_nyq'])
self.g=cfunct(self.theta,self.zeta,self.vmec_data['gmnc'],self.vmec_data['xm_nyq'],self.vmec_data['xn_nyq'])
self.bu=cfunct(self.theta,self.zeta,self.vmec_data['bsupumnc'],self.vmec_data['xm_nyq'],self.vmec_data['xn_nyq'])
self.bv=cfunct(self.theta,self.zeta,self.vmec_data['bsupvmnc'],self.vmec_data['xm_nyq'],self.vmec_data['xn_nyq'])
self.cu=cfunct(self.theta,self.zeta,self.vmec_data['currumnc'],self.vmec_data['xm_nyq'],self.vmec_data['xn_nyq'])
self.cv=cfunct(self.theta,self.zeta,self.vmec_data['currvmnc'],self.vmec_data['xm_nyq'],self.vmec_data['xn_nyq'])
self.b_s=sfunct(self.theta,self.zeta,self.vmec_data['bsubsmns'],self.vmec_data['xm_nyq'],self.vmec_data['xn_nyq'])
self.b_u=cfunct(self.theta,self.zeta,self.vmec_data['bsubumnc'],self.vmec_data['xm_nyq'],self.vmec_data['xn_nyq'])
self.b_v=cfunct(self.theta,self.zeta,self.vmec_data['bsubvmnc'],self.vmec_data['xm_nyq'],self.vmec_data['xn_nyq'])
def plot_to_file(self,i):
text = self.ui.saveas_filename.toPlainText();
self.fig.savefig('./'+text, dpi=300)
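# Illustrative helper (not wired into the GUI): evaluates the VMEC cosine Fourier
# series at a single (theta, zeta) point on surface index js, which is conceptually
# what cfunct does over the whole grid, assuming rmnc is stored as (ns, mnmax):
# R(s, theta, zeta) = sum_mn rmnc[s, mn] * cos(xm[mn]*theta - xn[mn]*zeta)
def eval_rmnc_point(vmec_data, theta, zeta, js=0):
    angle = vmec_data['xm']*theta - vmec_data['xn']*zeta
    return np.sum(vmec_data['rmnc'][js, :]*np.cos(angle))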
if __name__ == "__main__":
app = QApplication(sys.argv)
window = MyApp()
window.show()
sys.exit(app.exec_())
``` |
{
"source": "joseluizbrits/sobre-python",
"score": 3
} |
#### File: cfb-cursos/agenda/__init__.py
```python
import sqlite3
from sqlite3 import Error
from os import system
def ConexaoBanco():
caminho = "C:\\Users\\VITOR\\Documents\\BRITS\\Programação\\agenda.db"
con = None
try:
con = sqlite3.connect(caminho)
except Error as ex:
print(ex)
return con
def query(conexao, sql):
try:
c = conexao.cursor()
c.execute(sql)
conexao.commit()
except Error as ex:
print(ex)
else:
print('\033[32m''Operação realizada com sucesso''\033[m')
def consultar(conexao, sql):
c = conexao.cursor()
c.execute(sql)
res = c.fetchall()
return res
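# Illustrative alternative (not used by the menus below): sqlite3 also accepts
# parameterized queries, which avoid quoting problems with user input; the helper
# name below is hypothetical and mirrors the query() function above.
def query_parametrizada(conexao, sql, parametros):
    try:
        c = conexao.cursor()
        c.execute(sql, parametros)
        conexao.commit()
    except Error as ex:
        print(ex)
    else:
        print('\033[32m''Operação realizada com sucesso''\033[m')
# e.g. query_parametrizada(con, "INSERT INTO tb_contatos (T_NOMECONTATO, T_TELEFONECONTATO, T_EMAILCONTATO) VALUES (?, ?, ?)", (nome, telefone, email))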
def menuPrincipal():
print('1 - Inserir novos registros')
print('2 - Deletar Registro')
print('3 - Atualizar Registro')
print('4 - Consultar ID')
print('5 - Consultar nomes')
print('6 - Sair')
def menuInserir():
vnome = str(input('Digite o nome: '))
vtelefone = str(input('Digite o telefone: '))
vemail = str(input('Digite o e-mail: '))
    vsql = f"INSERT INTO tb_contatos (T_NOMECONTATO, T_TELEFONECONTATO, T_EMAILCONTATO) VALUES ('{vnome}', '{vtelefone}', '{vemail}')"
vcon = ConexaoBanco()
query(vcon, vsql)
vcon.close()
def menuDeletar():
vid = str(input('Digite o ID do registro a ser deletado: '))
vsql = f"DELETE FROM tb_contatos WHERE N_IDCONTATO = {vid}"
vcon = ConexaoBanco()
query(vcon, vsql)
vcon.close()
def menuAtualizar():
vid = str(input('Digite o ID do registro a ser alterado: '))
vcon = ConexaoBanco()
r = consultar(vcon, f"SELECT * FROM tb_contatos WHERE N_IDCONTATO = {vid}")
rnome = r[0][1]
rtelefone = r[0][2]
remail = r[0][3]
vnome = str(input('Digite o nome: '))
vtelefone = str(input('Digite o telefone: '))
vemail = str(input('Digite o e-mail: '))
    # mantém o valor antigo quando o usuário deixa o campo em branco
    if len(vnome) == 0:
        vnome = rnome
    if len(vtelefone) == 0:
        vtelefone = rtelefone
    if len(vemail) == 0:
        vemail = remail
vsql = f"UPDATE tb_contatos SET T_NOMECONTATO = '{vnome}', T_TELEFONECONTATO = '{vtelefone}', T_EMAILCONTATO = '{vemail}' WHERE N_IDCONTATO = {vid}"
query(vcon, vsql)
vcon.close()
def menuConsultarID():
vsql = "SELECT * FROM tb_contatos"
vcon = ConexaoBanco()
res = consultar(vcon, vsql)
vlim = 10
vcont = 0
for r in res:
print(f'ID:{r[0]:_<3} Nome:{r[1]:_<30} Telefone:{r[2]:_<14} E-mail:{r[3]:_<30}')
vcont += 1
if vcont >= vlim:
vcont = 0
system('pause')
print('Fim da lista')
vcon.close()
system('pause')
def menuConsultarNomes():
vnome = str(input('Digite o nome: '))
vsql = f"SELECT * FROM tb_contatos WHERE T_NOMECONTATO LIKE '%{vnome}%'"
vcon = ConexaoBanco()
res = consultar(vcon, vsql)
vlim = 10
vcont = 0
for r in res:
print(f'ID:{r[0]:_<3} Nome:{r[1]:_<30} Telefone:{r[2]:_<14} E-mail:{r[3]:_<30}')
vcont += 1
if vcont >= vlim:
vcont = 0
system('pause')
print('Fim da lista')
vcon.close()
system('pause')
opc = 0
while opc != 6:
menuPrincipal()
opc = int(input('Digite um opção: '))
if opc == 1:
menuInserir()
elif opc == 2:
menuDeletar()
elif opc == 3:
menuAtualizar()
elif opc == 4:
menuConsultarID()
elif opc == 5:
menuConsultarNomes()
elif opc == 6:
print('\033[1m''PROGRAMA FINALIZADO''\033[m')
else:
print('\033[31m''Opção inválida''\033[m')
system('pause')
```
#### File: sobre-python/cfb-cursos/combobox.py
```python
from tkinter import *
from tkinter import ttk
def imprimirEsporte():
ve = cb_esportes.get()
print(f'Esporte: {ve}')
app = Tk()
app.title('BLOCO')
app.geometry('500x300')
listaEsportes = ['Futebol', 'Vôlei', 'Basquete']
lb_esportes = Label(app, text='Esportes')
lb_esportes.pack()
cb_esportes = ttk.Combobox(app, values=listaEsportes)
cb_esportes.set('Futebol') # set('Futebol') para definir o Futebol como padrão
cb_esportes.pack()
btn_esportes = Button(app, text='Esporte selecionado', command=imprimirEsporte)
btn_esportes.pack()
app.mainloop()
```
#### File: sobre-python/cfb-cursos/radiobutton.py
```python
from tkinter import *
def imprimirEsporte():
ve = vesporte.get()
if ve == 'f':
print('Esporte Futebol')
elif ve == 'v':
print('Esporte Vôlei')
elif ve == 'b':
print('Esporte Basquete')
else:
print('Selecione um esporte')
app = Tk()
app.title('BLOCO')
app.geometry('500x300')
vesporte = StringVar()
lb_esportes = Label(app, text='Esportes')
lb_esportes.pack()
rb_futebol = Radiobutton(app, text='Futebol', value='f', variable=vesporte)
rb_futebol.pack()
rb_volei = Radiobutton(app, text='Vôlei', value='v', variable=vesporte)
rb_volei.pack()
rb_basquete = Radiobutton(app, text='Basquete', value='b', variable=vesporte)
rb_basquete.pack()
btn_esporte = Button(app, text='Esporte selecionado', command=imprimirEsporte)
btn_esporte.pack()
app.mainloop()
```
#### File: sobre-python/curso-em-video/ex098.2.py
```python
from time import sleep
def contador(i, f, p):
if p < 0:
p *= -1
if p == 0:
p = 1
print('-=' * 20)
print('\033[1m'f'Contagem de {i} até {f} de {p} em {p}''\033[m')
sleep(2.5)
if i < f:
cont = i
        while cont <= f:
print(f'{cont} ', end='', flush=True)
sleep(0.5)
cont += p
print('FIM!')
else:
cont = i
while cont >= f:
print(f'{cont} ', end='', flush=True)
sleep(0.5)
cont += p
print('FIM!')
# Programa Principal
contador(1, 10, 1)
contador(10, 0, 2)
print('-=' * 20)
print('\033[1m''Agora é sua vez de personalizar a contagem!''\033[m')
ini = int(input('Início: '))
fim = int(input('Fim: '))
pas = int(input('Passo: '))
contador(ini, fim, pas)
```
#### File: lib/arquivo/__init__.py
```python
from ex115.lib.interface import *
def arquivoExiste(nome):
try:
a = open(nome, 'rt') # open é para abrir um arquivo e 'rt'
a.close() # significa read (ler) um text (texto)
except FileNotFoundError:
return False
else:
return True
def criarArquivo(nome):
try:
a = open(nome, 'wt+') # 'wt+' significa write (escrever) um text (texto) e o
        a.close() # + é para criar um arquivo de texto, caso ele não exista
except:
print('\033[31m''Houve um erro na criação do arquivo!\033[m')
else:
print('\033[32m'f'Arquivo {nome} criado com sucesso!\033[m')
def lerArquivo(nome):
try:
a = open(nome, 'rt')
except:
print('\033[31m''Erro ao ler o arquivo!')
else:
cabeçalho('PESSOAS CADASTRADAS')
for linha in a:
dado = linha.split(';')
dado[1] = dado[1].replace('\n', '')
print(f'{dado[0]:<30}{dado[1]:>3} anos')
finally:
a.close()
def cadastrar(arq, nome='desconhecido', idade=0):
try:
a = open(arq, 'at') # 'at' significa append (anexar) text (texto) num arquivo de texto
except:
print('\033[31m''Houve um ERRO na abertura do arquivo!\033[m')
else:
try:
a.write(f'{nome}; {idade}\n')
except:
print('\033[31m''Houve um erro na hora de escrever os dados!')
else:
print('\033[32m'f'Novo registro de {nome} adicionado')
a.close()
``` |
{
"source": "joselupy/ap-latam",
"score": 2
} |
#### File: aplatam/old/_2_balance_classes.py
```python
import os
import sys
import numpy as np
import glob
from shutil import copyfile
import shutil
#train_dir = 'data/hires/256_256_0/all/'
aug = 2
def cnn_training_dir_structure(train_dir):
np.random.seed(33)
pos_files = glob.glob(os.path.join(train_dir, 't', '*.jpg'))
neg_files = glob.glob(os.path.join(train_dir, 'f', '*.jpg'))
training_pos_files = set(
np.random.choice(
pos_files, int(round(len(pos_files) * 0.75)), replace=False))
training_neg_files = set(
np.random.choice(
neg_files, int(round(len(pos_files) * aug * 0.75)), replace=False))
testing_pos_files = set(pos_files) - set(training_pos_files)
testing_neg_files = set(
np.random.choice(
list(set(neg_files) - set(training_neg_files)),
len(testing_pos_files),
replace=False))
#testing_pos_files = [pos_file for pos_file in pos_files if not (pos_file in training_pos_files)]
#testing_neg_files = [neg_file for neg_file in neg_files if not (neg_file in training_neg_files)]
print(len(training_pos_files), len(training_neg_files))
shutil.rmtree('data_keras/train_hires_balanced/')
shutil.rmtree('data_keras/validation_hires_balanced/')
labels = {
'training_pos_files': training_pos_files,
'training_neg_files': training_neg_files,
'testing_pos_files': testing_pos_files,
'testing_neg_files': testing_neg_files
}
for label in list(labels.keys()):
for fname in labels[label]:
base_fname = os.path.basename(fname)
splitted_label = label.split("_")
if splitted_label[0] == 'training' and splitted_label[1] == 'pos':
if not os.path.exists('data_keras/train_hires_balanced/vya/'):
os.makedirs('data_keras/train_hires_balanced/vya/')
copyfile(
fname,
os.path.join(
'data_keras/train_hires_balanced/vya/{0}'.format(
base_fname)))
if splitted_label[0] == 'training' and splitted_label[1] == 'neg':
if not os.path.exists(
'data_keras/train_hires_balanced/no_vya/'):
os.makedirs('data_keras/train_hires_balanced/no_vya/')
copyfile(
fname,
os.path.join(
'data_keras/train_hires_balanced/no_vya/{0}'.format(
base_fname)))
if splitted_label[0] == 'testing' and splitted_label[1] == 'pos':
if not os.path.exists(
'data_keras/validation_hires_balanced/vya/'):
os.makedirs('data_keras/validation_hires_balanced/vya/')
copyfile(
fname,
os.path.join(
'data_keras/validation_hires_balanced/vya/{0}'.format(
base_fname)))
if splitted_label[0] == 'testing' and splitted_label[1] == 'neg':
if not os.path.exists(
'data_keras/validation_hires_balanced/no_vya/'):
os.makedirs('data_keras/validation_hires_balanced/no_vya/')
copyfile(
fname,
os.path.join(
'data_keras/validation_hires_balanced/no_vya/{0}'.
format(base_fname)))
print('Done')
def main(args):
cnn_training_dir_structure(args.input_dir)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Creates directory structure for cnn training',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input_dir', help='Path where jpg tiles are stored')
args = parser.parse_args()
main(args)
```
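The vya/no_vya folder layout written above is exactly what Keras directory generators expect; a hedged sketch of how it might be consumed with ImageDataGenerator.flow_from_directory (tile size, batch size and class_mode are assumptions, not taken from the script):
```python
# Possible way to consume the directories created above with Keras generators.
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255)

train_gen = datagen.flow_from_directory(
    'data_keras/train_hires_balanced/',        # contains vya/ and no_vya/
    target_size=(256, 256),                    # assumed tile size
    batch_size=32,
    class_mode='binary')

val_gen = datagen.flow_from_directory(
    'data_keras/validation_hires_balanced/',
    target_size=(256, 256),
    batch_size=32,
    class_mode='binary')
```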
#### File: aplatam/old/_4_predict.py
```python
import glob
import os
import sys
from functools import partial
import keras.models
import numpy as np
import pyproj
import rasterio as rio
import tqdm
from keras.preprocessing.image import ImageDataGenerator
from shapely.geometry import box, mapping, shape
from shapely.ops import transform
from skimage import exposure
from .util import sliding_windows, window_to_bounds
def write_geojson(shapes_and_probs, output_path):
import json
from shapely.geometry import mapping
d = {'type': 'FeatureCollection', 'features': []}
for shape, prob in shapes_and_probs:
project = partial(
pyproj.transform,
pyproj.Proj(init='epsg:3857'),
pyproj.Proj(init='epsg:4326'))
shape_wgs = transform(project, shape)
feat = {
'type': 'Feature',
'geometry': mapping(shape_wgs),
'properties': {
'prob': prob
}
}
d['features'].append(feat)
with open(output_path, 'w') as f:
f.write(json.dumps(d))
def predict_image(fname, model, size, step_size=None, rescale_intensity=False):
if not step_size:
step_size = size
with rio.open(fname) as src:
imgs = []
windows = []
for window in sliding_windows(size, step_size, src.shape):
window_box = box(*window_to_bounds(window, src.transform))
img = np.dstack([src.read(b, window=window) for b in range(1, 4)])
if rescale_intensity:
low, high = np.percentile(img, (2, 98))
img = exposure.rescale_intensity(img, in_range=(low, high))
img = img / 255.
preds = model.predict(np.array([img]))
preds_b = preds[:, 0]
for i in np.nonzero(preds_b > 0.3)[0]:
print((window, float(preds_b[i])))
windows.append((window_box, float(preds_b[i])))
#if cur_windows:
# name, _ = os.path.splitext(os.path.basename(fname))
# output = '/tmp/{}_windows.geojson'.format(name)
# write_geojson(cur_windows, output)
return windows
def predict_images(input_dir, output, model, size, **kwargs):
all_windows = []
files = glob.glob(os.path.join(input_dir, '**/*.tif'), recursive=True)
print(files)
for fname in tqdm.tqdm(files):
all_windows.extend(predict_image(fname, model, size, **kwargs))
print('Done! Found {} matching windows on all files'.format(
len(all_windows)))
write_geojson(all_windows, output)
print('{} written'.format(output))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='Classify a set of hi-res images',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('model_file', help='HDF5 Keras model file path')
parser.add_argument(
'input_dir', help='Path where test hi-res images are stored')
parser.add_argument(
'--step-size',
default=None,
help='Step size of sliding windows (if none, same as size)')
parser.add_argument(
'--rescale-intensity',
action='store_true',
default=False,
help='Rescale intensity with 2-98 percentiles')
parser.add_argument('output', help='GeoJSON output file')
args = parser.parse_args()
datagen = ImageDataGenerator(rescale=1. / 255)
model = keras.models.load_model(args.model_file)
img_size = model.input_shape[1]
step_size = None
if args.step_size:
step_size = int(args.step_size)
predict_images(
args.input_dir,
args.output,
model,
img_size,
step_size=step_size,
rescale_intensity=args.rescale_intensity)
``` |
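The .util helpers imported above are not shown in this file; purely as an illustration, a hypothetical sliding_windows generator consistent with how it is called (rasterio accepts ((row_start, row_stop), (col_start, col_stop)) window tuples) could look like this:
```python
# Hypothetical sliding_windows generator; the real aplatam util module may differ.
def sliding_windows(size, step_size, shape):
    """Yield square windows of `size` pixels every `step_size` pixels."""
    height, width = shape
    for row in range(0, height - size + 1, step_size):
        for col in range(0, width - size + 1, step_size):
            yield (row, row + size), (col, col + size)
```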
{
"source": "joseLus007/final_pro",
"score": 2
} |
#### File: Projeto_Joaninha/pedidos/models.py
```python
from django.db import models
from django.urls import reverse_lazy
class Pedidos(models.Model):
nome_cliente=models.CharField('nome_cliente',max_length=100)
email=models.CharField('E-mail',max_length=200,unique=True)
lista_pedidos=models.CharField('pedidos',max_length=500)
forma_de_pagamento=models.CharField('forma_de_pagamento', max_length=100)
endereco=models.CharField('endereço',max_length=2000)
numero_contato=models.CharField('contato',max_length=12)
class Meta:
ordering=("lista_pedidos",)
def __str__(self):
return self.lista_pedidos
def get_absolute_url(self):
return reverse_lazy("pedidos:pedidos_detail",kwargs={'pk':self.pk})
```
#### File: Projeto_Joaninha/produto/views.py
```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views.generic import CreateView,UpdateView
from .models import Produto
from .forms import ProdutoForm
@login_required
def Produto_list(request):
template_name='produto_list.html'
objects=Produto.objects.all()
context={'object_list':objects}
return render(request, template_name, context)
def Produto_detail(request,pk):
template_name='produto_detail.html'
obj=Produto.objects.get(pk=pk)
context={'object':obj}
return render(request, template_name, context)
def Produto_add(request):
template_name='produto_form.html'
return render(request,template_name)
class ProdutoCreate(CreateView):
model=Produto
template_name='produto_form.html'
form_class=ProdutoForm
class ProdutoUpdate(UpdateView):
model=Produto
template_name='produto_form.html'
form_class=ProdutoForm
``` |
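These views still need URL routes (and the create/update views redirect through success_url or Produto.get_absolute_url after saving); a minimal urls.py sketch, where the paths and route names are assumptions:
```python
# Hypothetical urls.py wiring for the views above.
from django.urls import path
from . import views

app_name = 'produto'

urlpatterns = [
    path('', views.Produto_list, name='produto_list'),
    path('<int:pk>/', views.Produto_detail, name='produto_detail'),
    path('add/', views.ProdutoCreate.as_view(), name='produto_add'),
    path('<int:pk>/edit/', views.ProdutoUpdate.as_view(), name='produto_edit'),
]
```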
{
"source": "joseLus007/Repositorio_projeto",
"score": 2
} |
#### File: Ambiente_PRO/Projeto/create_data_pedidos.py
```python
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Projeto_Joaninha.settings")
django.setup()
from Projeto_Joaninha.pedidos.models import Pedidos  # models must be imported after django.setup()
def main ():
    t = Pedidos()
    t.nome_cliente = '<NAME>'
    t.email = '<EMAIL>'
    t.prato = 'bife'
    t.acompanhamentos = 'batata'
    t.saladas = 'Sem Salada'
    t.forma_de_pagamento = 'Cartão'
    t.endereco = 'rua nao sei'
    t.numero_contato = '9121222'
    t.save()  # without save() the record is never written to the database
main()
```
#### File: Ambiente_PRO/Projeto/create_data.py
```python
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Projeto_Joaninha.settings")
django.setup()
import string
import timeit
from random import choice, random, randint
from Projeto_Joaninha.produto.models import Produto
class Utils:
''' Métodos genéricos. '''
'para gerar numeros aleatorios'
@staticmethod
def gen_digits(max_length):
return str(''.join(choice(string.digits) for i in range(max_length)))
class ProdutoClass:
@staticmethod
def criar_produtos(produtos):
Produto.objects.all().delete()
aux = []
for produto in produtos:
data = dict(
produto=produto,
ncm=Utils.gen_digits(8),
preco=random() * randint(10, 50),
estoque=randint(10, 200),
)
obj =Produto(**data)
aux.append(obj)
Produto.objects.bulk_create(aux)
produtos = (
'Arroz',
'feijâo',
'Batata palha',
'Batata',
'farinha de milho',
'sal',
'tomate',
'leite',
'mostada',
'peixe',
'Frango',
'Porco',
'Bovina',
'Manteiga',
'Hamburguer',
'salame',
)
tic = timeit.default_timer()
ProdutoClass.criar_produtos(produtos)
toc = timeit.default_timer()
print('Tempo:', toc - tic)
```
#### File: Projeto_Joaninha/estoque/models.py
```python
from django.contrib.auth.models import User
from django.db import models
from Projeto_Joaninha.core.models import TimeStampedModel
from Projeto_Joaninha.produto.models import Produto
MOVIMENTO = (
('e', 'entrada'),
('s', 'saida'),
)
class Estoque(TimeStampedModel):
funcionario=models.ForeignKey(User,on_delete=models.CASCADE)
nf=models.PositiveIntegerField('nota fiscal', null=True, blank=True)
movimento=models.CharField(max_length=1, choices=MOVIMENTO)
class Meta:
ordering = ('-created',)
def __str__(self):
return str(self.pk)
class EstoqueItens(models.Model):
estoque=models.ForeignKey(Estoque,on_delete=models.CASCADE)
produto=models.ForeignKey(Produto,on_delete=models.CASCADE)
quantidade=models.PositiveIntegerField()
saldo=models.PositiveIntegerField()
class Meta:
ordering = ('pk',)
def __str__(self):
return '{} - {} - {}'.format(self.pk, self.estoque.pk, self.produto)
``` |
{
"source": "joselusl/ars_lib_helpers",
"score": 3
} |
#### File: ars_lib_helpers/source/ars_lib_helpers.py
```python
import numpy as np
from numpy import *
import os
import math  # math.cos/math.sin/math.atan are used below but math was never imported
# ROS
import rospy
import tf_conversions as tf
def normalize(v):
norm = np.linalg.norm(v)
if norm < 0.00001:
return v
return v / norm
class Quaternion:
@staticmethod
def normalize(v):
return normalize(v)
@staticmethod
def zerosQuat():
return Quaternion.normalize(np.array([1.0, 0.0, 0.0, 0.0], dtype=float))
@staticmethod
def zerosQuatSimp():
return Quaternion.normalize(np.array([1.0, 0.0], dtype=float))
@staticmethod
def setQuatSimp(v):
return Quaternion.normalize(v)
@staticmethod
def checkConsistencyQuat(v):
tol=0.2
norm = np.linalg.norm(v)
if(abs(1.0-norm)>=tol):
vout = Quaternion.zerosQuat()
else:
vout = Quaternion.normalize(v)
return vout
@staticmethod
def getSimplifiedQuatRobotAtti(robot_atti_quat):
robot_atti_quat_tf = np.roll(robot_atti_quat, -1)
robot_atti_ang = tf.transformations.euler_from_quaternion(robot_atti_quat_tf, axes='sxyz')
robot_atti_ang_yaw = robot_atti_ang[2]
robot_atti_hor_quat_tf = tf.transformations.quaternion_from_euler(0, 0, robot_atti_ang_yaw, axes='sxyz')
robot_atti_hor_quat = np.roll(robot_atti_hor_quat_tf, 1)
robot_atti_quat_simp = Quaternion.zerosQuatSimp()
robot_atti_quat_simp[0] = robot_atti_hor_quat[0]
robot_atti_quat_simp[1] = robot_atti_hor_quat[3]
robot_atti_quat_simp = Quaternion.normalize(robot_atti_quat_simp)
return robot_atti_quat_simp
@staticmethod
def quatProd(p, q):
prod = Quaternion.zerosQuat()
prod[0] = p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3]
prod[1] = p[0]*q[1] + p[1]*q[0] + p[2]*q[3] - p[3]*q[2]
prod[2] = p[0]*q[2] - p[1]*q[3] + p[2]*q[0] + p[3]*q[1]
prod[3] = p[0]*q[3] + p[1]*q[2] - p[2]*q[1] + p[3]*q[0]
return prod
@staticmethod
def quatConj(p):
conj = Quaternion.zerosQuat()
conj[0] = p[0]
conj[1] = -p[1]
conj[2] = -p[2]
conj[3] = -p[3]
return conj
@staticmethod
def quatSimpProd(q1, q2):
qr = Quaternion.zerosQuatSimp()
qr[0] = q1[0]*q2[0]-q1[1]*q2[1]
qr[1] = q1[0]*q2[1]+q1[1]*q2[0]
return qr
@staticmethod
def quatSimpConj(p):
conj = Quaternion.zerosQuatSimp()
conj[0] = p[0]
conj[1] = -p[1]
return conj
@staticmethod
def computeDiffQuatSimp(atti_quat_simp_1, atti_quat_simp_2):
error_quat_simp = Quaternion.zerosQuatSimp()
error_quat_simp[0] = atti_quat_simp_1[0]*atti_quat_simp_2[0]+atti_quat_simp_1[1]*atti_quat_simp_2[1]
error_quat_simp[1] = atti_quat_simp_1[1]*atti_quat_simp_2[0]-atti_quat_simp_1[0]*atti_quat_simp_2[1]
if(error_quat_simp[0] < 0):
error_quat_simp = -1 * error_quat_simp
return error_quat_simp
@staticmethod
def quatSimpFromAngle(angle):
quatSimp = Quaternion.zerosQuatSimp()
quatSimp[0] = math.cos(0.5*angle)
quatSimp[1] = math.sin(0.5*angle)
if(quatSimp[0] < 0):
quatSimp = -1 * quatSimp
return quatSimp
@staticmethod
def angleFromQuatSimp(quatSimp):
angle = 0.0
if(quatSimp[0] < 0):
quatSimp = -1 * quatSimp
angle = 2.0 * math.atan(quatSimp[1]/quatSimp[0])
return angle
@staticmethod
def angleDiffFromQuatSimp(atti_quat_simp_1, atti_quat_simp_2):
return Quaternion.angleFromQuatSimp(Quaternion.computeDiffQuatSimp(atti_quat_simp_1, atti_quat_simp_2))
@staticmethod
def errorFromQuatSimp(quatSimp):
error = 0.0
if(quatSimp[0] < 0):
quatSimp = -1 * quatSimp
error = 2.0 * quatSimp[1].item()
return error
@staticmethod
def errorDiffFromQuatSimp(atti_quat_simp_1, atti_quat_simp_2):
return Quaternion.errorFromQuatSimp(Quaternion.computeDiffQuatSimp(atti_quat_simp_1, atti_quat_simp_2))
@staticmethod
def rotMat2dFromAngle(angle):
rotMat = np.zeros((2,2), dtype=float)
rotMat[0,0] = math.cos(angle)
rotMat[0,1] = -math.sin(angle)
rotMat[1,0] = math.sin(angle)
rotMat[1,1] = math.cos(angle)
return rotMat
@staticmethod
def rotMat3dFromAngle(angle):
rotMat = np.zeros((3,3), dtype=float)
rotMat[0:2, 0:2] = Quaternion.rotMat2dFromAngle(angle)
rotMat[2,2] = 1.0
return rotMat
@staticmethod
def rotMat2dFromQuatSimp(quatSimp):
rotMat = Quaternion.rotMat2dFromAngle(Quaternion.angleFromQuatSimp(quatSimp))
return rotMat
@staticmethod
def rotMat3dFromQuatSimp(quatSimp):
rotMat = Quaternion.rotMat3dFromAngle(Quaternion.angleFromQuatSimp(quatSimp))
return rotMat
@staticmethod
def diffRotMat3dWrtAngleFromAngle(angle):
diffRotMat = np.zeros((3,3), dtype=float)
diffRotMat[0,0] = -math.sin(angle)
diffRotMat[0,1] = -math.cos(angle)
diffRotMat[1,0] = math.cos(angle)
diffRotMat[1,1] = -math.sin(angle)
diffRotMat[2,2] = 0.0
return diffRotMat
class Pose:
position = np.zeros((3,), dtype=float)
attitude_quat = np.array([1.0, 0.0, 0.0, 0.0], dtype=float)
def __init__(self):
self.position = np.zeros((3,), dtype=float)
self.attitude_quat = np.array([1.0, 0.0, 0.0, 0.0], dtype=float)
return
class PoseSimp:
parent_frame = ''
child_frame = ''
position = np.zeros((3,), dtype=float)
attitude_quat_simp = np.array([1.0, 0.0], dtype=float)
def __init__(self):
        self.parent_frame = ''   # assign through self so the instance attributes are actually set
        self.child_frame = ''
        self.position = np.zeros((3,), dtype=float)
        self.attitude_quat_simp = np.array([1.0, 0.0], dtype=float)
        return
class PoseAlgebra:
@staticmethod
def computeDiffQuatSimp(atti_quat_simp_1, atti_quat_simp_2):
return Quaternion.computeDiffQuatSimp(atti_quat_simp_1, atti_quat_simp_2)
@staticmethod
def computeScalarDiffFromDiffQuatSimp(delta_atti_quat_simp):
if(delta_atti_quat_simp[0] < 0):
delta_atti_quat_simp = -1 * delta_atti_quat_simp
error_att = 2.0 * delta_atti_quat_simp[1].item()
return error_att
@staticmethod
def computeAngleDiffFromDiffQuatSimp(delta_atti_quat_simp):
if(delta_atti_quat_simp[0] < 0):
delta_atti_quat_simp = -1 * delta_atti_quat_simp
error_att = 2.0 * math.atan(delta_atti_quat_simp[1]/delta_atti_quat_simp[0])
return error_att
@staticmethod
def computePoseSimpDifference(posi_1, atti_quat_simp_1, posi_2, atti_quat_simp_2):
# Position
delta_posi = posi_1 - posi_2
# Attitude
delta_atti_quat_simp = PoseAlgebra.computeDiffQuatSimp(atti_quat_simp_1, atti_quat_simp_2)
# End
return delta_posi, delta_atti_quat_simp
@staticmethod
# pc = p1 + p2
def computePoseSimpComposition(posi_1, atti_quat_simp_1, posi_2, atti_quat_simp_2):
# Position
posi_comp = np.matmul(Quaternion.rotMat3dFromQuatSimp(atti_quat_simp_1), posi_2) + posi_1
# Attitude
atti_quat_simp_comp = Quaternion.quatSimpProd(atti_quat_simp_1, atti_quat_simp_2)
# End
return posi_comp, atti_quat_simp_comp
@staticmethod
# pc = -p1
def computePoseSimpInversion(posi_1, atti_quat_simp_1):
# Position
posi_inv = -np.matmul(np.transpose(Quaternion.rotMat3dFromQuatSimp(atti_quat_simp_1)), posi_1)
# Attitude
atti_quat_simp_inv = Quaternion.quatSimpConj(atti_quat_simp_1)
# End
return posi_inv, atti_quat_simp_inv
@staticmethod
# pc = p1 - p2
def computePoseSimpPostSubstraction(posi_1, atti_quat_simp_1, posi_2, atti_quat_simp_2):
#
posi_2_inv, atti_quat_simp_2_inv = PoseAlgebra.computePoseSimpInversion(posi_2, atti_quat_simp_2)
#
posi_subs, atti_quat_simp_subs = PoseAlgebra.computePoseSimpComposition(posi_1, atti_quat_simp_1, posi_2_inv, atti_quat_simp_2_inv)
# End
return posi_subs, atti_quat_simp_subs
@staticmethod
# pc = - p1 + p2
def computePoseSimpPreSubstraction(posi_1, atti_quat_simp_1, posi_2, atti_quat_simp_2):
#
posi_1_inv, atti_quat_simp_1_inv = PoseAlgebra.computePoseSimpInversion(posi_1, atti_quat_simp_1)
#
posi_subs, atti_quat_simp_subs = PoseAlgebra.computePoseSimpComposition(posi_1_inv, atti_quat_simp_1_inv, posi_2, atti_quat_simp_2)
# End
return posi_subs, atti_quat_simp_subs
class Conversions:
@staticmethod
def convertVelLinFromRobotToWorld(robot_velo_lin_robot, robot_atti_quat_in, flag_quat_simp=True):
robot_atti_quat = np.zeros((4,), dtype=float)
if(flag_quat_simp):
robot_atti_quat[0] = robot_atti_quat_in[0]
robot_atti_quat[3] = robot_atti_quat_in[1]
else:
robot_atti_quat = robot_atti_quat_in
robot_atti_quat = Quaternion.normalize(robot_atti_quat)
robot_atti_quat_tf = np.roll(robot_atti_quat, -1)
robot_atti_ang = tf.transformations.euler_from_quaternion(robot_atti_quat_tf, axes='sxyz')
robot_atti_ang_yaw = robot_atti_ang[2]
robot_velo_lin_world = np.zeros((3,), dtype=float)
robot_velo_lin_world[0] = math.cos(robot_atti_ang_yaw)*robot_velo_lin_robot[0]-math.sin(robot_atti_ang_yaw)*robot_velo_lin_robot[1]
robot_velo_lin_world[1] = math.sin(robot_atti_ang_yaw)*robot_velo_lin_robot[0]+math.cos(robot_atti_ang_yaw)*robot_velo_lin_robot[1]
robot_velo_lin_world[2] = robot_velo_lin_robot[2]
return robot_velo_lin_world
@staticmethod
def convertVelAngFromRobotToWorld(robot_velo_ang_robot, robot_atti_quat_in, flag_quat_simp=True):
return robot_velo_ang_robot
@staticmethod
def convertVelLinFromWorldToRobot(robot_velo_lin_world, robot_atti_quat_in, flag_quat_simp=True):
robot_atti_quat = np.zeros((4,), dtype=float)
if(flag_quat_simp):
robot_atti_quat[0] = robot_atti_quat_in[0]
robot_atti_quat[3] = robot_atti_quat_in[1]
else:
robot_atti_quat = robot_atti_quat_in
robot_atti_quat = Quaternion.normalize(robot_atti_quat)
robot_atti_quat_tf = np.roll(robot_atti_quat, -1)
robot_atti_ang = tf.transformations.euler_from_quaternion(robot_atti_quat_tf, axes='sxyz')
robot_atti_ang_yaw = robot_atti_ang[2]
robot_velo_lin_robot = np.zeros((3,), dtype=float)
robot_velo_lin_robot[0] = math.cos(robot_atti_ang_yaw)*robot_velo_lin_world[0]+math.sin(robot_atti_ang_yaw)*robot_velo_lin_world[1]
robot_velo_lin_robot[1] = -math.sin(robot_atti_ang_yaw)*robot_velo_lin_world[0]+math.cos(robot_atti_ang_yaw)*robot_velo_lin_world[1]
robot_velo_lin_robot[2] = robot_velo_lin_world[2]
return robot_velo_lin_robot
@staticmethod
def convertVelAngFromWorldToRobot(robot_velo_ang_world, robot_atti_quat_in, flag_quat_simp=True):
return robot_velo_ang_world
class Circle3D:
id_lab = None
position = None
attitude_quat_simp = None
parent_frame = None
circle_radius = None
def __init__(self):
self.id_lab = -1
self.position = np.zeros((3,), dtype=float)
self.attitude_quat_simp = Quaternion.zerosQuatSimp()
self.parent_frame = ''
self.circle_radius = 0.0
return
def isPointInCircle(point, circle_center, circle_radius):
circle_impl_equ = (point[0]-circle_center[0])**2 + (point[1]-circle_center[1])**2 - circle_radius**2
if(circle_impl_equ < 1.0):
return True
else:
return False
def distancePointCircle(point_2d, circle_center_2d, circle_radius):
distance_circle = 0.0
distance_center = np.linalg.norm(circle_center_2d-point_2d)
if(distance_center <= circle_radius):
distance_circle = 0.0
else:
distance_circle = distance_center - circle_radius
return distance_circle
def distanceSegmentCircle(point_2d_segment_1, point_2d_segment_2, circle_center_2d, circle_radius):
# LINE
v_s1 = np.zeros((2,), dtype=float)
v_s1 = point_2d_segment_2 - point_2d_segment_1
v_s1 = normalize(v_s1)
# The two waypoints are the same!
if(np.linalg.norm(v_s1) < 0.00001):
return distancePointCircle(point_2d_segment_1, circle_center_2d, circle_radius)
v_s2 = np.zeros((2,), dtype=float)
v_s2[0] = v_s1[1]
v_s2[1] = -v_s1[0]
mat_int_s1_s2 = np.array([v_s1, -v_s2]).T
sol_int_s1_s2 = np.matmul(np.linalg.inv(mat_int_s1_s2), (circle_center_2d - point_2d_segment_1))
point_int_l = sol_int_s1_s2[1] * v_s2 + circle_center_2d
dist_pi_c = abs(sol_int_s1_s2[1].item())
# SEGMENT
dist_p1_pi = np.linalg.norm(point_2d_segment_1-point_int_l)
dist_p1_p2 = np.linalg.norm(point_2d_segment_1-point_2d_segment_2)
dist_p2_pi = np.linalg.norm(point_2d_segment_2-point_int_l)
distance_circle = 0.0
if(dist_p1_pi<=dist_p1_p2 and dist_p2_pi<=dist_p1_p2):
# Case easy
if(dist_pi_c<circle_radius):
distance_circle = 0.0
elif(dist_pi_c==circle_radius):
distance_circle = 0.0
else:
distance_circle = dist_pi_c - circle_radius
else:
dist_p1_c = np.linalg.norm(point_2d_segment_1-circle_center_2d)
if(dist_p1_c<=circle_radius):
dist_p1_circ = 0.0
else:
dist_p1_circ = dist_p1_c-circle_radius
dist_p2_c = np.linalg.norm(point_2d_segment_2-circle_center_2d)
if(dist_p2_c<=circle_radius):
dist_p2_circ = 0.0
else:
dist_p2_circ = dist_p2_c-circle_radius
distance_circle = min(dist_p1_circ, dist_p2_circ)
return distance_circle
def pointOverSegment(point, point_segment_1, point_segment_2):
flag_degradated = False
point_over_segment = np.zeros((3,), dtype=float)
v_s1 = np.zeros((3,), dtype=float)
v_s1 = point_segment_2 - point_segment_1
v_s1 = normalize(v_s1)
v_1_0 = point - point_segment_1
dot_prod = np.dot(v_s1, v_1_0)
point_over_segment = dot_prod * v_s1 + point_segment_1
dist_p1_ps = np.linalg.norm(point_over_segment - point_segment_1)
dist_p2_ps = np.linalg.norm(point_over_segment - point_segment_2)
dist_p1_p2 = np.linalg.norm(point_segment_2 - point_segment_1)
if(dist_p1_ps<=dist_p1_p2 and dist_p2_ps<=dist_p1_p2):
pass
else:
flag_degradated = True
if(dist_p1_ps < dist_p2_ps):
point_over_segment = point_segment_1
else:
point_over_segment = point_segment_2
return point_over_segment, flag_degradated
``` |
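A short usage example of the yaw-only quaternion and pose helpers above; running it needs a ROS Python environment because the module imports rospy and tf_conversions, and the numbers are arbitrary.
```python
# Quick sanity check of the simplified (yaw-only) quaternion helpers above.
import math
import numpy as np
import ars_lib_helpers as helpers

yaw = math.radians(30.0)
q = helpers.Quaternion.quatSimpFromAngle(yaw)
print(helpers.Quaternion.angleFromQuatSimp(q))   # ~0.5236 rad, i.e. 30 deg back

# Compose two poses: 1 m forward with a 90 deg yaw, then 1 m forward in the new frame.
p1 = np.array([1.0, 0.0, 0.0])
q1 = helpers.Quaternion.quatSimpFromAngle(math.radians(90.0))
p2 = np.array([1.0, 0.0, 0.0])
q2 = helpers.Quaternion.zerosQuatSimp()
pc, qc = helpers.PoseAlgebra.computePoseSimpComposition(p1, q1, p2, q2)
print(pc)                                        # approximately [1.0, 1.0, 0.0]
```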
{
"source": "joselusl/ars_obstacle_avoidance_student",
"score": 2
} |
#### File: ars_obstacle_avoidance_student/source/ars_obstacle_avoidance_ros_node.py
```python
import numpy as np
from numpy import *
import os
import rospy
from ars_obstacle_avoidance_ros import *
def main():
ars_obstacle_avoidance_ros = ArsObstacleAvoidanceRos()
ars_obstacle_avoidance_ros.init()
ars_obstacle_avoidance_ros.open()
try:
ars_obstacle_avoidance_ros.run()
except rospy.ROSInterruptException:
pass
return 0
''' MAIN '''
if __name__ == '__main__':
main()
``` |
{
"source": "joselusl/ars_sim_collision_detection",
"score": 2
} |
#### File: ars_sim_collision_detection/source/ars_sim_collision_detection_ros.py
```python
import numpy as np
from numpy import *
import os
# ROS
import rospy
import rospkg
import std_msgs.msg
from std_msgs.msg import Bool
from std_msgs.msg import Header
import geometry_msgs.msg
from geometry_msgs.msg import Twist
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import TwistStamped
import visualization_msgs.msg
from visualization_msgs.msg import MarkerArray
from visualization_msgs.msg import Marker
import tf_conversions
import tf2_ros
#
import ars_lib_helpers
class ArsSimCollisionDetectionRos:
#######
# Robot size radius
robot_size_radius = 0.3
# Robot pose subscriber
robot_pose_sub = None
# Obstacles static sub
obstacles_static_sub = None
# Obstacles dynamic sub
obstacles_dynamic_sub = None
# Robot collision pub
robot_collision_pub = None
# Robot Pose
flag_robot_pose_set = False
robot_posi = None
robot_atti_quat_simp = None
# Obstacles static
obstacles_static_msg = None
# Obstacles dynamic
obstacles_dynamic_msg = None
#########
def __init__(self):
# Robot size radius
self.robot_size_radius = 0.3
#
self.flag_robot_pose_set = False
self.robot_posi = np.zeros((3,), dtype=float)
self.robot_atti_quat_simp = ars_lib_helpers.Quaternion.zerosQuatSimp()
#
self.obstacles_static_msg = MarkerArray()
#
self.obstacles_dynamic_msg = MarkerArray()
# end
return
def init(self, node_name='ars_sim_collision_detection_node'):
#
# Init ROS
rospy.init_node(node_name, anonymous=True)
# Package path
pkg_path = rospkg.RosPack().get_path('ars_sim_collision_detection')
#### READING PARAMETERS ###
# TODO
###
# End
return
def open(self):
# Subscribers
#
self.robot_pose_sub = rospy.Subscriber('robot_pose', PoseStamped, self.robotPoseCallback)
#
self.obstacles_static_sub = rospy.Subscriber('obstacles_static', MarkerArray, self.obstaclesStaticCallback)
#
self.obstacles_dynamic_sub = rospy.Subscriber('obstacles_dynamic', MarkerArray, self.obstaclesDynamicCallback)
# Publishers
#
self.robot_collision_pub = rospy.Publisher('robot_collision', Bool, queue_size=1)
# End
return
def run(self):
rospy.spin()
return
def robotPoseCallback(self, robot_pose_msg):
#
self.flag_robot_pose_set = True
# Position
self.robot_posi[0] = robot_pose_msg.pose.position.x
self.robot_posi[1] = robot_pose_msg.pose.position.y
self.robot_posi[2] = robot_pose_msg.pose.position.z
# Attitude quat simp
robot_atti_quat = ars_lib_helpers.Quaternion.zerosQuat()
robot_atti_quat[0] = robot_pose_msg.pose.orientation.w
robot_atti_quat[1] = robot_pose_msg.pose.orientation.x
robot_atti_quat[2] = robot_pose_msg.pose.orientation.y
robot_atti_quat[3] = robot_pose_msg.pose.orientation.z
self.robot_atti_quat_simp = ars_lib_helpers.Quaternion.getSimplifiedQuatRobotAtti(robot_atti_quat)
#
self.checkCollisionRobotObstacles()
#
return
def obstaclesStaticCallback(self, obstacles_static_msg):
self.obstacles_static_msg = obstacles_static_msg
#
return
def obstaclesDynamicCallback(self, obstacles_dynamic_msg):
self.obstacles_dynamic_msg = obstacles_dynamic_msg
#
return
def checkCollisionRobotObstacles(self):
flag_collision_detected = False
# Check
if(self.flag_robot_pose_set):
# Obstacles static
for obst_i_msg in self.obstacles_static_msg.markers:
if(obst_i_msg.action == 0):
# Check distance
if(obst_i_msg.type == 3):
obst_i_posi = np.zeros((3,), dtype=float)
obst_i_posi[0] = obst_i_msg.pose.position.x
obst_i_posi[1] = obst_i_msg.pose.position.y
obst_i_posi[2] = obst_i_msg.pose.position.z
obst_i_rad = obst_i_msg.scale.x/2.0
distance = np.linalg.norm(obst_i_posi-self.robot_posi)
if(distance <= obst_i_rad+self.robot_size_radius):
flag_collision_detected = True
else:
print("Unknown obstacle type:"+obst_i_msg.type)
# Obstacles dynamic
for obst_i_msg in self.obstacles_dynamic_msg.markers:
if(obst_i_msg.action == 0):
# Check distance
if(obst_i_msg.type == 3):
obst_i_posi = np.zeros((3,), dtype=float)
obst_i_posi[0] = obst_i_msg.pose.position.x
obst_i_posi[1] = obst_i_msg.pose.position.y
obst_i_posi[2] = obst_i_msg.pose.position.z
obst_i_rad = obst_i_msg.scale.x/2.0
distance = np.linalg.norm(obst_i_posi-self.robot_posi)
if(distance <= obst_i_rad+self.robot_size_radius):
flag_collision_detected = True
else:
print("Unknown obstacle type!!")
# Publish
flag_collision_detected_msg = Bool()
flag_collision_detected_msg.data = flag_collision_detected
self.robot_collision_pub.publish(flag_collision_detected_msg)
#
return
``` |
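Stripped of ROS, the collision test above reduces to comparing the centre-to-centre distance with the sum of the two radii; a standalone sketch of that predicate (not part of the package):
```python
# Standalone sketch of the distance test used in checkCollisionRobotObstacles.
import numpy as np

def in_collision(robot_posi, obstacle_posi, obstacle_radius, robot_radius=0.3):
    """True when the centre-to-centre distance is within the sum of the radii."""
    distance = np.linalg.norm(np.asarray(obstacle_posi) - np.asarray(robot_posi))
    return distance <= obstacle_radius + robot_radius

print(in_collision([0.0, 0.0, 1.0], [0.4, 0.0, 1.0], 0.2))  # True:  0.4 <= 0.5
print(in_collision([0.0, 0.0, 1.0], [1.0, 0.0, 1.0], 0.2))  # False: 1.0 >  0.5
```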
{
"source": "joselusl/ars_sim_mapper",
"score": 2
} |
#### File: ars_sim_mapper/source/ars_sim_mapper_ros.py
```python
import numpy as np
from numpy import *
import os
import copy
import math  # math.sqrt is used below but math was never imported
# ROS
import rospy
import rospkg
import std_msgs.msg
from std_msgs.msg import Header
import geometry_msgs.msg
from geometry_msgs.msg import PoseStamped
import visualization_msgs.msg
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
import tf_conversions
import tf2_ros
#
import ars_lib_helpers
class ArsSimMapperRos:
#######
# Robot frame
world_frame = None
# Covariance on mapping of position
cov_map_stat_pos = None
cov_map_dyna_pos = None
# Covariance on mapping of sizes
cov_map_stat_siz = None
cov_map_dyna_siz = None
# Robot pose subscriber
robot_pose_sub = None
# Obstacles static sub
obstacles_static_sub = None
# Obstacles dynamic sub
obstacles_dynamic_sub = None
# Obstacles detected pub
flag_pub_obstacles_detected_world = False
obstacles_detected_world_pub = None
# Obstacles static
obstacles_static_msg = None
# Obstacles dynamic
obstacles_dynamic_msg = None
# Obstacles detected
obstacles_detected_world_msg = None
# Obstacle Detection loop
# freq
obstacle_detect_loop_freq = None
# Timer
obstacle_detect_loop_timer = None
#########
def __init__(self):
# Robot frame
self.world_frame = 'world'
# Covariance on mapping of position
self.cov_map_stat_pos = {'x': 0.0001, 'y': 0.0001, 'z': 0.000001}
self.cov_map_dyna_pos = {'x': 0.01, 'y': 0.01, 'z': 0.00001}
# Covariance on mapping of sizes
self.cov_map_stat_siz = {'R': 0.0001, 'h': 0.000001}
self.cov_map_dyna_siz = {'R': 0.01, 'h': 0.0001}
#
self.obstacles_static_msg = MarkerArray()
#
self.obstacles_dynamic_msg = MarkerArray()
#
self.obstacles_detected_world_msg = MarkerArray()
# Obstacle Detection loop
# freq
self.obstacle_detect_loop_freq = 0.1
# Timer
self.obstacle_detect_loop_timer = None
# end
return
def init(self, node_name='ars_sim_mapper_node'):
#
# Init ROS
rospy.init_node(node_name, anonymous=True)
# Package path
pkg_path = rospkg.RosPack().get_path('ars_sim_mapper')
#### READING PARAMETERS ###
# TODO
###
# End
return
def open(self):
# Subscribers
#
self.obstacles_static_sub = rospy.Subscriber('obstacles_static', MarkerArray, self.obstaclesStaticCallback, queue_size=1)
#
self.obstacles_dynamic_sub = rospy.Subscriber('obstacles_dynamic', MarkerArray, self.obstaclesDynamicCallback, queue_size=1)
# Publishers
#
self.obstacles_detected_world_pub = rospy.Publisher('estim_map_world', MarkerArray, queue_size=1, latch=True)
# Timers
#
self.obstacle_detect_loop_timer = rospy.Timer(rospy.Duration(1.0/self.obstacle_detect_loop_freq), self.obstacleDetectorLoopTimerCallback)
# End
return
def run(self):
rospy.spin()
return
def obstaclesStaticCallback(self, obstacles_static_msg):
self.obstacles_static_msg = obstacles_static_msg
#
return
def obstaclesDynamicCallback(self, obstacles_dynamic_msg):
self.obstacles_dynamic_msg = obstacles_dynamic_msg
#
return
def detectObstacles(self):
#
self.obstacles_detected_world_msg = MarkerArray()
self.obstacles_detected_world_msg.markers = []
# Obstacles static
for obst_i_msg in self.obstacles_static_msg.markers:
if(obst_i_msg.action == 0):
# Check distance
if(obst_i_msg.type == 3):
obst_i_posi_world = np.zeros((3,), dtype=float)
obst_i_posi_world[0] = obst_i_msg.pose.position.x
obst_i_posi_world[1] = obst_i_msg.pose.position.y
obst_i_posi_world[2] = obst_i_msg.pose.position.z
obst_i_rad = obst_i_msg.scale.x/2.0
# Noises
#
posi_noise = np.zeros((3,), dtype=float)
posi_noise[0] = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_stat_pos['x']))
posi_noise[1] = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_stat_pos['y']))
posi_noise[2] = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_stat_pos['z']))
#
radius_noise = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_stat_siz['R']))
height_noise = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_stat_siz['h']))
############
# obstacle wrt World
obst_i_world_msg = []
obst_i_world_msg = copy.deepcopy(obst_i_msg)
# Change color
obst_i_world_msg.color.r = 0.0
obst_i_world_msg.color.g = 0.0
obst_i_world_msg.color.b = 1.0
obst_i_world_msg.color.a = 0.6
# Lifetime
obst_i_world_msg.lifetime = rospy.Duration(2.0*1.0/self.obstacle_detect_loop_freq)
#
obst_i_world_msg.pose.position.x = obst_i_posi_world[0] + posi_noise[0]
obst_i_world_msg.pose.position.y = obst_i_posi_world[1] + posi_noise[1]
obst_i_world_msg.pose.position.z = obst_i_posi_world[2] + posi_noise[2]
# Sizes with noise
obst_i_world_msg.scale.x += 2.0*radius_noise
obst_i_world_msg.scale.y += 2.0*radius_noise
obst_i_world_msg.scale.z += height_noise
# Append world
self.obstacles_detected_world_msg.markers.append(obst_i_world_msg)
else:
print("Unknown obstacle type:"+obst_i_msg.type)
# Obstacles dynamic
for obst_i_msg in self.obstacles_dynamic_msg.markers:
if(obst_i_msg.action == 0):
# Check distance
if(obst_i_msg.type == 3):
obst_i_posi_world = np.zeros((3,), dtype=float)
obst_i_posi_world[0] = obst_i_msg.pose.position.x
obst_i_posi_world[1] = obst_i_msg.pose.position.y
obst_i_posi_world[2] = obst_i_msg.pose.position.z
obst_i_rad = obst_i_msg.scale.x/2.0
# Noises
#
posi_noise = np.zeros((3,), dtype=float)
posi_noise[0] = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_dyna_pos['x']))
posi_noise[1] = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_dyna_pos['y']))
posi_noise[2] = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_dyna_pos['z']))
#
radius_noise = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_dyna_siz['R']))
height_noise = np.random.normal(loc = 0.0, scale = math.sqrt(self.cov_map_dyna_siz['h']))
############
# obstacle wrt World
obst_i_world_msg = []
obst_i_world_msg = copy.deepcopy(obst_i_msg)
# Change color
obst_i_world_msg.color.r = 0.0
obst_i_world_msg.color.g = 0.0
obst_i_world_msg.color.b = 1.0
obst_i_world_msg.color.a = 0.6
# Lifetime
obst_i_world_msg.lifetime = rospy.Duration(2.0*1.0/self.obstacle_detect_loop_freq)
#
obst_i_world_msg.pose.position.x = obst_i_posi_world[0] + posi_noise[0]
obst_i_world_msg.pose.position.y = obst_i_posi_world[1] + posi_noise[1]
obst_i_world_msg.pose.position.z = obst_i_posi_world[2] + posi_noise[2]
# Sizes with noise
obst_i_world_msg.scale.x += 2.0*radius_noise
obst_i_world_msg.scale.y += 2.0*radius_noise
obst_i_world_msg.scale.z += height_noise
# Append world
self.obstacles_detected_world_msg.markers.append(obst_i_world_msg)
else:
print("Unknown obstacle type!!")
# Publish
self.obstacles_detected_world_pub.publish(self.obstacles_detected_world_msg)
#
return
def obstacleDetectorLoopTimerCallback(self, timer_msg):
# Get time
time_stamp_current = rospy.Time.now()
#
self.detectObstacles()
#
return
``` |
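detectObstacles above perturbs every mapped obstacle with zero-mean Gaussian noise whose variances come from the cov_map_* dictionaries; the core of that noise model, extracted as a sketch (the helper name and the example values are ours):
```python
# Minimal sketch of the noise model applied to each mapped obstacle above.
import math
import numpy as np

def noisy_obstacle(position, radius, height, cov_pos, cov_siz):
    """Return a perturbed copy of (position, radius, height).

    cov_pos = {'x': .., 'y': .., 'z': ..} and cov_siz = {'R': .., 'h': ..}
    hold variances, so the standard deviation is their square root.
    """
    noisy_pos = [position[i] + np.random.normal(0.0, math.sqrt(cov_pos[k]))
                 for i, k in enumerate(('x', 'y', 'z'))]
    noisy_radius = radius + np.random.normal(0.0, math.sqrt(cov_siz['R']))
    noisy_height = height + np.random.normal(0.0, math.sqrt(cov_siz['h']))
    return noisy_pos, noisy_radius, noisy_height

# Example with the static-obstacle covariances used above.
print(noisy_obstacle([2.0, 1.0, 0.5], 0.5, 1.0,
                     {'x': 0.0001, 'y': 0.0001, 'z': 0.000001},
                     {'R': 0.0001, 'h': 0.000001}))
```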
{
"source": "joselusl/ars_sim_sensor_pos_robot",
"score": 2
} |
#### File: ars_sim_sensor_pos_robot/source/ars_sim_sensor_pos_robot_ros_node.py
```python
import numpy as np
from numpy import *
import os
import rospy
from ars_sim_sensor_pos_robot_ros import *
def main():
ars_sim_sensor_pos_robot_ros = ArsSimSensorPosRobotRos()
ars_sim_sensor_pos_robot_ros.init()
ars_sim_sensor_pos_robot_ros.open()
try:
ars_sim_sensor_pos_robot_ros.run()
except rospy.ROSInterruptException:
pass
return 0
''' MAIN '''
if __name__ == '__main__':
main()
``` |
{
"source": "joselvira/BiomecanicaPython",
"score": 3
} |
#### File: BiomecanicaPython/Funciones/Bland_AltmanPlot.py
```python
from __future__ import division, print_function #division #Ensure division returns float
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import pandas as pd
__author__ = '<NAME>'
__version__ = '1.1.4'
__date__ = '18/03/2021'
#%%
def bland_altman_plot(data1, data2, unidad='', etiquetaCasos=False, regr=0, tcrit_Exacto=False, n_decimales=1, ax=None, show_text=None, show_bias_LOA=False, color_lin=None, *args, **kwargs):
"""Realiza gráfico de Bland-Altman para dos variables con medidas similares.
Ejemplo de explicación en: https://www.medcalc.org/manual/blandaltman.php
Parameters
----------
data1 : 1D array_like data pandas DataFrame.
data2 : 1D array_like data pandas DataFrame.
unidad: opcional, incluye la unidad de medida en las etiquetas de los ejes
etiquetaCasos: opcional, pone un número al lado de cada caso
regr: si es mayor que cero, incluye en el gráfico la línea de regresión con
el exponente indicado. También presenta el valor de la correlación
con su p, de R2 y del error medio cuadrático.
tcrit_Exacto : con False toma el valor t crítico = 1.96; con True lo
calcula a partir de n (#stats.t.ppf(q=0.975, df=n1 + n2-2))
n_decimales : especifica el número de decimales a mostrar en BIAS y LOA.
ax : ejes para el grafico resultante.
show_text : indica si muestra texto informativo.
Puede ser 'bias_loa', 'regr', 'publication', 'all'.
defoult=None.
show_bias_LOA : True/False. Muestra el valor del bias y limits of agreement
en el gráfico.
color_lin: Si se quiere controlar el color de las líneas bias y LOA. Por
defecto (None), mantiene color negro para bias y gris para LOA.
Útil cuando se quieren solapar varios grupos de datos en la misma
gráfica.
*args y **kwargs especificaciones de formato para los puntos del grafico.
Returns
-------
grafico Bland-Altman
Bias, media de las diferencias
LOA, limits of agreement.
Example
-------
bland_altman_plot(s1, s2, lw=0, color='k', s=40)
bias, LOA = bland_altman_plot(s1, s2, etiquetaCasos= True, regr=2, unidad='m', tcrit_Exacto=True, show_bias_LOA=True, lw=1, color='b', s=20, ax=ax)
Version history
---------------
'1.1.4':
Añadido el argumento n_decimales para poder especificar el número de decimales cuando muesra el BIAS y LOA.
'1.1.3':
Exporta los LOA sin multiplicar por 2.
Representa el texto del bias y LOA en las líneas correspondientes.
color_lin controla el color del texto de bias_loa.
Introducido parámetro show_text, que puede ser 'bias_loa', 'regr', 'publication' o 'all'. El parámetro show_bias_LOA
'1.1.2':
Corregidas las gráficas, con ax en lugar de plt. Antes no funcionaba con varios subplots.
Con color_lin se puede controlar el color de las líneas bias y LOA. Útil cuando se quieren solapar gráficas de varios conjuntos de datos.
'1.1.1':
Cálculo R2 con sklearn, con statsmodels falla por la versión de scikit.
Quita las filas con valores nulos.
'1.1.0':
Quitados adaptadores para etiquetas.
Se puede elegir el tcrit 1.96 o ajustarlo a la n.
"""
if len(data1) != len(data2):
raise ValueError('Los dos grupos de datos no tienen la misma longitud.')
#primero agrupa las dos variables en un dataframe para quitar los casos nulos de cualquiera de los dos.
data= pd.concat([pd.DataFrame(data1), pd.DataFrame(data2)], axis=1).dropna()
data1 = data.iloc[:,0]
data2 = data.iloc[:,1]
n1 = data1.notnull().sum() #pd.notnull(data1).count() #asi para que no cuente los NaN #np.isfinite(data1).count()
n2 = data2.notnull().sum() #pd.notnull(data2).count()
mean = np.mean([data1, data2], axis=0)
diff = np.array(data1 - data2) # Difference between data1 and data2
md = np.mean(diff) # Mean of the difference
sd = np.std(diff, axis=0, ddof=1) # Standard deviation of the difference
t_crit = 1.96 if tcrit_Exacto==False else stats.t.ppf(q=0.975, df=n1 + n2-2) #por defecto 1.96, de esta forma se ajusta a la n
if unidad!='':
unidad= ' ('+unidad+ ')'
# make plot if not axis was provided
if ax is None:
fig, ax = plt.subplots(1, 1)
#dibuja los puntos
ax.scatter(mean, diff, zorder=2, *args, **kwargs)
if regr!=0:
import seaborn as sns
#linear regresion
slope, intercept, r_value, p_value, std_err = stats.linregress(mean, diff)
#plt.plot(mean, slope*mean + intercept , 'r', alpha=.5, lw = 1)
#con cualquier exponente de regresión
orden=regr
#import statsmodels.api as sm #para las regresiones #FALLA POR versión de SCIKIT...
#R2 = sm.OLS(diff, xpoly).fit().rsquared
#Calcula el modelo de regresión para obtener R2
from sklearn.pipeline import Pipeline #estos para calcular las regresiones
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
X=mean
y=diff
polynomial_features = PolynomialFeatures(degree=orden, include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([('polynomial_features', polynomial_features),
('linear_regression', linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
y_predict = pipeline.predict(X[:, np.newaxis])
R2 = r2_score(y, y_predict)
MSE = mean_squared_error(y, y_predict)
#Gráfica de least squares fit line
if color_lin==None:
col='black'
else:
col=color_lin
sns.regplot(x=mean, y=diff, scatter=False, order=orden, ax=ax, line_kws={'color':col, 'alpha':0.6, 'lw':2})
cuadroTexto=dict(facecolor='white', alpha=0.4, edgecolor='none', boxstyle='round,pad=0.1,rounding_size=.5')
if show_text in ['regr', 'all']:
ax.text(0.02, 0.01, 'r= {0:.3f}, p= {1:.3f}, $R^2$= {2:.3f} MSE= {3:.3f}'.format(r_value, p_value, R2, MSE), fontsize=10,
horizontalalignment='left', verticalalignment='bottom', color=col, bbox=cuadroTexto, transform=ax.transAxes, zorder=2)
elif show_text in ['publication']:
ax.text(0.02, 0.01, 'r= {0:.3f}'.format(r_value), fontsize=10,
horizontalalignment='left', verticalalignment='bottom', color=col, bbox=cuadroTexto, transform=ax.transAxes, zorder=2)
#dibuja la línea horizontal del cero
ax.axhline(0.0, color='grey', linestyle='-', zorder=1, linewidth=1.0, solid_capstyle='round')
if color_lin==None:
#dibuja la línea horizontal de la media
ax.axhline(md, color='black', linestyle='-', zorder=1, linewidth=2.0, solid_capstyle='round')
#dibuja las líneas horizontales de los límites de acuerdo
ax.axhline(md + t_crit*sd, color='gray', zorder=1, linestyle='--', dashes=(5, 2), dash_capstyle='round', linewidth=1.5)
ax.axhline(md - t_crit*sd, color='gray', zorder=1, linestyle='--', dashes=(5, 2), dash_capstyle='round', linewidth=1.5)
else:
#dibuja la línea horizontal de la media
ax.axhline(md, color=color_lin, linestyle='-', zorder=1, linewidth=2.0, solid_capstyle='round')
#dibuja las líneas horizontales de los límites de confianza
ax.axhline(md + t_crit*sd, color=color_lin, zorder=1, linestyle='--', dashes=(5, 2), dash_capstyle='round', linewidth=1.5)
ax.axhline(md - t_crit*sd, color=color_lin, zorder=1, linestyle='--', dashes=(5, 2), dash_capstyle='round', linewidth=1.5)
if etiquetaCasos:
font = {'family': 'sans',
'color': 'red',
'weight': 'normal',
'size': 8,
'alpha': 0.7,
}
for num in range(len(data1)):
if ~np.isnan(mean[num]) and ~np.isnan(diff[num]):
plt.text(mean[num], diff[num], str(num), fontdict=font)
etiquetaY='Difference'
etiquetaY=etiquetaY + unidad
etiquetaX='Mean'
etiquetaX=etiquetaX + unidad
ax.set_xlabel(etiquetaX)
ax.set_ylabel(etiquetaY)
if show_text in ['bias_loa', 'publication', 'all'] or show_bias_LOA:
if color_lin==None:
color_lin='black'
cuadroTexto=dict(facecolor='white', alpha=0.4, edgecolor='none', boxstyle='round,pad=0.1,rounding_size=.5')
ax.text(ax.get_xlim()[1], md+(ax.get_ylim()[1]-ax.get_ylim()[0])/1000, 'Bias {0:.{dec}f}'.format(md, dec=n_decimales), fontsize=12, color=color_lin,
horizontalalignment='right', verticalalignment='bottom', bbox=cuadroTexto, transform=ax.transData, zorder=2)
ax.text(ax.get_xlim()[1], (md+t_crit*sd)+(ax.get_ylim()[1]-ax.get_ylim()[0])/1000, 'LOA {0:.{dec}f}'.format(md+t_crit*sd, dec=n_decimales), fontsize=10, color=color_lin,
horizontalalignment='right', verticalalignment='bottom', bbox=cuadroTexto, transform=ax.transData)
ax.text(ax.get_xlim()[1], (md-t_crit*sd)+(ax.get_ylim()[1]-ax.get_ylim()[0])/1000, 'LOA {0:.{dec}f}'.format(md-t_crit*sd, dec=n_decimales), fontsize=10, color=color_lin,
horizontalalignment='right', verticalalignment='bottom', bbox=cuadroTexto, transform=ax.transData)
plt.tight_layout()
return(md, t_crit*sd)
#%%
if __name__ == '__main__':
import pandas as pd
#%% Comprobar con los datos de la web https://rpubs.com/Cristina_Gil/B-A_analysis
metodo_A = np.array([1, 5, 10, 20, 50, 40, 50, 60, 70, 80, 90, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000])
metodo_B = np.array([8, 16, 30, 24, 39, 54, 40, 68, 72, 62, 122, 80, 181, 259, 275, 380, 320, 434, 479, 587, 626, 648, 738, 766, 793, 851, 871, 957, 1001, 960])
bland_altman_plot(metodo_A, metodo_B, regr=2, tcrit_Exacto=True)
plt.show()
#%%
#Crea un conjunto de medidas con dos instrumentos. El 2º es como el 1º pero con un error gausiano
np.random.seed(1)
mu1, sigma1 = 0, 10.5 # media y SD del instrumento 1
instr1 = np.random.normal(mu1, sigma1, 10)
mu2, sigma2 = 3.1, 10.1 # # media y SD que se añade al instrumento 1
instr2 = instr1+np.random.normal(mu2, sigma2, 10)
#instr1=pd.DataFrame(instr1)
#instr2=pd.DataFrame(instr2)
#Muestra los datos
plt.plot(instr1, 'bo')
plt.plot(instr2, 'ro')
plt.show()
#y un scatter plot
plt.plot(instr1, instr2, 'ro')
plt.show()
#%%
#Crea el Bland-Altman plot básico
bland_altman_plot(instr1, instr2)
plt.show()
#puede devolver los valores de bias (media de las diferencias) y limits of agreement, y también presentarlos en la gráfica
bias, LOA = bland_altman_plot(instr1, instr2, show_bias_LOA=True)
print('Bias = {:.2f}, LOA ={:.2f}'.format(bias, LOA))
#Se puede controlar el color de los puntos (color), su tamaño (s)
bland_altman_plot(instr1, instr2, color='r', s=80, show_bias_LOA=True)
#Se puede pedir que etiquete cada caso para poder identificarlos
bland_altman_plot(instr1, instr2, etiquetaCasos=True, color='b', show_bias_LOA=True)
#También puede calcular si existe tendendia en los datos. Presenta la R2 y Pearson
bland_altman_plot(instr1, instr2, regr=1, color='b', show_bias_LOA=True)
#para poder controlar el aspecto de los ejes, etiquetas, etc. incluirlo en una figura
fig, ax = plt.subplots()
bland_altman_plot(instr1, instr2, etiquetaCasos=False, ax=ax, color='k', s=40)
plt.title('Bland-Altman plot')
ax.set_ylabel('Bias')
ax.set_xlabel('Media entre instrumento 1 e instrumento 2')
ax.set_xlim([-30,30])
ax.set_ylim([-30,30])
plt.show()
#También puede calcular si existe tendendia en los datos. El número pasado en regs se utiliza como exponente de la línea de regresión utilizada.
#Presenta Pearson y p de la correlación lineal y la R2 de la regresión del polinomio con exponente indicado
bland_altman_plot(instr1, instr2, regr=1, color='b', show_bias_LOA=True)
bland_altman_plot(instr1, instr2, regr=2, color='b', show_bias_LOA=True)
#%%
np.random.seed(9999)
m1 = np.random.random(500)
m2 = np.random.random(500)
mediadif, LOA= bland_altman_plot(m1, m2, lw=0, color='k', s=40, show_text='all', regr=1, color_lin='grey')
plt.title('Bland-Altman Plot')
plt.show()
#%%
Datos = pd.read_excel(r"F:\Programacion\Python\Mios\Estadistica\EjemploDatos-Bland-AltmanPlot.xlsx", 'Hoja1', index_col=None, na_values=[" "])
bland_altman_plot(Datos.iloc[:,0], Datos.iloc[:,1], lw=0, color='k', s=40, show_text='bias_loa')
plt.title('Bland-Altman Plot')
plt.show()
#%%###############################################
Data1 = np.array([10.3, 5.1, 3.2, 19.1, 8.1, 11.7, 7.1, 13.9, 4.0, 20.1, 27.5, 6.4, 30.1, 13.0, 10.0,
16.8, 17.3, 3.0, 25.6, 19.3, 15.0, 27.3, 14.2, 13.0,14.4, 22.1, 19.0, 18.0, 13.0, 25.6,
18.4, 12.6, 25.5, 15.7, 20.2, 16.5, 19.3, 10.0, 18.8, 24.0, 22.8])
#Create an array with the second data set of group 1
Data2 = np.array([8.9, 4.0, 8.1, 21.2, 8.1, 12.0, 4.5, 13.9, 4.0, 20.1, 27.5, 6.4, 40.3, 13.0, 10.0, 32.2,
17.1, 9.4, 25.2, 18.8, 15.0, 27.3, 21.3, 13.0, 14.4,22.1, 17.9, 3.0, 13.0, 19.0, 18.4,
12.6, 25.5, 15.7, 21.2, 16.5, 19.3, 10.0, 30.8, 9.0, 22.8])
Data1=pd.Series(Data1)
Data2=pd.Series(Data2)
bland_altman_plot(Data2, Data1, unidad='cm')
plt.title('Bland-Altman Plot')
plt.show()
#%%###############################################
#cuando las muestras vienen de la misma población normal, cabe esperar que se salga de los límites 5 de cada 100
mu1, sigma1 = 0, 0.9 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 100)
mu2, sigma2 = 0, 0.9 # mean and standard deviation
s2 = np.random.normal(mu2, sigma2, 100)
s1=pd.Series(s1)
s2=pd.Series(s2)
fig, ax = plt.subplots(1, 1, figsize=(4,3), dpi=150)
bland_altman_plot(s1, s2, ax=ax, lw=0, color='k', s=40)
plt.title('Dos muestras normales')
plt.show()
#%%###############################################
#Cuando son proporcionales sale una línea con pendiente
mu1, sigma1 = 0, 0.5 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 1000)
s2 = s1*1.5
s1=pd.Series(s1)
s2=pd.Series(s2)
fig, ax = plt.subplots(1, 1, figsize=(4,3), dpi=150)
bland_altman_plot(s1, s2, ax=ax, lw=0, color='k', s=40, regr=1, show_text='publication')
plt.title('proporcionales')
plt.show()
#%%###############################################
#Cuando son exponenciales sale una curva rara
mu1, sigma1 = 0, 0.5 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 100)
s2 = s1**3
s1=pd.Series(s1)
s2=pd.Series(s2)
fig, ax = plt.subplots(1, 1, figsize=(4,3), dpi=150)
bland_altman_plot(s1, s2, ax=ax, lw=0, color='k', s=40, regr=3, show_text='all')
plt.title('proporcionales')
plt.show()
#%%###############################################
#Cuando son iguales sale una línea horizontal=0
mu1, sigma1 = 0, 0.5 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 100)
s2 = s1
s1=pd.Series(s1)
s2=pd.Series(s2)
fig, ax = plt.subplots(1, 1, figsize=(4,3), dpi=150)
bland_altman_plot(s1, s2, ax=ax, lw=0, color='k', s=40)
plt.title('iguales')
plt.show()
#%%###############################################
#Cuando son iguales + una diferencia aleatoria normal sale una nube que tiende hacia abajo
mu1, sigma1 = 0, 0.5 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 100)
mu2, sigma2 = 0.1, 0.9 # mean and standard deviation
s2 = s1+np.random.normal(mu2, sigma2, 100)
s1=pd.Series(s1)
s2=pd.Series(s2)
fig, ax = plt.subplots(1, 1, figsize=(4,3), dpi=150)
kargs={'lw':0, 'color':'b','s':40, 'alpha':0.5}
#bland_altman_plot(s1, s2, etiquetaCasos=True, ax=ax, **kargs, idioma='esp')#en python 2 no funciona
bland_altman_plot(s1, s2, etiquetaCasos=True, ax=ax, lw=0, color='b', regr=1, alpha=0.5, s=40)
plt.title('iguales + una diferencia aleatoria normal')
plt.show()
################################################
#%%
#Cuando son iguales + una diferencia constante sale una línea horizontal= a la diferencia cte.
mu1, sigma1 = 0, 0.5 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 100)
mu2, sigma2 = 0.1, 0.9 # mean and standard deviation
s2 = s1+10
s1=pd.Series(s1)
s2=pd.Series(s2)
fig, ax = plt.subplots(1, 1, figsize=(4,3), dpi=150)
bland_altman_plot(s1, s2, etiquetaCasos=True, ax=ax, lw=0, color='k', s=40)
plt.title('iguales + una diferencia constante')
plt.show()
################################################
#%%
#Prueba cuando hay algún nan entre datos
mu1, sigma1 = 0, 0.5 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 10)
mu2, sigma2 = 0.1, 0.9 # mean and standard deviation
s2 = s1+np.random.normal(mu2, sigma2, 10)
s1=pd.Series(s1)
s2=pd.Series(s2)
s1[4]=np.nan
s2[4]=np.nan
#s2[6]=np.nan
fig, ax = plt.subplots(1, 1, figsize=(4,3), dpi=150)
bland_altman_plot(s1, s2, etiquetaCasos=True, ax=ax, lw=0, color='k', s=40)
plt.title('iguales + una diferencia aleatoria normal')
plt.show()
################################################
#%% Calcula con correlación
#Cuando son iguales + una diferencia aleatoria normal sale una nube que tiende hacia abajo
#np.random.seed(1)
mu1, sigma1 = 0, 10.5 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 50)
mu2, sigma2 = 3.1, 10.1 # mean and standard deviation
s2 = s1+np.random.normal(mu2, sigma2, 50)
s1=pd.Series(s1)
s2=pd.Series(s2)
fig, ax = plt.subplots(1, 1, figsize=(4,3), dpi=150)
bland_altman_plot(s1, s2, etiquetaCasos=False, ax=ax, lw=0, color='k', regr=1, s=40)
plt.title('iguales + una diferencia aleatoria normal')
plt.show()
#%% con varios subplots
mu1, sigma1 = 0, 10.5 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 50)
mu2, sigma2 = 3.1, 10.1 # mean and standard deviation
s2 = s1+np.random.normal(mu2, sigma2, 50)
s1=pd.Series(s1)
s2=pd.Series(s2)
mu12, sigma12 = 10, 25.5 # mean and standard deviation
s12 = np.random.normal(mu12, sigma12, 30)
mu22, sigma22 = 15.1, 20.1 # mean and standard deviation
s22 = s12+np.random.normal(mu22, sigma22, 30)
s12=pd.Series(s12)
s22=pd.Series(s22)
fig, ax = plt.subplots(1, 2, figsize=(10,5), dpi=150) #, constrained_layout=True
bland_altman_plot(s1, s2, etiquetaCasos=False, ax=ax[0], lw=0, color='k', regr=1, s=40, show_text='bias_loa', n_decimales=3)
#plt.xlim(0.244, 0.252)
ax[0].set_title('Gráfica1')
bland_altman_plot(s12, s22, etiquetaCasos=False, ax=ax[1], lw=0, color='k', regr=1, s=40)
#plt.xlim(0.244, 0.252)
ax[1].set_title('Gráfica2')
#%% Con color uniforme para cada conjunto de datos
fig, ax = plt.subplots(figsize=(5,5), dpi=150) #, constrained_layout=True
bland_altman_plot(s1, s2, etiquetaCasos=False, ax=ax, regr=1, s=70, lw=0, color='b', alpha=0.6, color_lin='b', show_text='bias_loa')
#plt.xlim(0.244, 0.252)
bland_altman_plot(s12, s22, etiquetaCasos=False, ax=ax, regr=1, s=70, lw=0, color='r', alpha=0.6, color_lin='r', show_text='bias_loa')
#plt.xlim(0.244, 0.252)
ax.set_title('Gráfica2')
# %%
```
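Independently of the plotting, the bias and limits of agreement that bland_altman_plot reports boil down to a few lines; a stripped-down sketch of that calculation using the fixed 1.96 criterion (the helper name is ours, and the sample values reuse the first cases of the example arrays above):
```python
# Bare-bones bias / limits-of-agreement calculation behind bland_altman_plot.
import numpy as np

def bias_loa(data1, data2, t_crit=1.96):
    diff = np.asarray(data1) - np.asarray(data2)
    bias = diff.mean()
    half_width = t_crit * diff.std(ddof=1)
    return bias, bias - half_width, bias + half_width

d1 = np.array([10.3, 5.1, 3.2, 19.1, 8.1])
d2 = np.array([8.9, 4.0, 8.1, 21.2, 8.1])
print(bias_loa(d1, d2))  # bias and the lower/upper 95% limits of agreement
```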
#### File: BiomecanicaPython/Funciones/effectSize.py
```python
from __future__ import division, print_function #division #Ensure division returns float
# =============================================================================
# EFFECT SIZE
# =============================================================================
import numpy as np
from scipy import stats
import pandas as pd
__author__ = '<NAME>'
__version__ = 'v.1.0.0'
__date__ = '15/05/2021'
"""
Change log:
    15/05/2021, v1.0.0
    - Removed the error caused by solid_capstyle='round' in the bar-creation call.
"""
# =============================================================================
# Funciones de ayuda, para quitar el cero inicial
# =============================================================================
def _remove_leading_zero(value, string):
if 1 > value > -1:
string = string.replace('0', '', 1)
return string
class MyFloat(float):
def __format__(self, format_string):
if format_string.endswith('z'): # 'fz' is format sting for floats without leading the zero
format_string = format_string[:-1]
remove_leading_zero = True
else:
remove_leading_zero = False
string = super(MyFloat, self).__format__(format_string)
return _remove_leading_zero(self, string) if remove_leading_zero else string
# `_remove_leading_zero` function is same as in the first example
#Ejemplos
#print('some text {:.3f} some more text'.format(MyFloat(.4444)))
#print('some text {:.3fz} some more text'.format(MyFloat(.4444)))
# =============================================================================
#Comprobada con el archivo Excel F:\Biomec\HerramientasCalculo\LimitesConfianza.xlsx
def Hedges_g(group1, group2, grouplabels=['Grupo1', 'Grupo2'], varlabels=[], muestras_dep=False, pctcl = 95, sdpooled= True, tipose='Nakagawa', leadingZero=False, decVariable=2, decES=3, decP=3, borders=False, grid=False, numbers=False, show=False, ax=None):
"""Calcula la g de Hedges.
Parameters
----------
group1 : 1D array_like data or 2D pandas DataFrame. Este es el de referencia, si es mayor, la diferencia es positiva.
group2 : 1D array_like data or 2D pandas DataFrame.
grouplabels : etiquetas de los dos grupos
varlabels : etiquetas de las variables o columnas. Si no se ponen las etiquetas son numeros correlativos
muestras_dep : si las muestras son dependientes (medidas repetidas) = True.
si las muestras son independientes = False (por defecto).
pctcl : porcentaje para los límites de confianza (por defecto 95%).
sdpooled : True calcula g con la desviación típica agrupada; con False utiliza la del primer grupo. Con comparación para muestras independientes mejor usar la pooled (por defecto).
tipose : tipo de Standard Error para calcular los CL: 'Nakagawa', según Nakagawa y Cuthill (2007) (por defecto)
'Otro' según curso metaanalisis (sin referencia).
leadingZero: False quita el cero inicial en p y ES
decVariable : número de decimales para la variable. Por defecto 2.
decP : número de decimales para la P. Por defecto 3.
decES : número de decimales para el ES. Por defecto 3.
borders: True muestra los bordes del grafico y los ticks. Por defecto False.
grid: True muestra la línea horizontal de cada variable (útil cualdo hay muchas variables). Por defecto False.
numbers: True incluye como etiqueta encima de cada barra su valor g y limites. Por defecto False.
show: True muestra la grafica o no. Por defecto False.
ax : axis para incluir el gráficos en una figura hecha de antemano
Returns
-------
DataFrame con los datos formateados para imprimir en Excel.
tabla con los datos x+-SD, p, g, g_inf y g_sup.
g : tamaño del efecto (Hedge's g).
g_inf : X% límite inferior.
g_sup : X% límite superior.
p : probabilidad del factor p del t test
Example
-------
Hedges_g(dfx, dfy)
ESCI, tabla=Hedges_g(dfx, dfy, grouplabels=['G1', 'G2'], varlabels=dfx.columns, muestras_dep=0, pctcl = 95, sdpooled= True, tipose='Nakagawa', decVariable=3, decP=3, decES=3, numbers=True, show=True, ax=ax)
"""
if len(group1.shape) == 1:
numCols=1
else:
numCols=group1.shape[1]
ESCL = np.full((numCols, 3), np.nan)
#Para tabla pandas
var1=[]
var2=[]
p=[]
es = []#np.full(len(ESCL[:,0]), np.nan)
dfTablaResumen=pd.DataFrame({'':varlabels, })
for col in range(numCols):
# x=np.array(var1[var1.iloc[:,col].name])
# y=np.array(var2[var2.iloc[:,col].name])
"""
Probar sustituir las siguientes lineas con esto:
if len(data.shape) == 1:
data = data.reshape(data.shape[0], 1)
"""
if len(group1.shape) == 1:
x=group1
y=group2
else:
x=group1[group1.iloc[:,col].name]
y=group2[group2.iloc[:,col].name]
#determina la n de cada variable, no vale len() por si hay valores NaN
nx= int(x.notnull().sum())#pd.notnull(x).count()
ny= int(y.notnull().sum())#pd.notnull(y).count()
pct = (100-float(100-pctcl)/2)/100
if muestras_dep == 1: #muestras dependientes
if sdpooled == True:
S=np.sqrt((((nx-1) * x.std()**2) + ((ny-1) * y.std()**2)) / (nx+ny-2.0))
else:
S= x.std() #np.std(x, ddof=1)
c_n_1 = 1-(3/float(4*nx-5)) #factor de corrección
g = c_n_1*((y.mean() - x.mean()) / S)
t_crit = stats.t.ppf(q=pct, df=nx-1)
if tipose=='Nakagawa':
SE = np.sqrt((2*(1-stats.pearsonr(x,y)[0])/nx) + g**2/(2*(nx-1)))
else:
SE = np.sqrt(((nx-1) / float(nx*(nx-3))) * (1+nx*g**2)-(g**2/c_n_1**2))
g_inf = g - t_crit * SE
g_sup = g + t_crit * SE
# ttestrel=[x,y]
# dfTtestrel=pd.concat(ttestrel, axis=1).dropna()
# t, p_val = stats.ttest_rel(dfTtestrel.iloc[:,0], dfTtestrel.iloc[:,1], axis=0, nan_policy='omit')
t, p_val = stats.ttest_rel(x, y, axis=0, nan_policy='omit')
        else: # independent samples
if sdpooled == True:
S=np.sqrt((((nx-1) * x.std()**2) + ((ny-1) * y.std()**2)) / float(nx+ny-2))
else:
S= x.std() #np.std(x, ddof=1)
            c_m = 1-(3/float(4*(nx + ny)-9)) # correction factor
g = c_m*((x.mean() - y.mean()) / S)
t_crit=stats.t.ppf(q=pct, df=nx+ny-2)
#t_crit=stats.t.interval(alpha=0.95, df=100-1, loc=0, scale=1) * np.sqrt(1+1/float(len(x)+len(y)))
# stats.t.interval(alpha=0.95, df=100-1, loc=0, scale=1) * np.sqrt(1+1/100)
# stats.t.ppf(q=0.975, df=100-1)
if tipose=='Nakagawa':
SE = np.sqrt(((nx+ny) / float(nx*ny)) + (g**2/float(2*(nx+ny-2))))
else:
SE = np.sqrt(((nx+ny) / float(nx*ny)) + (g**2/float(2*(nx+ny))))
#intervalo = t_crit*(np.sqrt(((len(x)+len(y)) / float(len(x)*len(y))) + (((np.mean(x)-np.mean(y)) / float(np.sqrt((((len(x)-1)*np.std(x, ddof=1)**2) + ((len(y)-1)*np.std(y, ddof=1)**2)) / float(len(x)+len(y)-2))))**2 / float(2*(len(x)+len(y))))))
g_inf = g - t_crit * SE
g_sup = g + t_crit * SE
########
            # check equality of variances
            w, p_lev=stats.levene(x.dropna(), y.dropna()) # NOTE: returns NaN when any NaN is present, review
            igualVarianzas=True # default
            if p_lev<0.05:
                igualVarianzas=False
            ########
            t, p_val = stats.ttest_ind(x, y, equal_var=igualVarianzas, nan_policy='omit') # NOTE: returns NaN when any NaN is present, review
ESCL[col] = [g, g_inf, g_sup]
var1.append(('{0:.{dec}f} {1} {2:.{dec}f}').format(x.mean(), r'±', x.std(), dec=decVariable))#±
var2.append('{0:.{dec}f} {1} {2:.{dec}f}'.format(y.mean(), r'±', y.std(), dec=decVariable))
if(leadingZero==False):
p.append('{:.{dec}fz}'.format(MyFloat(p_val), dec=decP))
es.append('{:.{dec}fz} [{:.{dec}fz}, {:.{dec}fz}]'.format(MyFloat(ESCL[col,0]), MyFloat(ESCL[col,1]), MyFloat(ESCL[col,2]), dec=decES))
else:
p.append('{:.{dec}f}'.format(p_val, dec=decP))
es.append('{:.{dec}f} [{:.{dec}f}, {:.{dec}f}]'.format(ESCL[col,0], ESCL[col,1], ESCL[col,2], dec=decES))
###########################################
    # Build the results table in Pandas
###########################################
"""
#SEGUIR AQUÍIIIIIIIIIII
for i in range(len(ESCL[:,0])):
if numCols>1:#PROBAR METER TODO ESTO EN LA PARTE ANTERIOR
var1.append(('{0:.2f} {1} {2:.2f}').format(group1.iloc[:,i].mean(), '±', group1.iloc[:,i].std()))
var2.append('%.2f ± %.2f'%(group2.iloc[:,i].mean(), group2.iloc[:,i].std()))
else: #cuando solo hay una variable
var1.append(('{0:.2f} {1} {2:.2f}').format(x.mean(), '±', x.std()))
#var1.append(('%.2f '+u'±'+' %.2f').format(x.mean(), x.std()))
var2.append('%.2f ± %.2f'%(group2.mean(), y.std()))
#prueba t muestras relacionadas
#para muestras independientes ttest_ind(a, b[, axis, equal_var])
if muestras_dep==1:
t, p_val = stats.ttest_rel(x, y, axis=0)
else: #para muestras indep
########
#comprueba igualdad varianzas
w, p_lev=stats.levene(group1.iloc[:,i], group2.iloc[:,i])
igualVarianzas=True #por defecto
if p_lev<0.05:
igualVarianzas=False
########
t, p_val = stats.ttest_ind(group1.iloc[:,i], group2.iloc[:,i], equal_var=igualVarianzas)
p.append('%.3f'%p_val)
es.append('%.2f [%.2f, %.2f]'%(ESCL[i,0], ESCL[i,1], ESCL[i,2]))
"""
dfTablaResumen[grouplabels[0]] = var1
dfTablaResumen[grouplabels[1]] = var2
dfTablaResumen['p'] = p
dfTablaResumen['ES [95% CI]'] = es
    dfTablaResumen.reindex(columns = ['', 'Var1', 'Var2', 'p', 'ES [95% CI]'])
    # convert the plus-minus sign so the table can be written to Excel
    dfTablaResumen=dfTablaResumen.replace({'±': u'±'}, regex=True)
###########################################
if show:
_plot(ESCL, varlabels, decES, borders, numbers, grid, ax)
return ESCL, dfTablaResumen
def cohen_d(x,y):
from numpy import mean, std # version >= 1.7.1 && <= 1.9.1
from math import sqrt
return (mean(x) - mean(y)) / sqrt((std(x, ddof=1) ** 2 + std(y, ddof=1) ** 2) / 2.0)
def _plot(ESCL, varlabels, decES, borders, numbers, grid, axx):
"""Grafica de tamaños efecto."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
else:
if axx is None:
fig, ax = plt.subplots(1, 1, figsize=(6, len(ESCL)*0.5))
else: ax=axx
#plt.rcParams.update(plt.rcParamsDefault)
#fig.subplots_adjust(top=.9, bottom=0.1, left=0.52, right=0.99)
out = ESCL[:, 1]*ESCL[:, 2] > 0 # CIs that don't contain the true mean
ind = np.arange(0, len(ESCL))
ind = ind[::-1]#les da la vuelta para que los ponga del primero al último
ax.axvline(x=0, color=[0, 0, 0], zorder=0) #linea vertical en el cero
# ax.spines['left'].set_position(('data', 1))
#ax.plot([ESCL[np.logical_not(out), 1], ESCL[np.logical_not(out), 2]], [ind[np.logical_not(out)], ind[np.logical_not(out)]], color=[0.4, 0.4, 0.4, 1], marker='', ms=10, linewidth=2) #barra solo para los no significativos
# ax.plot([ESCL[out, 1], ESCL[out, 2]], [ind[out], ind[out]], color=[1, 0, 0, 1], marker='', ms=10, linewidth=2) #barra solo para los significativos
# ax.plot(ESCL[:,0], ind, color=[0, 1, 0, 1], marker='.', ms=10, linestyle='')#marcador para el valor g
# ax.errorbar(ESCL[:,0], ind, xerr= [ESCL[:,0]-ESCL[:, 1], ESCL[:, 2]-ESCL[:,0]],
# fmt='o', ms=7, color='b', ecolor='r', capthick=2)
#barra solo para los no significativos
plotline, caps, barlinecols = ax.errorbar(ESCL[np.logical_not(out),0], ind[np.logical_not(out)], xerr= [ESCL[np.logical_not(out),0]-ESCL[np.logical_not(out), 1], ESCL[np.logical_not(out), 2]-ESCL[np.logical_not(out),0]],
fmt='o', ms=5, color='0.6', elinewidth=1.5, capsize=3, capthick=2, zorder=2)
for cap in caps: #como no funciona el solid_capstyle='round', hace los caps redondeados uno a uno
cap._marker._capstyle = 'round'
#barra solo para los significativos
plotline, caps, barlinecols = ax.errorbar(ESCL[out,0], ind[out], xerr= [ESCL[out,0]-ESCL[out, 1], ESCL[out, 2]-ESCL[out,0]],
fmt='o', ms=6, color='0', elinewidth=1.5, capsize=3, capthick=2, zorder=2)
for cap in caps:
cap._marker._capstyle = 'round'
#ax.set_xlim(-2, 2)
#Ajusta el eje vertical para dejar un poco de espacio por encima y por debajo
ax.set_ylim(-0.5, len(ESCL)-0.5)
if numbers==True:
for i in range(len(ESCL)):
#ax.annotate(str(ESCL[:,0]), xy=([1,1],[1.3,2.5]), zorder=10, ha='center', va='bottom')
if np.isnan(ESCL[i,0]):
plt.text(0.0, ind[i]+.25, 'nan',
ha = 'center',va='bottom', bbox=dict(facecolor='white', alpha=0.9, edgecolor='none', boxstyle='round,pad=0.3'+',rounding_size=.5'), size = 10, zorder=1)
else:
plt.text(ESCL[i,0], ind[i]+.25, '{0:.{dec}f} [{1:.{dec}f}, {2:.{dec}f}]'.format(ESCL[i,0],ESCL[i,1],ESCL[i,2], dec=decES),
ha = 'center',va='bottom', bbox=dict(facecolor='white', alpha=0.9, edgecolor='none', boxstyle='round,pad=0.3'+',rounding_size=.5'), size = 10, zorder=1)
if borders==False:
#quita los ticks de la derecha e izquierda
ax.tick_params(left=False)#quita los ticks del lado izquierdo #labelbottom='off', bottom=False, labelleft='off',
#quita las líneas de los bordes excepto la de abajo
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
#pinta líneas horizontales
# if grid:
# ax.grid(b=None, axis='y') #el vertical siempre desactivado
ax.yaxis.grid(grid)
ax.set_xlabel('Effect size', fontsize=11)
if(len(varlabels)>0): #escribe las etiquetas en el eje vertical
#plt.yticks(ind, varlabels, fontsize=12, ha = 'right') #ha es akineación horizontal; rotation='horizontal',
ax.set_yticks(ind)
ax.set_yticklabels(varlabels, fontsize=12, ha = 'right')
plt.xticks(fontsize=12)
#plt.margins(.1)
plt.tight_layout(rect=[0,0,1,0.95])
if axx is None:
plt.show()
# =============================================================================
# %% Pruebas
# =============================================================================
if __name__ == '__main__':
help(Hedges_g)
"""
x = np.vstack((x, x+np.random.rand(1)/5, x+np.random.rand(1)/2)) # simulate two instants (two rows)
y = np.vstack((y, y+np.random.rand(1)/5, y+np.random.rand(1)/2))"""
"""
mu1, sigma1 = 0, 0.5 # mean and standard deviation
s1 = np.random.normal(mu1, sigma1, 500)
mu2, sigma2 = 0.7, 0.3 # mean and standard deviation
s2 = np.random.normal(mu2, sigma2, 500)
mu3, sigma3 = 1.2, 0.7 # mean and standard deviation
s3 = np.random.normal(mu3, sigma3, 500)
x = np.vstack((s1, s2, s3))
y = x+np.random.rand(1)/5
dfx=pd.DataFrame(x, columns=['var1', 'var2', 'var3'])
dfy=pd.DataFrame(y, columns=['var1', 'var2', 'var3'])
"""
import pandas as pd
#nomVars=[]
numVars = 5
numDat = 50
# ###############################################
# #crea medias y sd aleatorias
for i in range(numVars):
mux=2 + np.random.rand(numVars)*10
muy=mux + np.random.rand(numVars)*1.1
sdx=np.random.rand(numVars)
sdy=sdx + np.random.rand(numVars)/4.2
x = np.random.normal(mux, sdx, (numDat, numVars))
y = np.random.normal(muy, sdy, (numDat, numVars))
# x = sdx * np.random.randn(numDat, numVars) + mux
# y = sdy * np.random.randn(numDat, numVars) + muy
nomVars= ["Var{0}".format(nom) for nom in range(numVars)]
dfx=pd.DataFrame(x, columns=nomVars)
dfy=pd.DataFrame(y, columns=nomVars)
dfx.hist()
dfy.hist()
# ###############################################
import matplotlib.pyplot as plt
plt.rcParams.update(plt.rcParamsDefault) #para el fondo blanco, etc
plt.figure()
plt.subplot()
#plt.plot(xx[:,0], 'bo')
#plt.plot(xx[:,1], 'ro')
plt.plot(dfx, 'bo', label='x')
plt.plot(dfy, 'r^', label='y')
#plt.legend(loc='best')
plt.title('todas las variables')
plt.show()
plt.figure()
plt.subplot()
#plt.plot(xx[:,0], 'bo')
#plt.plot(xx[:,1], 'ro')
plt.plot(dfx, dfy, 'o')
plt.title('por grupos')
plt.show()
print("Hedges g muestras relacionadas (medidas repetidas) [g_inf, g_sup]: ", Hedges_g(dfx, dfy, muestras_dep=1))
Hedges_g(dfx, dfy, muestras_dep=True, borders=True, show=True)
Hedges_g(dfx, dfy, varlabels=dfx.columns, muestras_dep=True, show=True)
fig, ax = plt.subplots(1, 1, figsize=(4, numVars*0.55))
fig.suptitle("Hedges' g effect size", fontsize=14, fontweight='bold', y=1.06)
ax.set_title('(independent samples)', y=1.03)
Hedges_g(dfx, dfy, varlabels=dfx.columns, muestras_dep=False, numbers=True, ax=ax, show=True)
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(4, numVars*0.85))
fig.suptitle("Hedges' g effect size", fontsize=14, fontweight='bold', y=1.06)
ax.set_title('(related samples)', y=1.03)
ESCI, tabla=Hedges_g(dfx, dfy, varlabels=dfx.columns, muestras_dep=False, numbers=True, show=True, grid=True, ax=ax)
print(tabla)
ESCI, tabla = Hedges_g(dfx, dfy, grouplabels=['G1', 'G2'], varlabels=dfx.columns, muestras_dep=0, leadingZero=True, numbers=True, show=True)
print(tabla)
fig, ax = plt.subplots(1, 1, figsize=(4, numVars*0.75))
fig.suptitle("Hedges' g effect size", fontsize=14, fontweight='bold', y=1.06)
ax.set_title('(related samples)', y=1.03)
ESCI, tabla=Hedges_g(dfx.iloc[:,0:1], dfy.iloc[:,0:1], decVariable=3, decP=3, decES=3, varlabels=dfx.columns[0:1], muestras_dep=False, numbers=True, show=True, ax=ax)
fig, ax = plt.subplots(1, 1, figsize=(4, numVars*0.75))
fig.suptitle("Hedges' g effect size", fontsize=14, fontweight='bold', y=1.06)
ax.set_title('(related samples)', y=1.03)
ESCI, tabla=Hedges_g(dfx.iloc[:,0:1], dfy.iloc[:,0:1], decVariable=3, decP=3, decES=3, varlabels=dfx.columns[0:1], muestras_dep=False, grid=True, numbers=True, show=True, ax=ax)
fig, ax = plt.subplots(1, 1, figsize=(4, numVars*0.75))
ESCI, tabla = Hedges_g(dfx, dfy, grouplabels=['G1', 'G2'], muestras_dep=False, leadingZero=True, numbers=True, show=True, ax=ax)
print(tabla)
#%%
#para comprobar con nans
x=np.array([100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0])
y=np.array([100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0])
dfx=pd.DataFrame(x, columns=['var1'])
dfy=pd.DataFrame(y, columns=['var2'])
fig, ax = plt.subplots(1, 1, figsize=(4, numVars*0.25))#apiñado
fig.suptitle("Hedges' g effect size", fontsize=14, fontweight='bold')
ax.set_title('(related samples)', y=1.03)
ESCI, tabla=Hedges_g(dfx, dfy, varlabels=dfx.columns, muestras_dep=False, decVariable=4, decP=2, decES=3, numbers=True, show=True, ax=ax)
#%%
x=np.array([0.892, 0.903, 0.898, 0.878, 0.908, 0.945, 0.926, 0.932, 0.932, 0.879, 0.920, 0.882])
y=np.array([0.889, 0.908, 0.891, 0.864, 0.929, 0.939, 0.934, 0.928, 0.965, 0.872 ,0.918, 0.872])
dfx=pd.DataFrame(x, columns=['Tgs_NQ'])
dfy=pd.DataFrame(y, columns=['Tgs_Q'])
fig, ax = plt.subplots(1, 1, figsize=(4, numVars*0.75), dpi=200)
fig.suptitle("Hedges' g effect size", fontsize=14, fontweight='bold', y=1.06)
ax.set_title('(related samples)', y=1.07)
ESCI, tabla=Hedges_g(dfx, dfy, grouplabels=['G1', 'G2'], varlabels=dfx.columns, muestras_dep=False, pctcl = 95, sdpooled= True, tipose='Nakagawa', decVariable=3, decP=3, decES=3, numbers=True, show=True, ax=ax)
ax.set_xlim(-2,2)
plt.show()
```
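As a quick sanity check of the independent-samples branch of `Hedges_g` above, the following minimal sketch recomputes g, its small-sample correction and the Nakagawa & Cuthill confidence limits by hand; the two samples and the 95% level are made up for illustration.
```python
import numpy as np
from scipy import stats

# Made-up samples, just to illustrate the arithmetic used in Hedges_g
x = np.array([5.1, 4.8, 5.6, 5.0, 5.3, 4.9, 5.2, 5.4])
y = np.array([4.6, 4.4, 4.9, 4.7, 4.5, 4.8, 4.3, 4.6])
nx, ny = len(x), len(y)

# Pooled SD and the small-sample correction factor c_m
S = np.sqrt(((nx - 1) * x.std(ddof=1)**2 + (ny - 1) * y.std(ddof=1)**2) / (nx + ny - 2))
c_m = 1 - 3 / (4 * (nx + ny) - 9)
g = c_m * (x.mean() - y.mean()) / S

# 95% limits with the Nakagawa & Cuthill (2007) standard error
SE = np.sqrt((nx + ny) / (nx * ny) + g**2 / (2 * (nx + ny - 2)))
t_crit = stats.t.ppf(0.975, df=nx + ny - 2)
print(f"g = {g:.3f}, 95% CI [{g - t_crit * SE:.3f}, {g + t_crit * SE:.3f}]")
```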
#### File: BiomecanicaPython/Funciones/filtrar_Butter.py
```python
from __future__ import division, print_function
import numpy as np
import pandas as pd
import xarray as xr
import scipy.signal
__author__ = '<NAME>'
__version__ = 'v.1.5.0'
__date__ = '08/05/2021'
"""
Changes:
    08/05/2021, v1.5.0
    - Fixed to work with xarray. If the data contain NaN they are filled by interpolation and removed again afterwards.
    - The RMS is only computed when it is requested or when the plot is drawn.
    - Argument names changed to be more pythonic.
"""
# =============================================================================
# %% Función filtrar low o High pass
# =============================================================================
def filtrar_Butter(dat_orig, fr, fc, order=2.0, kind='low', returnRMS=False, show=False, ax=None):
"""
Parameters
----------
dat_orig : array 1D o dataframe de pandas en 2D o xarray.
Datos originales a filtrar.
fr : frecuencia de registro.
fc : frecuencia de corte del filtro.
order : 'agudeza' del filtro.
2 por defecto.
kind : 'low' o 'high'
low por defecto.
returnRMS: True o False
(False por defecto). Devuelve o no el RMS de la diferencia
entre filtrado y original.
show : muestra o no el gráfico con datos originales y filtrados con el RMSE.
ax : ejes de una figura creada previamente.
Returns
-------
filtData : array de datos filtrados.
RMS: root mean square de la diferencia entre los datos originales y los filtrados.
Notes
-----
Describir filtro de 2º orden y 2 pasadas como "double 2nd order Butterworth filter"
(<NAME>) http://biomch-l.isbweb.org/threads/26625-Matlab-Code-for-EMG-processing-(negative-deflection-after-normalization!!!)?p=32073#post32073
Examples
--------
>>> import numpy as np
>>> from filtrar_Butter import filtrar_Butter
>>> y = np.cumsum(np.random.randn(1000))
>>> fy = filtrar_Butter(y, fr=1000, fc=10, order=2, show=True)
>>>
>>> dfCaminos = pd.DataFrame((np.random.random([100, 4])-0.5).cumsum(axis=0), columns=['A','B','C','D'])
>>> dfCaminosFilt, RMS = filtrar_Butter(dfCaminos, 1000, 50, 2, show=True, returnRMS=True)
"""
RMS=[]
#orden = 2 #orden 2 para que al hacer el doble paso sea de 4th orden
passes = 2.0 #nº de pasadas del filtro adelante y atrás
#fc = 15
Cf = (2**(1/passes)-1)**(1/(2*order)) #correction factor. Para 2nd order = 0.802
Wn = 2*fc/fr/Cf
b, a = scipy.signal.butter(order, Wn, btype = kind)
if isinstance(dat_orig, pd.DataFrame): #Si los datos son pandas dataframe
DatFilt=pd.DataFrame()
for i in range(dat_orig.shape[1]):
DatFilt[dat_orig.columns[i]] = scipy.signal.filtfilt(b, a, dat_orig.iloc[:, i])
DatFilt.index=dat_orig.index #esto es necesario por si se pasa un slice del dataframe
if returnRMS or show==True:
RMS=pd.DataFrame()
for i in range(dat_orig.shape[1]):
RMS.at[0, dat_orig.columns[i]] = np.linalg.norm(DatFilt.iloc[:,i].values-dat_orig.iloc[:,i].values) / np.sqrt(len(dat_orig.iloc[:,i]))
elif isinstance(dat_orig, pd.Series):
DatFilt = pd.Series(scipy.signal.filtfilt(b, a, dat_orig), index=dat_orig.index, name=dat_orig.name)
if returnRMS or show==True:
RMS = np.linalg.norm(DatFilt-dat_orig) / np.sqrt(len(dat_orig))
elif isinstance(dat_orig, xr.DataArray):
#DatFilt = xr.apply_ufunc(scipy.signal.filtfilt, b, a, dat_orig.dropna(dim='time')) #se asume que hay una dimensión tiempo
DatFilt = xr.apply_ufunc(scipy.signal.filtfilt, b, a, dat_orig.interpolate_na(dim='time', method='linear', fill_value='extrapolate')) #rellena los nan con datos interpolados
DatFilt = DatFilt.where(xr.where(np.isnan(dat_orig), False, True), np.nan) #recupera el nº de datos original rellenando con nan los finales como el original
if returnRMS or show==True:
RMS=pd.DataFrame()
for i in range(dat_orig.shape[0]):
RMS.at[0, i]=np.linalg.norm(DatFilt[i,:]-dat_orig[i,:]) / np.sqrt(len(dat_orig[i,:]))
#xr.apply_ufunc(np.linalg.norm, DatFilt[0,:], dat_orig[0,:])
#pip install xskillscore
#import xskillscore as xs
#RMS = xs.rmse(DatFilt, dat_orig, dim='time')
#Investigar para hacer el RMSE directamente sin necesitar la librería xskillscore
#CON XARRAY NO FUNCIONAN LOS GRÁFICOS
else: #si los datos no son pandas dataframe
DatFilt = scipy.signal.filtfilt(b, a, dat_orig)
if returnRMS or show==True:
RMS = np.linalg.norm(DatFilt-dat_orig) / np.sqrt(len(dat_orig))
if show:
_plot(dat_orig, DatFilt, RMS, fc, ax)
if returnRMS:
return DatFilt, RMS
else:
return DatFilt
# =============================================================================
# =============================================================================
# Presenta la gráfica
# =============================================================================
def _plot(dat_orig, DatFilt, RMS, fc, ax):
import matplotlib.pyplot as plt
bNecesarioCerrarFigura = False
if ax is None:
bNecesarioCerrarFigura = True
fig, ax = plt.subplots(1, 1, figsize=(10, 4))
if isinstance(dat_orig, pd.DataFrame): #Si los datos son pandas dataframe
import seaborn as sns
cmap = sns.color_palette('bright', n_colors=dat_orig.shape[1])
DatFilt.plot(color=cmap, legend=False, ax=ax)
dat_orig.plot(color=cmap, alpha=0.6, linestyle=':', legend=False, ax=ax)
labels=[dat_orig.columns[x]+', RMSE='+'{:.3f}'.format(RMS.iloc[0,x]) for x in range(dat_orig.shape[1])]
plt.legend(labels)
else: #cuando no son dataframe, incluso si son pandas series
ax.plot(dat_orig, 'b:', label='Original')
ax.plot(DatFilt, 'b-', label='Filt (RMSE={:.3f})'.format(RMS))
plt.legend(loc='best')
ax.set_xlabel('Num. datos')
ax.set_ylabel('Variable')
ax.set_title('Filtrado Butterworth {0:3g} Hz'.format(fc))
if bNecesarioCerrarFigura:
plt.show()
# =============================================================================
# =============================================================================
# %% Función filtrar low o High pass
# =============================================================================
def filtrar_Butter_bandpass(dat_orig, fr, fclow, fchigh, order=2.0, show=False, ax=None):
"""
Parameters
----------
dat_orig : array 1D o dataframe de pandas en 2D.
Datos originales a filtrar.
fr : frecuencia de registro.
fclow, fchigh : frecuencias de corte del filtro.
order : 'agudeza' del filtro.
2 por defecto.
show : muestra o no el gráfico con datos originales y filtrados con el RMSE.
ax : ejes de una figura creada previamente.
Returns
-------
filtData : array de datos filtrados.
Notes
-----
Información sobre el filtro Butterworth de bandpass en
https://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
Examples
--------
>>> import numpy as np
>>> from filtrar_Butter import filtrar_Butter
>>> y = np.cumsum(np.random.randn(1000))
>>> fy = filtrar_Butter(y, fr=1000, fc=10, order=2, show=True)
>>>
>>> dfCaminos = pd.DataFrame((np.random.random([100, 4])-0.5).cumsum(axis=0), columns=['A','B','C','D'])
>>> dfCaminosFilt, RMS = filtrar_Butter(dfCaminos, 1000, 50, 2, show=True, returnRMS=True)
"""
nyq = 0.5 * fr
low = fclow / nyq
high = fchigh / nyq
b, a = scipy.signal.butter(order, [low, high], btype='band')
    RMS = pd.DataFrame()
    if isinstance(dat_orig, pd.DataFrame): #if the data are a pandas dataframe
        DatFilt=pd.DataFrame()
        for i in range(dat_orig.shape[1]):
            DatFilt[dat_orig.columns[i]] = scipy.signal.lfilter(b, a, dat_orig.iloc[:, i])
            RMS.at[0, dat_orig.columns[i]] = np.linalg.norm(DatFilt.iloc[:, i].values - dat_orig.iloc[:, i].values) / np.sqrt(len(dat_orig.iloc[:, i]))
        DatFilt.index=dat_orig.index #needed in case a slice of the dataframe is passed
    elif isinstance(dat_orig, pd.Series):
        DatFilt = pd.Series(scipy.signal.lfilter(b, a, dat_orig), index=dat_orig.index, name=dat_orig.name)
        RMS = np.linalg.norm(DatFilt - dat_orig) / np.sqrt(len(dat_orig))
    else: #if the data are not a pandas dataframe
        DatFilt = scipy.signal.lfilter(b, a, dat_orig)
        RMS = np.linalg.norm(DatFilt - dat_orig) / np.sqrt(len(dat_orig))
    if show:
        _plot(dat_orig, DatFilt, RMS, fclow, ax)
return DatFilt
# =============================================================================
# =============================================================================
# %% PRUEBAS
# =============================================================================
if __name__ == '__main__':
np.random.seed(2)
y = np.cumsum(np.random.randn(1000))
fy, rms = filtrar_Butter(y, 1000, 10, 2, show=True, returnRMS=True)
fy2, rms2 = filtrar_Butter(y[100:300], 1000, 10, 2, show=True, returnRMS=True)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(8, 4))
fig.suptitle('Título grande', y=1.03)
fy = filtrar_Butter(y, 1000, 50, 2, show=True, ax=ax)
ax.set_title('Superpone el Título pequeno', y=1.0)
plt.show()
#Con dataframe de varias columnas
num=1000
colNames=['A','B','C','D']
dfCaminos = pd.DataFrame((np.random.random([num, 4])-0.5).cumsum(axis=0), columns=colNames)
dfCaminosFilt = filtrar_Butter(dfCaminos, 1000, 5, 2, show=True)
dfCaminosFilt, RMS = filtrar_Butter(dfCaminos, 1000, 50, 2, show=True, returnRMS=True)
#con pd series
dfCaminosFilt, RMS = filtrar_Butter(dfCaminos.iloc[:,0], 1000, 5, 2, show=True, returnRMS=True)
dfCaminosFilt, RMS = filtrar_Butter(dfCaminos['A'], 1000, 50, 2, show=True, returnRMS=True)
#%%Onda con ruido
t = np.arange(0, 2, 1/1000)
#offset vertical
of=[0,0, 0,0]
#ampitudes
a=[3,0.5, 5,0.3]
#frecuencias
f=[1,60, 3,40]
#phase angle, ángulo al inicio del tiempo
pa=[0,0, 0,0]
ondas = pd.DataFrame(np.array([of[i] + a[i]*np.sin(2*np.pi*f[i]*t + pa[i]) for i in range(len(a))]).T)
Onda=pd.DataFrame({'Onda1':ondas[0]+ondas[1], 'Onda2':ondas[2]+ondas[3]})
dfOndaFilt = filtrar_Butter(Onda, 1000, 10, 2, show=True)
# con cambio de index
dfOndaFiltCacho = filtrar_Butter(Onda[100:300], 1000, 20, 2, show=True)
dfOndaFiltCacho, RMS = filtrar_Butter(Onda.iloc[100:300, 0], 1000, 20, 2, show=True, returnRMS=True)
fig, ax = plt.subplots(figsize=(8, 4))
fy = filtrar_Butter(Onda.iloc[400:600, 0], 1000, 50, 2, show=True, ax=ax)
ax.set_title('(Superpone el Título pequeño)', y=1.0)
plt.suptitle('Título grande', y=1.03)
plt.show()
#%%prueba bandpass
# Filter a noisy signal.
fs = 5000.0
lowcut = 500.0
highcut = 1250.0
T = 0.05
nsamples = T * fs
t = np.linspace(0, T, int(nsamples), endpoint=False)
a = 0.02 #amplitud de la señal
f0 = 600.0 #frecuencia principal a extraer de la señal
x = 0.1 * np.sin(2 * np.pi * 1.2 * np.sqrt(t))
x += 0.01 * np.cos(2 * np.pi * 312 * t + 0.1)
x += a * np.cos(2 * np.pi * f0 * t + .11)
x += 0.03 * np.cos(2 * np.pi * 2000 * t)
xFiltBand= filtrar_Butter_bandpass(x, fs, lowcut, highcut, order=6, show=False, ax=None)
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(t, x,'b--')
ax.plot(t, xFiltBand, 'r')
plt.hlines([-a, a], 0, T, 'r', linestyles='--')
plt.title('Filtro bandpass')
plt.show()
###############################
#%%prueba con xarray
t = np.arange(0, 2, 1/1000)
#offset vertical
of=[0,0, 0,0]
#ampitudes
a=[3,0.5, 5,0.3]
#frecuencias
f=[1,60, 3,40]
#phase angle, ángulo al inicio del tiempo
pa=[0,0, 0,0]
ondas = pd.DataFrame(np.array([of[i] + a[i]*np.sin(2*np.pi*f[i]*t + pa[i]) for i in range(len(a))]).T)
Onda=pd.DataFrame({'Onda1':ondas[0]+ondas[1], 'Onda2':ondas[2]+ondas[3]})
da = xr.DataArray(data=np.array(Onda).T,
dims=['channel', 'time'],
coords={'channel': Onda.columns,
'time': np.arange(0, len(Onda)/1000, 1/1000),
},
)
o = da.isel(channel=-1)
da.plot.line(x='time') #sin filtrar
da.isel(channel=1).plot()
plt.show()
np.linalg.norm(da.isel(channel=1)-da.isel(channel=0)) / np.sqrt(len(da.isel(channel=0)))
o_filt, RMSEda = filtrar_Butter(da, 1000, 10, 2, returnRMS=True, show=False)
da.plot.line(x='time')#sin filtrar
o_filt.plot.line(x='time') #filtrado
plt.show()
#Al compararlo con el pandas sale igual
dfOndaFilt, RMSEdf = filtrar_Butter(Onda, 1000, 10, 2, returnRMS=True, show=True)
#%% Con xarray con varias dimensiones
from pathlib import Path #para gestión de archivos y carpetas
#Carga un archivo con datos aleatorios
ruta_trabajo = Path('F:\Programacion\Python\Mios\TratamientoDatos\BasesDatosCreadas\ArchivosPorFactoresCinematicaFake')
da2 = xr.load_dataset(ruta_trabajo / 'DataArrayPruebas.nc').to_array()
del da2['variable'] #la quita de coordenadas
da2 = da2.squeeze('variable') #la quita de dimensiones
da_filt = filtrar_Butter(da2, fr=100, fc=2, order=2, kind='low')
da2.plot.line(x='time', col='partID', col_wrap=4)
da2.sel(partID='s04').plot.line(x='time')
da_filt.sel(partID='s04').plot.line(x='time')
```
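A minimal sketch of the cutoff-frequency correction that `filtrar_Butter` applies before its dual-pass (zero-lag) Butterworth filter; the sampling rate, cutoff and test signal below are arbitrary.
```python
import numpy as np
from scipy import signal

fr, fc, order, passes = 1000.0, 10.0, 2, 2.0      # arbitrary example values
Cf = (2**(1 / passes) - 1)**(1 / (2 * order))      # correction factor, ~0.802 for a 2nd-order filter
Wn = 2 * fc / fr / Cf                              # corrected normalized cutoff
b, a = signal.butter(order, Wn, btype='low')

t = np.arange(0, 2, 1 / fr)
raw = np.sin(2 * np.pi * t) + 0.3 * np.sin(2 * np.pi * 60 * t)   # 1 Hz signal plus 60 Hz noise
filt = signal.filtfilt(b, a, raw)                  # forward-backward pass: effective 4th order, zero lag
print(round(float(np.sqrt(np.mean((filt - np.sin(2 * np.pi * t))**2))), 4))
```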
#### File: BiomecanicaPython/Funciones/readViconCsv.py
```python
from __future__ import division, print_function
"""Lee archivos de datos exportados del Vicon Nexus"""
import numpy as np
import pandas as pd
import xarray as xr
#import scipy.signal
__author__ = '<NAME>'
__version__ = 'v.2.2.0'
__date__ = '29/03/2021'
"""
Changes:
    29/03/2021, v2.1.1
    - Added the 'header_format' parameter so the header can be returned 'flat' on a single line (variable_x, variable_y, ...) or on two lines ((variable,x), (variable,y), ...).
    28/03/2021, v2.1.1
    - Improved reading with Pandas. It can now load files whose first lines contain no data.
    21/03/2021, v2.1.0
    - Replaced the block reader with pd.read_csv, limiting the number of columns to the ones loaded as variables (dropping velocity and acceleration columns).
    - Fixed a failure when reading the frequency if the line was padded with separators (as happens when exporting from Excel).
    10/01/2021, v2.0.1
    - Adjusted so it can also return an xArray for Model Outputs.
    13/12/2020, v2.0.0
    - With the formatoxArray argument the data can be returned in xArray format.
"""
def read_vicon_csv(nombreArchivo, nomBloque='Model Outputs', separador=',', returnFrec=False, formatoxArray=False, header_format='flat'):
"""
Parameters
----------
versión : v2.2.0
nombreArchivo : string
ruta del archivo a abrir
nomBloque : string
tipo de datos a leer en el archivo original.
'Model Outputs', 'Trajectories' o 'Devices'
separador : string
caracter separador de los datos
returnFrec : bool
si es True devuelve un int con la frecuencia de muestreo
formatoxArray : bool
si es true devuelve los datos en formato xArray
header_format : str
'flat': devuelve el encabezado en una línea (por defecto)
otra cosa: devuelve el encabezaco en dos líneas (var y coord)
Returns
-------
data : datos leidos en formato DataFrame de Pandas o DataArray de xArray.
frec: frecuencia de registro de los datos.
Examples
--------
>>> dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
>>> dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
>>> #Con formato dataarray de xArray
>>> daDatos = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', formatoxArray=True)
"""
with open(nombreArchivo, mode='rt') as f:
numLinea=0
#busca etiqueta del inicio del bloque
linea = f.readline()
while nomBloque not in linea:
if linea == '':
raise Exception('No se ha encontrado el encabezado')
numLinea+=1
linea = f.readline()
inicioBloque = numLinea
#Lo que viene detrás de la etiqueta es la frecuencia
linea = f.readline()
frecuencia = int(linea.replace(separador,'')) #quita el separador para los casos en los que el archivo ha sido guardado con Excel (completa línea con separador)
#Carga el nombre de las columnas
#linea = f.readline()
nomColsVar = str(f.readline()[:-1]).split(separador) #nombreVariables
nomCols = str(f.readline()[:-1]).split(separador) #nombre coordenadas X,Y,Z.
#nomCols = [s.lower() for s in nomCols] # Lo fuerza a minúsculas
#busca etiqueta del final del bloque
while linea!='\n':
if linea == '':
raise Exception('No se ha encontrado el final del bloque')
numLinea+=1
#print('Linea '+ str(numLinea))
linea = f.readline()
finBloque = numLinea-1 #quita 1 para descontar la línea vacía
#Cuenta el nº de líneas totales
finArchivo=0
with open(nombreArchivo, mode='rt') as f:
for i in f:
finArchivo+=1
#primero asigna los nombres según el propio archivo
nomVars=['Frame', 'Sub Frame']
for i in range(2,len(nomCols),3):
if "'" not in nomCols[i] and "''" not in nomCols[i]: #elimina las posibles columnas de velocidad y aceleración
nomVars.append(nomColsVar[i].split(':')[1]+'_' + nomCols[i])#X
nomVars.append(nomColsVar[i].split(':')[1]+'_' + nomCols[i+1])#Y
nomVars.append(nomColsVar[i].split(':')[1]+'_' + nomCols[i+2])#Z
# [i for i in nomColsVar if "'" in i]
# nomColsVar = [i for i in nomColsVar if "'" not in i]
#carga todos los datos
#CON GENFROMTXT FALLA SI NO EMPIEZA LA PRIMERA LÍNEA CON DATOS
#provisional= np.genfromtxt(nombreArchivo, skip_header= inicioBloque+5, max_rows=finBloque-inicioBloque-1, delimiter=separador, missing_values='', filling_values=np.nan, invalid_raise=True)
#provisional=provisional[:, :len(nomVars)] #recorta solo hasta las variables
#Convierte los datos en pandas dataframe. Pasa solo los que no son de velocidad o aceleración
#dfReturn = pd.DataFrame(provisional[:, :len(nomVars)], columns=nomVars)
#dfReturn = dfReturn.iloc[:, :len(nomVars)] #se queda solo con las columnas de las variables, quita las de velocidad si las hay
#Con pandas directamente funciona (para evitar error si primera línea no son datos, lee la fina de las unidades y luego la quita)
dfReturn = pd.read_csv(nombreArchivo, delimiter=separador, header=None, skiprows=inicioBloque+4, skipfooter=finArchivo-finBloque-5, usecols=range(len(nomVars)), engine='python')
dfReturn = dfReturn.drop(index=0).reset_index(drop=True).astype(float) #borra la primera fila, que contiene las unidades
#Nombra encabezado
var=['_'.join(s.split('_')[:-1]) for s in nomVars[:len(nomVars)]] #gestiona si la variable tiene separador '_', lo mantiene
coord=[s.split(':')[-1] for s in nomCols[:len(nomVars)]]
dfReturn.columns=pd.MultiIndex.from_tuples(list(zip(*[var,coord])), names=['Variable', 'Coord'])
#dfReturn.columns=[var, coord]
#dfReturn.columns.set_names(names=['Variable', 'Coord'], level=[0,1], inplace=True)
if header_format=='flat':
dfReturn.columns = dfReturn.columns.map('_'.join).str.strip()
# #Elimina las columnas de velocidad y aceleración, si las hay
# borrarColsVA = dfReturn.filter(regex='|'.join(["'", "''"])).columns
# dfReturn = dfReturn.drop(columns=borrarColsVA)
#Si hace falta lo pasa a xArray
if formatoxArray:
daReturn=xr.DataArray()
#transforma los datos en xarray
x=dfReturn.filter(regex='|'.join(['_x','_X'])).to_numpy().T
y=dfReturn.filter(regex='|'.join(['_y','_Y'])).to_numpy().T
z=dfReturn.filter(regex='|'.join(['_z','_Z'])).to_numpy().T
data=np.stack([x,y,z])
#Quita el identificador de la coordenada del final
canales = dfReturn.filter(regex='|'.join(['_x','_X'])).columns.str.rstrip('|'.join(['_x','_X']))
n_frames = x.shape[1]
channels = canales
time = np.arange(start=0, stop=n_frames / frecuencia, step=1 / frecuencia)
coords = {}
coords['axis'] = ['x', 'y', 'z']
coords['channel'] = channels
coords['time'] = time
daReturn=xr.DataArray(
data=data,
dims=('axis', 'channel', 'time'),
coords=coords,
name=nomBloque,
attrs={'Frec':frecuencia}
#**kwargs,
)
if formatoxArray and returnFrec:
return dfReturn, daReturn, frecuencia
elif formatoxArray:
return dfReturn, daReturn
elif returnFrec:
return dfReturn, frecuencia
else:
return dfReturn
# =============================================================================
# %%
# =============================================================================
if __name__ == '__main__':
from pathlib import Path
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
#Con Models al final
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN_ModeloAlFinal.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
#Sin fila inicial en blanco
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN_SinFilaBlancoInicial.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
#Solo bloque modelos
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN_2.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
#Con hueco muy grande al inicio
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconConHuecoInicio_S27_WHT_T2_L01.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
dfDatos['R5Meta_Z'].plot()
#Con formato dataarray de xArray
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos, daDatos = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', formatoxArray=True)
dfDatos['Right_Toe_Z'].plot()
daDatos.sel(channel='Right_Toe', axis='z').plot.line()
dfDatos, daDatos = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs', formatoxArray=True)
dfDatos['AngArtLKnee_x'].plot()
daDatos.sel(channel='AngArtLKnee', axis='x').plot.line()
#Archivo con huecos
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconConHuecos_S01_WHF_T1_L04.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatos, frecuencia = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', returnFrec=True)
dfDatos.plot()
#prueba con encabezado multiindex
ruta_Archivo = r'F:\Programacion\Python\Mios\TratamientoDatos\EjemploViconSinHuecos_01_Carrillo_FIN.csv'
nombreArchivo = Path(ruta_Archivo)
dfDatosFlat = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs')
dfDatosMulti = read_vicon_csv(nombreArchivo, nomBloque='Model Outputs', header_format='multi')
dfDatosFlat[['AngArtLKnee_x','AngArtLKnee_y','AngArtLKnee_z']].plot()
dfDatosMulti['AngArtLKnee'].plot()
dfDatosMulti.loc[:, (slice(None), 'x')].plot() #todas las variables de una misma coordenada
dfDatosFlat = read_vicon_csv(nombreArchivo, nomBloque='Trajectories')
dfDatosMulti = read_vicon_csv(nombreArchivo, nomBloque='Trajectories', header_format='multi')
dfDatosFlat[['Right_Toe_X','Right_Toe_Y','Right_Toe_Z']].plot()
dfDatosMulti['Right_Toe'].plot()
dfDatosMulti.loc[:, (slice(None), 'Z')].plot() #todas las variables de una misma coordenada
``` |
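The trickiest part of `read_vicon_csv` is turning the two header rows (variable names and X/Y/Z coordinates) into either a flat or a two-level column index; the short sketch below reproduces just that step on made-up marker names.
```python
import numpy as np
import pandas as pd

# Made-up data with the two header rows a Vicon export provides
var = ['Right_Toe', 'Right_Toe', 'Right_Toe']
coord = ['X', 'Y', 'Z']
df = pd.DataFrame(np.random.rand(5, 3))

# 'multi' style: one level for the variable, one for the coordinate
df.columns = pd.MultiIndex.from_tuples(list(zip(var, coord)), names=['Variable', 'Coord'])
print(df['Right_Toe'].head(2))

# 'flat' style: join both levels into a single name, as header_format='flat' does
flat = df.copy()
flat.columns = flat.columns.map('_'.join).str.strip()
print(flat[['Right_Toe_X', 'Right_Toe_Y', 'Right_Toe_Z']].head(2))
```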
{
"source": "Joselyne97/Books-app",
"score": 3
} |
#### File: Books-app/app/models.py
```python
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin, current_user
from . import login_manager
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index = True)
email_address= db.Column(db.String(255),unique = True,index = True)
email = db.Column(db.String(255),unique = True,index = True)
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
fullname=db.Column(db.String(255))
mobile_phone=db.Column(db.Integer())
office_phone=db.Column(db.Integer())
password_secure = db.Column(db.String(255))
book = db.relationship('Book', backref='user', lazy='dynamic')
comment = db.relationship('Comment', backref = 'user', lazy = 'dynamic')
upvotes = db.relationship('Upvote', backref = 'user', lazy = 'dynamic')
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.password_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.password_secure,password)
def __repr__(self):
return f'User {self.username}'
class Book(db.Model):
'''
'''
__tablename__ = 'book'
id = db.Column(db.Integer, primary_key = True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable = False)
title = db.Column(db.String())
summary = db.Column(db.String(), index = True)
category = db.Column(db.String(255), nullable=False)
poster = db.Column(db.String())
location =db.Column(db.String())
comments = db.relationship('Comment',backref='book',lazy='dynamic')
upvotes = db.relationship('Upvote', backref = 'book', lazy = 'dynamic')
@classmethod
def get_books(cls, id):
        # adapted from the pitches app this model was copied from; assumes id is the owning user's id
        books = cls.query.filter_by(user_id=id).order_by(cls.id.desc()).all()
        return books
def __repr__(self):
        return f'Book {self.title}'
class Comment(db.Model):
__tablename__='comments'
id = db.Column(db.Integer,primary_key=True)
book_id = db.Column(db.Integer, db.ForeignKey('book.id'), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable= False)
description = db.Column(db.Text)
def __repr__(self):
return f"Comment : id: {self.id} comment: {self.description}"
class Upvote(db.Model):
__tablename__ = 'upvotes'
id = db.Column(db.Integer,primary_key=True)
upvote = db.Column(db.Integer,default=1)
book_id = db.Column(db.Integer,db.ForeignKey('book.id'))
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
def save_upvotes(self):
db.session.add(self)
db.session.commit()
    @classmethod
    def add_upvotes(cls, id):
upvote_book = Upvote(user = current_user, book_id=id)
upvote_book.save_upvotes()
@classmethod
def get_upvotes(cls,id):
upvote = Upvote.query.filter_by(book_id=id).all()
return upvote
@classmethod
def get_all_upvotes(cls,book_id):
upvotes = Upvote.query.order_by('id').all()
return upvotes
def __repr__(self):
return f'{self.user_id}:{self.book_id}'
``` |
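The `User` model above never stores the plain-text password; a minimal standalone sketch of the same werkzeug hashing pattern, outside any Flask app:
```python
from werkzeug.security import generate_password_hash, check_password_hash

stored = generate_password_hash('s3cret')       # what the password setter keeps in password_secure
print(check_password_hash(stored, 's3cret'))    # True, what verify_password() returns on login
print(check_password_hash(stored, 'wrong'))     # False
```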
{
"source": "joselynzhao/ATM",
"score": 3
} |
#### File: joselynzhao/ATM/drawer01.py
```python
import os
import codecs
import os.path as osp
import math
import matplotlib.pyplot as plt
import numpy as np
class drawer01():
def __init__(self,dataset = 'DukeMTMC-VideoReID', exp_name = 'atm'):
self.dataset =dataset
self.exp_name = exp_name
self.exp_path = osp.join('logs',self.dataset,self.exp_name)
def __get_time_info(self,file_path,is_baseline=0):
try:
time_file = codecs.open(osp.join(file_path,'time.txt'),'r','utf-8')
except FileNotFoundError:
            print('Time.txt in {} is not found'.format(file_path))
time_info = time_file.readlines()
step_time_list = []
for line in time_info:
line = line.split()
line = [float(i.split(':')[1]) for i in line]
step_time_out = line[1] + line[2] if not is_baseline else line[1]+line[2]+line[3]
step_time_list.append(step_time_out)
return step_time_list
def compare_train_lits_time(self,train_list,unit_size=2,dpi=100):
train_time = []
for train in train_list:
step_time_list = self.__get_time_info(osp.join(self.exp_path,str(train)))
train_time.append(step_time_list)
        # add the baselines
baseline_EF10 = self.__get_time_info(osp.join('logs',self.dataset,'baseline','EF10'),is_baseline=1)
baseline_EF15 = self.__get_time_info(osp.join('logs',self.dataset,'baseline','EF15'),is_baseline=1)
train_time.append(baseline_EF10)
train_time.append(baseline_EF15)
        # start plotting
plt.figure(figsize=(4*unit_size,2*unit_size),dpi=dpi)
for index,train in enumerate(train_time):
step = len(train)
x = np.linspace(1,step,step)
sum_time = sum(train)
plt.plot(x,train,label='{}:{}'.format(train_list[index] if index<len(train_list) else 'baseline',round(sum_time,2)))
        plt.xlabel('steps')
        plt.ylabel('time(s)')
        plt.title('time costed of {}'.format(train_list))
plt.legend(loc='best')
plt.savefig(osp.join(self.exp_path,'time_costed_{}'.format(train_list)),bbox_inches='tight')
plt.show()
def generate_formdata_for_group_list(self,group_list):
for group in group_list:
group = str(group)
data_file = osp.join(self.exp_path, group, 'data.txt')
self.__for_generate_formdata(data_file,group,'format_data.txt')
tagper_file = osp.join(self.exp_path, group, 'tagper_data.txt')
self.__for_generate_formdata(tagper_file, group, 'format_tagper.txt')
def compare_reid_and_tagper(self,group_list,compare_item=['mAP','Rank-1','Rank-5','Rank-10','Rank-20','num_selected','label_pre','select_pre'],unit_size =4,dpi=100,hspace=0.3):
for group in group_list:
try:
reid_file = codecs.open(osp.join(self.exp_path,str(group),'format_data.txt'),'r','utf-8')
tagper_file = codecs.open(osp.join(self.exp_path,str(group),'format_tagper.txt'),'r','utf-8')
except FileNotFoundError:
self.generate_formdata_for_group_list([group])
reid_file = codecs.open(osp.join(self.exp_path, str(group), 'format_data.txt'), 'r', 'utf-8')
tagper_file = codecs.open(osp.join(self.exp_path, str(group), 'format_tagper.txt'), 'r', 'utf-8')
reid_data = eval('{' + reid_file.read() + '}')
tagper_data = eval('{' + tagper_file.read() + '}')
compare_list = [reid_data, tagper_data]
out_name ='reidvstagper_{}'.format(group)
self.__draw_compre_for_list(compare_list,compare_item,out_name,unit_size,dpi,hspace,is_reidvstagper=1)
def compare_train_list(self,train_list,is_tagper=0,compare_item=['mAP','Rank-1','Rank-5','Rank-10','Rank-20','num_selected','label_pre','select_pre'],unit_size =4,dpi=100,hspace=0.3):
file_name = 'format_tagper.txt' if is_tagper else 'format_data.txt'
compare_list = []
for train in train_list:
try:
file_info = codecs.open(osp.join(self.exp_path,str(train),file_name),'r','utf-8')
except FileNotFoundError:
self.generate_formdata_for_group_list([train])
file_info = codecs.open(osp.join(self.exp_path, str(train), file_name), 'r', 'utf-8')
file_data = eval('{' + file_info.read() + '}')
compare_list.append(file_data)
baseline10 = codecs.open(osp.join('logs',self.dataset,'baseline','EF10','format_data.txt'),'r','utf-8')
baseline15 = codecs.open(osp.join('logs',self.dataset,'baseline','EF15','format_data.txt'),'r','utf-8')
baseline10 = eval('{' + baseline10.read() + '}')
baseline15 = eval('{' + baseline15.read() + '}')
compare_list.extend([baseline10,baseline15])
out_name = 'comparetrains_tagper_{}'.format(train_list) if is_tagper else 'comparetrains_reid_{}'.format(train_list)
self.__draw_compre_for_list(compare_list,compare_item,out_name,unit_size,dpi,hspace)
    def get_top_value_for_all(self,is_tagper=0):  # automatically picks up every training run
dictionary = os.listdir(self.exp_path)
group_list = [one for one in dictionary if not os.path.isfile(osp.join(self.exp_path,one))]
group_list.sort()
file_name = 'format_data.txt' if not is_tagper else 'format_tagper.txt'
out_name = 'reid_topvalue.txt' if not is_tagper else 'tagper_topvalue.txt'
items = ['step','mAP','Rank-1','Rank-5','Rank-10','Rank-20','label_pre','select_pre']
out_file = codecs.open(osp.join(self.exp_path,out_name), 'w')
out_file.write('group')
for item in items:
out_file.write('\t{}'.format(item))
out_file.write('\n')
for group in group_list:
try:
file_info = codecs.open(osp.join(self.exp_path,str(group),file_name),'r','utf-8')
except FileNotFoundError:
self.generate_formdata_for_group_list([group])
file_info = codecs.open(osp.join(self.exp_path, str(group), file_name), 'r', 'utf-8')
file_data = eval('{' + file_info.read() + '}')
max_data =[max(file_data[item]) for item in items]
out_file.write(group)
for data in max_data:
out_file.write('\t{}'.format(data))
out_file.write('\n')
out_file.close()
def __draw_compre_for_list(self,compare_list,compare_item,out_name,unit_size,dpi,hspace,is_reidvstagper=0):
item_num = len(compare_item)
        raw = math.floor(pow(item_num,0.5))  # to keep the figure roughly square
col = math.ceil(item_num/raw)
plt.figure(figsize=(4*unit_size,2*unit_size),dpi=dpi)
plt.subplots_adjust(hspace=hspace)
for i in range(item_num):
plt.subplot(raw,col,i+1)
item = compare_item[i]
            max_len = max([train['length'] for train in compare_list])  # max_len appears to be unused here
for train in compare_list:
max_point = np.argmax(train[item])
plt.annotate(str(train[item][max_point]),xy=(max_point+1,train[item][max_point]))
x = np.linspace(1,train['length'],train['length'])
plt.plot(x,train[item],label=train['title'],marker='o')
plt.xlabel('steps')
plt.ylabel('value(%)')
plt.title(item)
if i==1:
if is_reidvstagper:
plt.legend(['reid','tagper'],loc='center', bbox_to_anchor=(0.5, 1.2), ncol=len(compare_list)) # 1
else:
plt.legend(loc='center', bbox_to_anchor=(0.5, 1.2), ncol=len(compare_list)) # 1
plt.savefig(osp.join(self.exp_path,out_name),bbox_inches='tight')
plt.show()
def __for_generate_formdata(self,data_file,group,out_name):
file = codecs.open(data_file,'r','utf-8')
save_path = osp.join(self.exp_path,group,out_name)
format_file = codecs.open(save_path,'w')
datas = file.readlines()
for i in range(len(datas)):
datas[i] = datas[i].strip().split(' ')
            datas[i] =[k.split(':')[-1].strip('%') for k in datas[i]]  # keep only the numeric values
# datas = np.aray(datas)
print(datas)
name_list = ['step','mAP','Rank-1','Rank-5','Rank-10','Rank-20','num_selected','label_pre','select_pre']
format_file.write("\"length\":{},".format(len(datas)))
format_file.write("\"title\":\"{}\"".format(self.exp_name + '_' + group))
for i in range(len(name_list)):
data = [float(datas[k][i]) for k in range(len(datas))]
format_file.write(",\"{}\":{}".format(name_list[i],data))
if __name__ =='__main__':
drawer = drawer01(exp_name='atm')
# drawer.init()
# drawer.generate_formdata_for_group_list([0,1,2,3])
# drawer.compare_reid_and_tagper([4,5])
# drawer.compare_train_list([1,4])
# drawer.get_top_value_for_all(is_tagper=0)
drawer.compare_train_lits_time([0,1,2,3,4,5])
``` |
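`__get_time_info` above parses each line of `time.txt` as space-separated `key:value` tokens; a minimal sketch of that parsing, with a made-up line following the format the training scripts write:
```python
# Made-up time.txt line in the "key:value" format used above
line = "step:3 train:120.5 evaluate:30.2 estimate:45.7 epoch:74.8"
values = [float(tok.split(':')[1]) for tok in line.split()]
step_time = values[1] + values[2]   # train + evaluate, as in the non-baseline case
print(step_time)
```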
{
"source": "joselynzhao/Cross-Study-in-Neuroscience",
"score": 3
} |
#### File: joselynzhao/Cross-Study-in-Neuroscience/bandpower02.py
```python
import numpy as np
data = np.loadtxt('data.txt')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.2)
# Define sampling frequency and time vector
sf = 100.  # sampling frequency (Hz)
def bandpower(data, sf, band, window_sec=None, relative=False):
"""Compute the average power of the signal x in a specific frequency band.
Parameters
----------
data : 1d-array
Input signal in the time-domain.
sf : float
Sampling frequency of the data.
band : list
Lower and upper frequencies of the band of interest.
window_sec : float
Length of each window in seconds.
If None, window_sec = (1 / min(band)) * 2
relative : boolean
If True, return the relative power (= divided by the total power of the signal).
If False (default), return the absolute power.
Return
------
bp : float
Absolute or relative band power.
"""
from scipy.signal import welch
from scipy.integrate import simps
band = np.asarray(band)
low, high = band
# Define window length
if window_sec is not None:
nperseg = window_sec * sf
else:
nperseg = (2 / low) * sf
# Compute the modified periodogram (Welch)
freqs, psd = welch(data, sf, nperseg=nperseg)
# Frequency resolution
freq_res = freqs[1] - freqs[0]
# Find closest indices of band in frequency vector
idx_band = np.logical_and(freqs >= low, freqs <= high)
# Integral approximation of the spectrum using Simpson's rule.
bp = simps(psd[idx_band], dx=freq_res)
if relative:
bp /= simps(psd, dx=freq_res)
return bp
if __name__ =='__main__':
print(bandpower(data,sf,[0.5,4]))
```
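A quick usage sketch of the same Welch + Simpson approach on a synthetic signal, so the result can be checked by eye; the sampling rate, frequencies and amplitudes are arbitrary.
```python
import numpy as np
from scipy.signal import welch
from scipy.integrate import simps

sf = 100.0                                    # arbitrary sampling rate
t = np.arange(0, 30, 1 / sf)
sig = 2.0 * np.sin(2 * np.pi * 2 * t) + 0.5 * np.sin(2 * np.pi * 12 * t)  # strong 2 Hz + weak 12 Hz

low, high = 0.5, 4.0                          # delta band
freqs, psd = welch(sig, sf, nperseg=int((2 / low) * sf))
idx = np.logical_and(freqs >= low, freqs <= high)
bp = simps(psd[idx], dx=freqs[1] - freqs[0])
rel = bp / simps(psd, dx=freqs[1] - freqs[0])
print(f"absolute delta power = {bp:.2f}, relative = {rel:.2f}")
```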
#### File: Cross-Study-in-Neuroscience/exam/3.py
```python
import numpy as np
num_book = 5
num_reader = 3
requeir_list = [[1, 2], [2], [3, 4]]
def get_max_index(l):
index = 0
max = l[0]
for i in range(1,len(l)):
if l[i]>max:
index = i
max = l[i]
return index
def updata_remain(num_book,requeir_list):
temp = []
for i in range(num_book):
temp.append(sum(requeir_list[:,i]))
return temp
# ag_requeir_list = []
for requeir in requeir_list:
for i in range(1,num_book+1):
if i not in requeir:
requeir.insert(i-1,0)
else:
requeir[i-1]=1
# print(requeir)
# print(requeir_list)
requeir_list = np.array(requeir_list)
remain_req = updata_remain(num_book,requeir_list)
satifi_list = np.ones(num_reader)
buy_book = []
while(sum(satifi_list)!=0):
# print('-----------------')
# print(requeir_list)
# print(remain_req)
# print(satifi_list)
# print(remain_req)
index = get_max_index(remain_req)
buy_book.append(index+1)
for i in range(num_reader):
if requeir_list[i][index]==1:
            # this reader's request is now satisfied
satifi_list[i]=0
requeir_list[i]= 0
remain_req = updata_remain(num_book,requeir_list)
# print(requeir_list)
# print('-----------------')
print(len(buy_book))
``` |
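The script above greedily buys the book requested by the most still-unsatisfied readers; the compact set-based sketch below implements the same rule on the same example input and reaches the same answer (2 books).
```python
requests = [{1, 2}, {2}, {3, 4}]              # same example requests as above

bought = []
pending = [set(r) for r in requests]
while any(pending):
    counts = {}                               # how many unsatisfied readers want each book
    for req in pending:
        for book in req:
            counts[book] = counts.get(book, 0) + 1
    best = max(counts, key=counts.get)        # greedy choice
    bought.append(best)
    pending = [set() if best in req else req for req in pending]

print(len(bought), bought)
```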
{
"source": "joselynzhao/Feature-Similarity-Memory-in-Progressive-Learning",
"score": 2
} |
#### File: joselynzhao/Feature-Similarity-Memory-in-Progressive-Learning/main.py
```python
from __future__ import print_function, absolute_import
from reid.REID import *
from reid import datasets
from reid import models
import numpy as np
import torch
import argparse
import os
from reid.utils.logging import Logger
import os.path as osp
import sys
from torch.backends import cudnn
from reid.utils.serialization import load_checkpoint
from torch import nn
import time
import math
import pickle
import time
import matplotlib.pyplot as plt
import os
import codecs
from common_tools import *
def main(args):
cudnn.benchmark = True
cudnn.enabled = True
# get all the labeled and unlabeled data for training
dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
l_data, u_data = get_one_shot_in_cam1(dataset_all, load_path="./examples/oneshot_{}_used_in_paper.pickle".format(
dataset_all.name))
    mv_num = math.ceil(len(u_data)/args.total_step) # the last round may get fewer than mv_num samples
    # computing the total number of training steps
    # total_step = math.ceil(math.pow((100 / args.EF), (1 / args.q))) # take the ceiling, or +2 for an extra one-shot training round # EUG base sampling strategy
    # total_step = math.ceil((2 * NN * args.step_s + args.yita + len(u_data)) / (args.yita + NN + len(l_data))) + 2 # big start strategy
    # experiment info
print("{}/{} is training with {}, the max_frames is {}, and will be saved to {}".format(args.exp_name,args.exp_order,args.dataset,args.max_frames,args.logs_dir))
    # parameter info
print("parameters are setted as follows:")
print("\ttotal_step:\t{}".format(args.total_step))
# print("\tEF:\t{}".format(args.EF))
# print("\tq:\t{}".format(args.q))
# print("\ttrain_tagper_step:\t{}".format(args.train_tagper_step))
print("\tepoch:\t{}".format(args.epoch))
print("\tstep_size:\t{}".format(args.step_size))
print("\tbatch_size:\t{}".format(args.batch_size))
print("\tmv_num:\t{}".format(mv_num))
    # set up the output files
    # the key parameter settings should be reported in part three
sys.stdout = Logger(osp.join(args.logs_dir, args.dataset,args.exp_name,args.exp_order,'log'+time.strftime(".%m_%d_%H-%M-%S")+'.txt'))
data_file =codecs.open(osp.join(args.logs_dir, args.dataset,args.exp_name,args.exp_order,'data.txt'),mode='a')
time_file =codecs.open(osp.join(args.logs_dir, args.dataset,args.exp_name,args.exp_order,'time.txt'),mode='a')
save_path = osp.join(args.logs_dir, args.dataset,args.exp_name,args.exp_order)
resume_step, ckpt_file = -1, ''
    if args.resume: # used when restarting training
resume_step, ckpt_file = resume(args)
# initial the EUG algorithm
reid = REID(model_name=args.arch, batch_size=args.batch_size,mode = args.mode, num_classes=dataset_all.num_train_ids,
data_dir=dataset_all.images_dir, l_data=l_data, u_data=u_data, save_path=save_path,
max_frames=args.max_frames)
select_data = []
    # record the experiment start time
exp_start = time.time()
for step in range(args.total_step+1):
# while(not isout):
print('-'*20+'training step:{}/{}'.format(step+1,args.total_step+1)+'-'*20)
        # start training
train_start = time.time()
train_data = l_data+select_data
reid.train(train_data, step, epochs=args.epoch, step_size=args.step_size, init_lr=0.1)
        # start evaluation
evaluate_start = time.time()
# mAP, top1, top5, top10, top20 = 0,0,0,0,0
mAP,top1,top5,top10,top20 = reid.evaluate(dataset_all.query, dataset_all.gallery)
        # label estimation
estimate_start = time.time()
# pred_y, pred_score, label_pre, id_num = 0,0,0,0
pred_y, pred_score, label_pre = reid.estimate_label_FSM(step) # step \in [0,total_step]
estimate_end = time.time()
selected_idx = reid.select_top_data(pred_score, min(mv_num*(step+1),len(u_data)))
select_data, select_pre = reid.generate_new_train_data(selected_idx, pred_y)
        # write this step's results
data_file.write(
"step:{} mAP:{:.2%} top1:{:.2%} top5:{:.2%} top10:{:.2%} top20:{:.2%} len(l_data):{} label_pre:{:.2%} select_pre:{:.2%}\n".format(
int(step + 1), mAP, top1, top5, top10, top20, len(select_data), label_pre, select_pre))
print(
"reid step:{} mAP:{:.2%} top1:{:.2%} top5:{:.2%} top10:{:.2%} top20:{:.2%} len(l_data):{} label_pre:{:.2%} select_pre:{:.2%} \n".format(
int(step + 1), mAP, top1, top5, top10, top20, len(select_data), label_pre, select_pre))
if args.clock:
train_time = evaluate_start-train_start
evaluate_time = estimate_start - evaluate_start
estimate_time = estimate_end-estimate_start
epoch_time = train_time-estimate_time
time_file.write("step:{} train:{} evaluate:{} estimate:{} epoch:{}\n".format(int(step+1),train_time,evaluate_time,estimate_time,epoch_time))
data_file.close()
if (args.clock):
exp_end = time.time()
exp_time = exp_end - exp_start
h, m, s = changetoHSM(exp_time)
print("experiment is over, cost %02d:%02d:%02.6f" % ( h, m, s))
time_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='FSM')
parser.add_argument('-d', '--dataset', type=str, default='DukeMTMC-VideoReID',choices=datasets.names()) #s
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('--epoch',type=int,default=70)
parser.add_argument('--step_size',type=int,default=55)
parser.add_argument('--total_step',type=int,default=10)
    parser.add_argument('--EF', type=float, default=5) # progressive sampling coefficient
    parser.add_argument('--q', type=float, default=1) # progressive sampling exponent
working_dir = os.path.dirname(os.path.abspath(__file__))
    parser.add_argument('--data_dir', type=str, metavar='PATH',default=os.path.join(working_dir, 'data')) # root directory of the dataset
    parser.add_argument('--logs_dir', type=str, metavar='PATH',default=os.path.join(working_dir, 'logs')) # root directory for saving logs
parser.add_argument('--exp_name',type=str,default="fsm")
parser.add_argument('--exp_order',type=str,default="0")
parser.add_argument('--resume', type=bool, default=False)
    parser.add_argument('--mode', type=str, choices=["Classification", "Dissimilarity"], default="Dissimilarity") # consider whether this option should be removed
parser.add_argument('--max_frames', type=int, default= 400)
    parser.add_argument('--clock',type=bool, default=True) # whether to time the run
    parser.add_argument('--gdraw',type=bool, default=False) # whether to plot in real time
    # the parameters below are of unclear use for now
parser.add_argument('-a', '--arch', type=str, default='avg_pool',choices=models.names()) #eug model_name
parser.add_argument('-i', '--iter-step', type=int, default=5)
parser.add_argument('-g', '--gamma', type=float, default=0.3)
parser.add_argument('-l', '--l', type=float)
parser.add_argument('--continuous', action="store_true")
main(parser.parse_args())
'''
python3.6 main.py --total_step 10
'''
``` |
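The loop above grows the pseudo-labeled set by `mv_num` tracklets per step; a tiny sketch of that schedule with illustrative numbers (the dataset size is made up):
```python
import math

total_step, num_unlabeled = 10, 700                 # illustrative values only
mv_num = math.ceil(num_unlabeled / total_step)      # tracklets added to the train set per step
for step in range(total_step + 1):
    selected = min(mv_num * (step + 1), num_unlabeled)
    print(f"step {step + 1}: one-shot data + {selected} pseudo-labeled tracklets")
```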
{
"source": "joselynzhao/One-shot-Person-Re-ID-ATM",
"score": 2
} |
#### File: joselynzhao/One-shot-Person-Re-ID-ATM/atmpro1_vsm2.py
```python
from my_reid.eug import *
from my_reid import datasets
from my_reid import models
import numpy as np
import torch
import argparse
import os
import warnings
warnings.filterwarnings("ignore")
from my_reid.utils.logging import Logger
import os.path as osp
import sys
from torch.backends import cudnn
from my_reid.utils.serialization import load_checkpoint
from torch import nn
import time
import pickle
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from pathlib import Path
def resume(savepath):
import re
pattern = re.compile(r'step_(\d+)\.ckpt')
start_step = -1
ckpt_file = ""
# find start step
files = os.listdir(savepath)
files.sort()
for filename in files:
try:
iter_ = int(pattern.search(filename).groups()[0])
print(iter_)
if iter_ > start_step:
start_step = iter_
ckpt_file = osp.join(savepath, filename)
except:
continue
# if need resume
if start_step >= 0:
print("continued from iter step", start_step)
else:
print("resume failed", start_step, files)
return start_step, ckpt_file
def main(args):
father = Path('/mnt/')
    if father.exists():  # running on the server
        data_dir = Path('/mnt/share/datasets/RE-ID/data')  # server
        logs_dir = Path('/mnt/home/{}'.format(args.log_name))  # server
    else:  # local machine
        data_dir = Path('/home/joselyn/workspace/ATM_SERIES/data')  # use this when running locally
        logs_dir = Path('/home/joselyn/workspace/ATM_SERIES/{}'.format(args.log_name))  # use this when running locally
cudnn.benchmark = True
cudnn.enabled = True
    save_path = os.path.join(logs_dir, args.dataset, args.exp_name, args.exp_order)  # down to the experiment-order folder
total_step = 100 // args.EF + 1
sys.stdout = Logger(osp.join(save_path, 'log' + str(args.EF) + time.strftime(".%m_%d_%H:%M:%S") + '.txt'))
    dataf_file = open(osp.join(save_path, 'dataf.txt'), 'a')  # stores performance data (performance in the feature space)
    data_file = open(osp.join(save_path, 'data.txt'), 'a')  # stores performance data (performance in the feature space)
    kf_file = open(osp.join(save_path,'kf.txt'),'a')
    # data format: label_pre_r, select_pre_r, label_pre_t, select_pre_t, i.e. with the tagper data added
    tagper_path = osp.join(save_path,'tagper')  # where the tagper is stored
if not Path(tagper_path).exists():
os.mkdir(tagper_path)
    '''record the configuration info and paths'''
print('-'*20+'config_info'+'-'*20)
config_file = open(osp.join(save_path, 'config.txt'), 'w')
config_info = str(args).split('(')[1].strip(')').split(',')
config_info.sort()
for one in config_info:
key,value=map(str,one.split('='))
config_file.write(key.strip()+'='+value.strip('\'')+'\n')
print(key.strip()+'='+value.strip('\''))
config_file.write('save_path='+save_path)
print('save_path='+save_path)
print('-' * 20 + 'config_info' + '-' * 20)
config_file.close()
    train_time_file = open(osp.join(save_path, 'time.txt'), 'a')  # records the training time only
    # data format: step_time total_time
total_time = 0
# get all the labeled and unlabeled data for training
dataset_all = datasets.create(args.dataset, osp.join(data_dir, args.dataset))
num_all_examples = len(dataset_all.train)
l_data, u_data = get_init_shot_in_cam1(dataset_all,
load_path="./examples/{}_init_{}.pickle".format(dataset_all.name, args.init),
init=args.init)
resume_step, ckpt_file = -1, ''
if args.resume:
resume_step, ckpt_file = resume(save_path)
# initial the EUG algorithm
eug = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids,
dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=save_path, max_frames=args.max_frames,
embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda)
tagper = EUG(batch_size=args.batch_size, num_classes=dataset_all.num_train_ids,
dataset=dataset_all, l_data=l_data, u_data=u_data, save_path=tagper_path,
max_frames=args.max_frames,
embeding_fea_size=args.fea, momentum=args.momentum, lamda=args.lamda)
new_train_data = l_data
unselected_data = u_data
    iter_mode = 2  # iteration mode: decides whether the tagper is trained
for step in range(total_step):
# for resume
if step < resume_step:
continue
ratio = (step + 1) * args.EF / 100
ratio_t = (step+1+args.t) * args.EF /100
nums_to_select = int(len(u_data) * ratio)
nums_to_select_tagper = int(len(u_data) * ratio_t)
if nums_to_select >= len(u_data):
break
        # decay of args.vsm_lambda from 0.5 towards 0
        vsm_lambda = args.vsm_lambda*step/(1-(total_step/2)) +args.vsm_lambda
        vsm_lambda +=1
        print("Running: EF={}%, step {}:\t Nums_to_be_select {} \t Ratio {} \t Logs-dir {}".format(
args.EF, step, nums_to_select, ratio, save_path))
# train the model or load ckpt
start_time = time.time()
print("training reid model")
eug.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size,
init_lr=0.1) if step != resume_step else eug.resume(ckpt_file, step)
        # evaluate only the eug model
# mAP, rank1, rank5, rank10, rank20 = 0, 0, 0, 0, 0
mAP, rank1, rank5, rank10, rank20 = eug.evaluate(dataset_all.query, dataset_all.gallery)
        # write the metrics to the data file
data_file.write('{} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step, mAP, rank1, rank5, rank10, rank20))
pred_y, pred_score,label_pre,dists= eug.estimate_label_vsm()
        selected_idx = eug.select_top_data_vsm2(pred_score, dists,args.topk,vsm_lambda,min(nums_to_select_tagper,len(u_data)-50) if iter_mode==2 else min(nums_to_select,len(u_data)))  # sample the larger tagper quota directly; the -50 keeps unselected_data from becoming empty
new_train_data, unselected_data, select_pre= eug.generate_new_train_data(selected_idx, pred_y)
raw_label_pre, raw_select_pre = label_pre,select_pre
t_label_pre,t_select_pre = 0,0
raw_select_pre_t = 0
# label_pre_t,select_pre_t=0,0
if iter_mode==2:
raw_select_pre_t = raw_select_pre
print("training tagper model")
selected_idx = eug.select_top_data_vsm2(pred_score,dists,args.topk,vsm_lambda, min(nums_to_select, len(u_data)))
_, _, raw_select_pre = eug.generate_new_train_data(selected_idx, pred_y)
# kf_file.write('{} {:.2%} {:.2%}'.format(step, label_pre, select_pre))
tagper.resume(osp.join(save_path,'step_{}.ckpt'.format(step)),step)
tagper.train(new_train_data, unselected_data, step, loss=args.loss, epochs=args.epochs, step_size=args.step_size, init_lr=0.1)
pred_y, pred_score, label_pre,dists= tagper.estimate_label_vsm()
selected_idx = tagper.select_top_data_vsm2(pred_score,dists,args.topk,vsm_lambda,min(nums_to_select,len(u_data))) # 采样目标数量
new_train_data, unselected_data, select_pre= tagper.generate_new_train_data(selected_idx, pred_y)
t_label_pre,t_select_pre = label_pre,select_pre
label_pre,select_pre = t_label_pre,t_select_pre
            if nums_to_select_tagper >=len(u_data):
                iter_mode=1  # switch mode
                print('tagper is stopped')
else: #mode = 1
# raw_select_pre = raw_select_pre_t
# raw_select_pre_t = 0
label_pre,select_pre = raw_label_pre,raw_select_pre
end_time = time.time()
step_time = end_time - start_time
total_time = step_time + total_time
train_time_file.write('{} {:.6} {:.6}\n'.format(step, step_time, total_time))
kf_file.write('{} {} {} {:.2%} {:.2%} {:.2%} {:.2%} {:.2%}\n'.format(step,nums_to_select,nums_to_select_tagper,raw_label_pre,raw_select_pre,raw_select_pre_t,t_label_pre,t_select_pre))
dataf_file.write(
'{} {:.2%} {:.2%}\n'.format(step, label_pre, select_pre))
dataf_file.close()
train_time_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Progressive Learning for One-Example re-ID')
parser.add_argument('-d', '--dataset', type=str, default='mars',
choices=datasets.names())
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('-f', '--fea', type=int, default=1024)
parser.add_argument('--EF', type=int, default=10)
    parser.add_argument('--t', type=float, default=2)  # no longer the tagper sampling multiplier; now the number of steps to sample ahead
parser.add_argument('--exp_order', type=str, default='0')
parser.add_argument('--exp_name', type=str, default='atm')
parser.add_argument('--exp_aim', type=str, default='for paper')
parser.add_argument('--run_file',type=str,default='train.py')
parser.add_argument('--log_name',type=str,default='pl_logs')
parser.add_argument('--topk',type=int,default=2)
parser.add_argument('--vsm_lambda',type=float,default=0.5)
parser.add_argument('--resume', type=str, default='Yes')
parser.add_argument('--max_frames', type=int, default=900)
parser.add_argument('--loss', type=str, default='ExLoss', choices=['CrossEntropyLoss', 'ExLoss'])
parser.add_argument('--init', type=float, default=-1)
parser.add_argument('-m', '--momentum', type=float, default=0.5)
parser.add_argument('-e', '--epochs', type=int, default=70)
parser.add_argument('-s', '--step_size', type=int, default=55)
parser.add_argument('--lamda', type=float, default=0.5)
main(parser.parse_args())
```
#### File: reid/models/end2end.py
```python
from __future__ import absolute_import
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn import init
import torch
import torchvision
import math
from .resnet import *
__all__ = ["End2End_AvgPooling"]
def conv1_1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class AvgPooling(nn.Module):
def __init__(self, input_feature_size, num_classes, is_output_feature, embeding_fea_size=1024, dropout=0.5, classifier="CrossEntropyLoss"):
super(self.__class__, self).__init__()
self.is_output_feature = is_output_feature
# embeding
self.embeding_fea_size = embeding_fea_size
IDE_fea_size = 2048
Ex_fea_size = 2048
self.middel_conv = conv1_1(2048,2048,1)
init.kaiming_normal_(self.middel_conv.weight,mode='fan_out')
self.middle_norm = nn.BatchNorm2d(2048)
init.constant_(self.middle_norm.weight,1)
init.constant_(self.middle_norm.bias, 0)
self.middle_relu = nn.ReLU(inplace=True)
self.IDE_embeding = nn.Linear(input_feature_size, IDE_fea_size)
self.IDE_embeding_bn = nn.BatchNorm1d(IDE_fea_size)
self.Ex_embeding = nn.Linear(input_feature_size, Ex_fea_size)
self.Ex_embeding_bn = nn.BatchNorm1d(Ex_fea_size)
init.kaiming_normal_(self.IDE_embeding.weight, mode='fan_out')
init.constant_(self.IDE_embeding.bias, 0)
init.constant_(self.IDE_embeding_bn.weight, 1)
init.constant_(self.IDE_embeding_bn.bias, 0)
init.kaiming_normal_(self.Ex_embeding.weight, mode='fan_out')
init.constant_(self.Ex_embeding.bias, 0)
init.constant_(self.Ex_embeding_bn.weight, 1)
init.constant_(self.Ex_embeding_bn.bias, 0)
self.drop = nn.Dropout(dropout)
self.classify_fc = nn.Linear(IDE_fea_size, num_classes, bias=True)
init.normal_(self.classify_fc.weight, std = 0.001)
init.constant_(self.classify_fc.bias, 0)
self.cls = classifier
def forward(self, inputs):
pool5 = inputs.mean(dim = 1)
#pool5 = inputs
if (not self.training) and self.is_output_feature:
pool5 = pool5.view(pool5.size(0),-1)
return F.normalize(pool5, p=2, dim=1)
pool5 = self.middel_conv(pool5)
pool5 = self.middle_norm(pool5)
#pool5 = self.middle_relu(pool5)
pool5 = pool5.view(pool5.size(0),-1)
metric_feature = F.normalize(pool5, p=2, dim=1)
""" IDE """
# embeding
net = self.drop(pool5)
net = self.IDE_embeding(net)
net = self.IDE_embeding_bn(net)
net = F.relu(net)
net = self.drop(net)
# classifier
predict = self.classify_fc(net)
if (not self.training) and (not self.is_output_feature):
return predict
""" Exclusive """
net = self.Ex_embeding(pool5)
net = self.Ex_embeding_bn(net)
net = F.normalize(net, p=2, dim=1)
Ex_feat = self.drop(net)
return predict, Ex_feat,metric_feature
class End2End_AvgPooling(nn.Module):
def __init__(self, pretrained=True, dropout=0, num_classes=0, is_output_feature=True, embeding_fea_size=1024, classifier="CrossEntropyLoss", fixed_layer=True):
super(self.__class__, self).__init__()
self.CNN = resnet50(dropout=dropout, fixed_layer=fixed_layer)
self.avg_pooling = AvgPooling(input_feature_size=2048, num_classes=num_classes, dropout=dropout, is_output_feature=is_output_feature, classifier=classifier,
embeding_fea_size = embeding_fea_size)
def forward(self, x):
assert len(x.data.shape) == 5
# reshape (batch, samples, ...) ==> (batch * samples, ...)
oriShape = x.data.shape
x = x.view(-1, oriShape[2], oriShape[3], oriShape[4])
# resnet encoding
resnet_feature = self.CNN(x)
# reshape back into (batch, samples, ...)
#resnet_feature = resnet_feature.view(oriShape[0], oriShape[1], -1)
feature_size = resnet_feature.data.shape
# reshape back into (batch, samples, ...)
resnet_feature = resnet_feature.view(oriShape[0], oriShape[1], feature_size[1],feature_size[2],feature_size[3])
# avg pooling
# if eval and cut_off_before_logits, return predict; else return avg pooling feature
predict = self.avg_pooling(resnet_feature)
return predict
```
#### File: One-shot-Person-Re-ID-ATM/reid/trainers.py
```python
from __future__ import print_function, absolute_import
import time
import torch
from torch import nn
from torch.autograd import Variable
from .evaluation_metrics import accuracy
from .utils.meters import AverageMeter
from .dist_regularization_loss import DistRegularizeLoss
def cycle(iterable):
while True:
for x in iterable:
yield x
class BaseTrainer(object):
def __init__(self, model, criterion, lamda, fixed_layer=True):
super(BaseTrainer, self).__init__()
self.model = model
self.ide_criterion = nn.CrossEntropyLoss().cuda()
self.u_criterion = criterion
self.fixed_layer = fixed_layer
self.label_ratio = lamda
self.u_re_ratio = 0.8
self.regularize_loss = DistRegularizeLoss()
def train(self, epoch, ide_data_loader, u_loader, optimizer, use_unselcted_data, print_freq=30):
self.model.train()
if self.fixed_layer:
            # The following code is used to keep the BN layers in the first three blocks fixed
fixed_bns = []
for idx, (name, module) in enumerate(self.model.module.named_modules()):
if name.find("layer3") != -1:
assert len(fixed_bns) == 22
break
if name.find("bn") != -1:
fixed_bns.append(name)
module.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
precisions = AverageMeter()
end = time.time()
u_loader = iter(cycle(u_loader))
trainlog = TrainingLog(epoch, print_freq, len(ide_data_loader))
for i, ide_inputs in enumerate(ide_data_loader):
data_time.update(time.time() - end)
# ide forward
ide_inputs, ide_targets = self._parse_data(ide_inputs, 'ide')
ide_loss, ide_prec1,ide_feats = self._forward(ide_inputs, ide_targets, 'ide')
weighted_loss = ide_loss
u_loss, u_prec1 = ide_loss, ide_prec1
regular_loss = ide_loss
            # unselected part forward
if use_unselcted_data:
u_inputs = next(u_loader)
u_inputs, u_targets = self._parse_data(u_inputs, 'u')
u_loss, u_prec1 ,u_feats= self._forward(u_inputs, u_targets, 'u')
regular_loss = self.regularize_loss(ide_feats,u_feats)
weighted_loss = self.get_weighted_loss(ide_loss, u_loss,regular_loss)
# update weighted loss and bp
optimizer.zero_grad()
weighted_loss.backward()
optimizer.step()
trainlog.update(i, weighted_loss, ide_loss, u_loss,regular_loss,ide_prec1, u_prec1, ide_targets)
def get_weighted_loss(self, ide_loss, u_loss,regular_loss):
weighted_loss = ide_loss * self.label_ratio + (1 - self.label_ratio)*(u_loss *self.u_re_ratio+
(1-self.u_re_ratio) * regular_loss)
return weighted_loss
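        # Illustrative expansion of the formula above (u_re_ratio = 0.8 as set in the
        # constructor; a label_ratio of 0.5 is only an example value):
        #   weighted = 0.5 * ide_loss + 0.5 * (0.8 * u_loss + 0.2 * regular_loss)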
def _parse_data(self, inputs):
raise NotImplementedError
def _forward(self, inputs, targets):
raise NotImplementedError
class Trainer(BaseTrainer):
def _parse_data(self, inputs, mode):
imgs, _, pids, indexs, _ = inputs
inputs = Variable(imgs, requires_grad=False)
if mode == "u":
targets = Variable(indexs.cuda())
elif mode == "ide":
targets = Variable(pids.cuda())
else:
raise KeyError
return inputs, targets
def _forward(self, inputs, targets, mode):
ide_preds, u_feats ,metric_feats= self.model(inputs)
if mode == "ide":
# id predictions
ide_loss = self.ide_criterion(ide_preds, targets)
ide_prec, = accuracy(ide_preds.data, targets.data)
ide_prec = ide_prec[0]
return ide_loss, ide_prec,metric_feats
elif mode == 'u':
# u predictions
u_loss, outputs = self.u_criterion(u_feats, targets)
u_prec, = accuracy(outputs.data, targets.data)
u_prec = u_prec[0]
return u_loss, u_prec,metric_feats
else:
raise KeyError
class TrainingLog():
def __init__(self, epoch, print_freq, data_len):
self.batch_time = AverageMeter()
self.losses = AverageMeter()
self.ide_losses = AverageMeter()
self.u_losses = AverageMeter()
self.regular_loss = AverageMeter()
self.ide_precisions = AverageMeter()
self.u_precisions = AverageMeter()
self.time = time.time()
self.epoch = epoch
self.print_freq = print_freq
self.data_len = data_len
def update(self, step, weighted_loss, ide_loss, u_loss,regular_loss,ide_prec, u_prec, targets):
# update time
t = time.time()
self.batch_time.update(t - self.time)
self.time = t
# weighted loss
self.losses.update(weighted_loss.item(), targets.size(0))
self.ide_losses.update(ide_loss.item(), targets.size(0))
self.u_losses.update(u_loss.item())
self.regular_loss.update(regular_loss.item())
# id precision
self.ide_precisions.update(ide_prec, targets.size(0))
self.u_precisions.update(u_prec, targets.size(0))
if (step + 1) % self.print_freq == 0:
print('Epoch: [{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Loss {:.3f} ({:.3f})\t'
'IDE_Loss {:.3f} ({:.3f})\t'
'Re_Loss {:.3f} ({:.3f})\t'
'ExLoss {:.3f} ({:.3f})\t'
'IDE_Prec {:.1%} ({:.1%})\t'
'ExPrec {:.1%} ({:.1%})\t'
.format(self.epoch, step + 1, self.data_len,
self.batch_time.val, self.batch_time.avg,
self.losses.val, self.losses.avg,
self.ide_losses.val, self.ide_losses.avg,
self.regular_loss.val, self.regular_loss.avg,
self.u_losses.val, self.u_losses.avg,
self.ide_precisions.val, self.ide_precisions.avg,
self.u_precisions.val, self.u_precisions.avg))
``` |
{
"source": "joselynzhao/One-shot-Person-Re-ID-with-Variance-Subsampling-Method",
"score": 2
} |
#### File: utils/data/sampler.py
```python
from __future__ import absolute_import
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data.sampler import (
Sampler, SequentialSampler, RandomSampler, SubsetRandomSampler,
WeightedRandomSampler)
class RandomIdentitySampler(Sampler):
def __init__(self, data_source, num_instances=1):
self.data_source = data_source
self.num_instances = num_instances
self.index_dic = defaultdict(list)
for index, (_, pid, _) in enumerate(data_source):
self.index_dic[pid].append(index)
self.pids = list(self.index_dic.keys())
self.num_samples = len(self.pids)
def __len__(self):
return self.num_samples * self.num_instances
def __iter__(self):
indices = torch.randperm(self.num_samples)
ret = []
for i in indices:
pid = self.pids[i]
t = self.index_dic[pid]
if len(t) >= self.num_instances:
t = np.random.choice(t, size=self.num_instances, replace=False)
else:
t = np.random.choice(t, size=self.num_instances, replace=True)
ret.extend(t)
return iter(ret)
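# A small self-contained sketch (dummy data, images replaced by None) showing how the
# sampler yields num_instances indices per identity; the (img, pid, camid) tuple layout
# is assumed from the unpacking in __init__ above.
if __name__ == "__main__":
    dummy = [(None, pid, 0) for pid in [0, 0, 0, 1, 1, 2, 2, 2]]
    sampler = RandomIdentitySampler(dummy, num_instances=2)
    print(list(sampler))  # two indices for each of the three identities, in random identity order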
``` |
{
"source": "joselynzhao/plt_draw",
"score": 3
} |
#### File: joselynzhao/plt_draw/data_prehandle.py
```python
from data import *
import numpy as np
import math
def get_len(train_name):
return len(train_name["mAP"])
def percent_gradually_5_k15():
Nu = 1494.0
k = 15
x = np.linspace(1,gradually_5_k15["length"],gradually_5_k15["length"])
def fun(x):
num = min(math.ceil(-(Nu/2) * np.cos((k/100) * (x-1))+(Nu/2)),Nu)
return round(num)
xx = list(map(fun,x))
print("percent_gradually_5_k15",xx)
return xx
def percent_gradually_5_10():
Nu = 1494.0
q = 1.0
ef = 5
x = np.linspace(1,gradually_5_10["length"],gradually_5_10["length"])
def fun(x):
num = min(math.ceil(Nu * math.pow(x-1,q)*(ef/100)),Nu)
return round(num)
xx = list(map(fun,x))
print("percent_gradually_5_10",xx)
return xx
def percent_gradually_5_13():
Nu = 1494.0
q = 1.3
ef = 5
x = np.linspace(1,gradually_5_13["length"],gradually_5_13["length"])
def fun(x):
num = min(math.ceil(Nu * math.pow(x-1,q)*(ef/100)),Nu)
return round(num)
xx = list(map(fun,x))
print("percent_gradually_5_13",xx)
return xx
def percent_gradually_5_15():
Nu = 1494.0
q = 1.5
ef = 5
x = np.linspace(1,gradually_5_15["length"],gradually_5_15["length"])
def fun(x):
num = min(math.ceil(Nu * math.pow(x-1,q)*(ef/100)),Nu)
return round(num)
xx = list(map(fun,x))
print("percent_gradually_5_15",xx)
return xx
def percent_gradually_11_15():
Nu = 1494.0
q = 1.5
ef = 1.1
x = np.linspace(1,gradually_11_15["length"],gradually_11_15["length"])
def fun(x):
num = min(math.ceil(Nu * math.pow(x-1,q)*(ef/100)),Nu)
return round(num)
xx = list(map(fun,x))
print("percent_gradually_11_15",xx)
return xx
def percent_gradually_55_25():
Nu = 1494.0
q = 2.5
ef = 0.055
x = np.linspace(1,gradually_11_15["length"],gradually_11_15["length"])
def fun(x):
num = min(math.ceil(Nu * math.pow(x-1,q)*(ef/100)),Nu)
return round(num)
xx = list(map(fun,x))
print("percent_gradually_55_25",xx)
return xx
def percent_gradually_223_05():
Nu = 1494.0
q = 0.5
ef = 22.3
x = np.linspace(1,gradually_223_05["length"],gradually_223_05["length"])
def fun(x):
num = min(math.ceil(Nu * math.pow(x-1,q)*(ef/100)),Nu)
return round(num*100/Nu,2)
xx = list(map(fun,x))
print("percent_gradually_55_25",xx)
return xx
def percent_gradually_30_04():
Nu = 1494.0
q = 0.4
ef = 30
x = np.linspace(1,gradually_30_04["length"],gradually_30_04["length"])
def fun(x):
num = min(math.ceil(Nu * math.pow(x-1,q)*(ef/100)),Nu)
return round(num*100/Nu,2)
# return round(num)
xx = list(map(fun,x))
print("percent_gradually_30_04",xx)
return xx
def get_train_pre(train_name):
Ln=702
select_pre = train_name["select_pre"]
select_num = train_name["select_num"]
def fun(pre,num):
return round((math.floor(pre*num/100)+Ln)*100/(num+Ln),2)
train_pre = list(map(fun,select_pre,select_num))
print(train_name["title"],train_pre)
def get_train_pre_all():
train_list = [EFnorm_50_10]
for train_name in train_list:
get_train_pre(train_name)
if __name__=="__main__":
# print(get_len(gradually_5_15))
# percent_gradually_5_k15()
# get_train_pre(gradually_223_05)
# percent_gradually_30_04()
get_train_pre_all()
``` |
{
"source": "joselynzhao/Python-data-structure-and-algorithm",
"score": 4
} |
#### File: algorithm/Divide/zuida.py
```python
def get_max(max_list):
if len(max_list)==2:
return max(max_list[0],max_list[1])
else:
return max_list[0]
def get_max_list(init_list):
n = len(init_list)
if n<=2:
return get_max(init_list)
    left_list,right_list = init_list[:n//2],init_list[n//2:]  # "//" is integer (floor) division
    left_max,right_max = get_max_list(left_list),get_max_list(right_list)
    return get_max([left_max,right_max])  # wrap the two results in a list before passing them on
test_list = [12,2,34,34,454,2,34,64,43]
print(get_max_list(test_list))
```
#### File: algorithm/enumerate/gui.py
```python
def jiecheng(n):
if n == 1:
return 1
else:
return n*jiecheng(n-1)
print(jiecheng(10))
```
#### File: algorithm/xianxingbiao/cha.py
```python
def insert_list(L,i,element):
L_length = len(L)
    if i< 1 or i>L_length:  # arguably i == L_length + 1 should be allowed too
        return False
    if i <= L_length:  # shift everything after position i one slot to the right
        for k in range(i-1,L_length)[::-1]:  # iterate over the tail in reverse order
print(k)
x = [L[k]]
print(x)
# L[k+1] = L[k]
L[k+1:k+2] = [L[k]]
L[i-1] = element
print(L)
return True
L = [1,2,3,4]
insert_list(L,2,0)
L[2:3] =[L[0]]
print(L)
```
#### File: data_structure/list/yield.py
```python
def foo(num):
print("starting...")
while num<10:
num=num+1
yield num
for n in foo(0):
print(n)
print(list(foo(0))) # [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
```
#### File: data_structure/tuple/lishi.py
```python
from _collections import deque
def search(lines, pattern,history = 5):
previous_lines = deque(maxlen=history)
for line in lines:
if pattern in line:
            yield line, previous_lines  # a generator with yield decouples the search logic from the code that consumes the results
previous_lines.append(line)
# Example use on a file
if __name__ =="__main__":
with open('123.txt') as f:
for line,prevlines in search(f,'python',5):
for pline in prevlines:
print(pline)
print(line)
print('-'*20)
    q = deque(maxlen=3)  # create a fixed-length queue
q.append(1)
q.append(2)
q.append(3)
print(q)
q.append(4)
print(q)
```
#### File: Python-data-structure-and-algorithm/leecode/20060801.py
```python
def maxArea(height):
    '''brute force: try every pair'''
# max = 0
# # print(index)
# for i in range(len(height)):
# for j in range(i,len(height)):
# area = min(height[i],height[j])*abs(i-j)
# if area>max:
# max = area
# print(max)
    '''two pointers'''
i,j = 0,len(height)-1
max = 0
while(i!=j):
        # compute the current area
        area = min(height[j],height[i])*(j-i)  # j - i is non-negative
if area>max:
max = area
if height[i]>=height[j]:
j-=1
else:
i+=1
print(max)
maxArea([1,8,6,2,5,4,8,3,7])
```
#### File: Python-data-structure-and-algorithm/leecode/55.py
```python
def canJump( nums) :
if len(nums)==1:
return True
flag = [1 if len(nums) - 1 - i <= nums[i] else 0 for i in range(len(nums) - 1)]
    # handle the recursion termination conditions first
if sum(flag)==0:
return False
if flag[0]==1:
return True
    # otherwise recurse into the subproblem
is_out = 0
for i in range(len(flag)-1,0,-1):
if is_out:
break
if flag[i]==1:
is_out = 1
return canJump(nums[:i+1])
# return False
# a reference greedy solution
class Solution:
    def canJump(self, nums) :
        max_i = 0  # initialize the farthest reachable position
        for i, jump in enumerate(nums):  # i is the current position, jump the max jump length from it
            if max_i>=i and i+jump>max_i:  # if position i is reachable and it extends the farthest reach
                max_i = i+jump  # update the farthest reachable position
return max_i>=i
new_nums = [2,3,1,1,4]
c = canJump(new_nums)
print(c)
```
#### File: Python-data-structure-and-algorithm/leecode/84.py
```python
def get_neighbor(index):
# index = [3,2,5,0,4,1]
neighbor = [1]*len(index)
for i,e in enumerate(index):
pre = index[:i]
cur = e
while(cur+1 in pre):
neighbor[i]+=1
cur+=1
cur = e
while(cur-1 in pre):
neighbor[i]+=1
cur-=1
return neighbor
def largestRectangleArea(heights):  # this version exceeds the time limit
if len(heights) == 0:
return 0
import numpy as np
heights = np.array(heights)
index = np.argsort(-heights)
neighbor = [1] * len(index)
for i, e in enumerate(index):
pre = index[:i]
cur = e
while (cur + 1 in pre):
neighbor[i] += 1
cur += 1
cur = e
while (cur - 1 in pre):
neighbor[i] += 1
cur -= 1
max = heights[index[0]]
# print(max)
for i,e in enumerate(index):
cur = heights[e]*neighbor[i]
if cur>max:
max = cur
return max
heights = [2,1,5,6,2,3]
print(largestRectangleArea(heights))
# get_neighbor()
```
#### File: joselynzhao/Python-data-structure-and-algorithm/pachong3.py
```python
import time
from bs4 import BeautifulSoup  # parse the fetched pages
import sys
import requests
import re
import importlib
importlib.reload(sys)  # encoding conversion; Python 3 defaults to utf-8, so this is usually unnecessary
# ff = open('test.txt', 'w')
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, compress',
'Accept-Language': 'en-us;q=0.5,en;q=0.3',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
}  # request headers
ff = open('result.txt','w')
def getfromBaidu():
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
global order
order = 0
for k in range(1, 50):
geturl(k)
    end = time.perf_counter()
print(end - start)
def geturl(k):
number = str((k - 1) * 10)
path = 'https://www.baidu.com/s?wd=%E6%B6%82%E4%BA%9A%E5%BA%86&pn='+number+'&oq=%E6%B6%82%E4%BA%9A%E5%BA%86&ie=utf-8&usm=1&rsv_pq=cfb1f9ec00028b62&rsv_t=1bf8iGvhHo%2FmOTo4xluWlPdIN4Nnp1CaUvtAHCfY%2F2RIoqOECLttdDT5xqU&rsv_page=1'
# path = 'https://www.baidu.com/s?wd=%E5%92%96%E5%95%A1&pn=' + number + '&oq=%E5%92%96%E5%95%A1&ie=utf-8&usm=1&rsv_pq=9ccd7f6500120ebb&rsv_t=d92fDeHr8TAXzN%2FuqzNW3xd3BcU3lunThKY2lkUUobFc3Ihjx46MPW4iNbc'
# print(path)
content = requests.get(path, headers=headers)
    # parse the html with BeautifulSoup
soup = BeautifulSoup(content.text, 'html.parser')
tagh3 = soup.find_all('div', {'class', 'result c-container '})
# print(tagh3)
for h3 in tagh3:
try:
global order
order +=1
title = h3.find(name="h3", attrs={"class": re.compile("t")}).find('a').text.replace("\"", "")
print(title)
# print(n)
ff.write('\n'+str(order)+'~ ')
ff.write(title+' ~ ')
except:
title = ' ~ '
try:
url = h3.find(name="a", attrs={"class": re.compile("c-showurl")}).get('href')
print(url + '\n')
ff.write(url+' ~ ')
# print('\n')
except:
url = ' ~ '
try:
abstract = h3.find(name="div", attrs={"class": re.compile("c-abstract")}).text.replace("\"", "")
print(abstract)
ff.write(abstract)
except:
abstract = ''
# ff.write('\n')
if __name__ == '__main__':
getfromBaidu()
ff.close()
```
#### File: joselynzhao/Python-data-structure-and-algorithm/pachong4.py
```python
import urllib.request as ure
import urllib.parse
import string
import urllib
import re
import random
# use several user_agents to avoid Baidu rate-limiting a single IP
user_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0', \
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0', \
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533+ \
(KHTML, like Gecko) Element Browser 5.0', \
'IBM WebExplorer /v0.94', 'Galaxy/1.0 [en] (Mac OS X 10.5.6; U; en)', \
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)', \
'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14', \
'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) \
Version/6.0 Mobile/10A5355d Safari/8536.25', \
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/28.0.1468.0 Safari/537.36', \
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)']
def baidu_search(keyword, pn):
p = {'wd': keyword}
res = ure.urlopen(("https://www.baidu.com/s?" + urllib.parse.urlencode(p) + "&pn={0}&cl=3&rn=100").format(pn))
html = res.read()
return html
def getList(regex, text):
arr = []
res = re.findall(regex, text)
if res:
for r in res:
arr.append(r)
return arr
def getMatch(regex, text):
res = re.findall(regex, text)
if res:
return res[0]
return ""
def clearTag(text):
p = re.compile(u'<[^>]+>')
retval = p.sub("", text)
return retval
def geturl(keyword):
for page in range(10):
pn = page * 100 + 1
html = baidu_search(keyword, pn)
content = str(html)
arrList = getList(u"<table.*?class=\"result\".*?>.*?<\/a>", content)
for item in arrList:
regex = u"<h3.*?class=\"t\".*?><a.*?href=\"(.*?)\".*?>(.*?)<\/a>"
link = getMatch(regex, item)
url = link[0]
            # extract the title
# title = clearTag(link[1]).encode('utf8')
try:
domain = ure.Request(url)
r = random.randint(0, 11)
domain.add_header('User-agent', user_agents[r])
domain.add_header('connection', 'keep-alive')
response = ure.urlopen(domain)
uri = response.geturl()
print(uri)
except:
continue
if __name__ == '__main__':
geturl('python')
```
#### File: joselynzhao/Python-data-structure-and-algorithm/pachong.py
```python
import requests
from lxml import etree
# import BeautifulSoup
# import sys
# reload(sys)
# sys.setdefaultencoding("utf-8")
import importlib,sys
importlib.reload(sys)
import re
def get_page(url):
url_num = re.search('(?<=\/)[0-9]+(?=\.)',url)
url_num = url_num.group()
url_num_1 = int(url_num) + 1
url = url.replace(url_num,str(url_num_1))
return url
def getfromBaidu(word):
list=[]
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, compress',
'Accept-Language': 'en-us;q=0.5,en;q=0.3',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'
}
baiduurl = 'http://www.baidu.com'
url = 'http://www.baidu.com.cn/s?wd=' + word + '&cl=3'
html = requests.get(url=url,headers=headers)
path = etree.HTML(html.content)
    # k controls the range of result pages crawled
for k in range(1, 20):
print(url)
# 取出内容
path = etree.HTML(requests.get(url, headers).content)
print(path)
flag = 10
if k == 1:
flag = 10
for i in range(1, flag):
            # get the title
sentence = ""
for j in path.xpath('//*[@id="%d"]/h3/a//text()'%((k-1)*10+i)):
sentence+=j
            print(sentence)  # print the title
            # get the real URL
try:
url_href = path.xpath('//*[@id="%d"]/h3/a/@href'%((k-1)*10+i))
url_href = ''.join(url_href)
baidu_url = requests.get(url=url_href, headers=headers, allow_redirects=False)
                real_url = baidu_url.headers['Location']  # the resolved original page address
                print(real_url)  # print the URL
except:
print("error",sentence,url_href)
            # get the description
# res_abstract = path.xpath('//*[@id="%d"]/div[@class="c-abstract"]'%((k-1)*10+i))
# if res_abstract:
# abstract = res_abstract[0].xpath('string(.)')
# else:
# res_abstract = path.xpath('//*[@id="%d"]/div/div[2]/div[@class="c-abstract"]' % ((k - 1) * 10 + i))
# if res_abstract:
# abstract = res_abstract[0].xpath('string(.)')
            # print(abstract)  # print the description
zj_info = path.xpath('//*[@id="page"]/h3/a[%d]/@href'%(flag))
print(len(zj_info))
# url = baiduurl+zj_info[0] if zj_info else None
# url = get_page(url)
return
# main test entry point
if __name__ == '__main__':
print(getfromBaidu('涂亚庆'))
``` |
{
"source": "joselynzhao/Pytorch_advance",
"score": 2
} |
#### File: Pytorch_advance/Define/04.py
```python
'''
Specifically for vision, there is a package called torchvision, which provides data
loaders for common datasets such as ImageNet, CIFAR10 and MNIST (torchvision.datasets)
and image transform modules, used together with torch.utils.data.DataLoader.
'''
'''
For this tutorial we use the CIFAR10 dataset. It has ten classes:
'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'.
CIFAR-10 images are 3x32x32: three RGB colour channels of 32x32 pixels each.
'''
'''
Training an image classifier: we will do the following steps in order:
1. Load and normalize the CIFAR10 training and test datasets using torchvision
2. Define a convolutional neural network
3. Define a loss function
4. Train the network on the training data
5. Test the network on the test data
'''
'''Load and normalize CIFAR10: using torchvision, loading CIFAR10 is straightforward.'''
import torch
import torchvision
import torchvision.transforms as transforms
'''The output of torchvision datasets are PILImages in the range [0, 1]; we convert them to tensors normalized to the range [-1, 1].'''
transform = transforms.Compose(
[transforms.ToTensor(),transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))]
)
trainset = torchvision.datasets.CIFAR10(root='./data',train = True,download=True,transform=transform)  # the dataset is downloaded automatically
trainloader = torch.utils.data.DataLoader(trainset,batch_size = 4,shuffle = True,num_workers = 2)  # note the batch_size of 4
testset = torchvision.datasets.CIFAR10(root='./data',train = False, download=True,transform= transform)
testloader = torch.utils.data.DataLoader(testset,batch_size = 4, shuffle = False, num_workers = 2)  # shuffle randomizes the sample order each epoch
classes= ('plane', 'car', 'bird', 'cat',
          'deer', 'dog', 'frog', 'horse', 'ship', 'truck')  # this is a tuple of class names
# let's show a few of the training images
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
img = img /2 +0.5 # unnormalize
npimg = img.numpy()
    plt.imshow(np.transpose(npimg,(1,2,0)))  # reorder (C, H, W) to (H, W, C) for matplotlib
    plt.show()
dataiter = iter(trainloader)  # iter() gives an iterator over the DataLoader
images,labels = next(dataiter)  # each step yields a batch of (images, labels)
# show images
imshow(torchvision.utils.make_grid(images))  # make_grid tiles the batch into a single image
print(''.join('%5s'% classes[labels[j]] for j in range(4)))  # 4 because batch_size = 4
'''Define a convolutional neural network: copy the network from the neural networks section and adapt it for 3-channel images (it was previously defined for 1 channel).'''
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net,self).__init__()
self.conv1 = nn.Conv2d(3,6,5)
        self.pool = nn.MaxPool2d(2,2)  # pooling window size and stride (both 2)
        self.conv2 = nn.Conv2d(6,16,5)  # the last argument is the kernel size
        self.fc1 = nn.Linear(16 * 5*5,120)
        self.fc2 = nn.Linear(120,84)
        self.fc3 = nn.Linear(84,10)
    def forward(self,x):  # the forward pass
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1,16*5*5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
'''Define a loss function and an optimizer: we use cross-entropy as the loss and SGD with momentum as the optimizer.'''
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),lr = 0.001, momentum = 0.9)  # the optimizer defines how parameters are updated after backprop
'''Train the network: this is where things get interesting.
We simply loop over the data iterator and feed the inputs to the network and the optimizer.'''
# for epoch in range(2):
# running_loss = 0.0
# for i, data in enumerate(trainloader,0):
# inputs,labels = data
# optimizer.zero_grad()
# output = net(inputs)
# loss = criterion(output,labels)
# loss.backward()
# optimizer.step()
#
# running_loss +=loss.item()
# if i%2000 == 1999:
# print('[%d, %5d] loss: %.3f' %
# (epoch + 1, i + 1, running_loss / 2000))
# running_loss = 0.0
print('Finished Training')
'''OK, first step: display an image from the test set to get familiar with it.'''
outputs = net(images)
'''The outputs are scores for the ten classes; the higher the score for a class,
the more the network thinks the image belongs to that class. So let's print the most likely class labels:'''
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]for j in range(4)))
# the results look reasonable, so let's see how the network performs on the whole dataset
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
'''That looks better than chance, which is 10% accuracy (randomly picking one of 10 classes). The network seems to have learned something.'''
# class_correct = list(0. for i in range(10))
# class_total = list(0. for i in range(10))
# with torch.no_grad():
# for data in testloader:
# images,labels = data
# outputs = net(images)
# _,predicted = torch.max(outputs,1)
# c = (predicted == labels).squeeze()
# for i in range(4):
# label = labels[i]
# class_correct[label] +=c[i].item()
# class_correct[label] +=1
#
# for i in range(10):
# print('Accuracy of %5s : %2d %%' % (
# classes[i], 100 * class_correct[i] / class_total[i]))
'''So what next?
How do we run these neural networks on the GPU?
Training on the GPU: just as you transfer a tensor to the GPU, you transfer the whole network.
If CUDA is available, we first define our device as the first visible cuda device.'''
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
print(device)
'''This method recursively goes over all modules and converts their parameters and buffers to CUDA tensors.'''
net.to(device)
'''Remember that you also have to send the inputs and targets to the GPU at every step:'''
inputs, labels = inputs.to(device), labels.to(device)
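# Note: the line above only makes sense inside the training loop, where `inputs` and
# `labels` come from the DataLoader. A compact sketch of the device-aware training step
# (same names as the commented-out loop earlier in this file):
#   for epoch in range(2):
#       for inputs, labels in trainloader:
#           inputs, labels = inputs.to(device), labels.to(device)
#           optimizer.zero_grad()
#           loss = criterion(net(inputs), labels)
#           loss.backward()
#           optimizer.step()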
```
#### File: Pytorch_advance/Define/07.py
```python
'''Below we use PyTorch tensors and autograd to implement our two-layer network; we no longer need to implement the backward pass by hand:'''
import torch
# dtype = torch.float
# device = torch.device('cpu')
# # device = torch.device("cuda")
# # N is the batch size; D_in is the input dimension;
# # H is the hidden dimension; D_out is the output dimension.
# N, D_in, H, D_out = 64, 1000, 100, 10
#
# # Create random tensors to hold the inputs and outputs.
# # Setting requires_grad=False indicates that we do not need to compute
# # gradients with respect to these tensors during the backward pass.
# x = torch.randn(N, D_in, device=device, dtype=dtype)
# y = torch.randn(N, D_out, device=device, dtype=dtype)
#
# # Create random tensors for the weights.
# # Setting requires_grad=True indicates that we want to compute
# # gradients with respect to these tensors during the backward pass.
# w1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)
# w2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)
#
# learning_rate = 1e-6
# for t in range(500):
#     # Forward pass: compute predicted y using operations on tensors; since w1 and w2
#     # have requires_grad=True, these operations let PyTorch build a computational graph
#     # that enables automatic gradients, so we need not keep references to intermediate values.
# y_pred = x.mm(w1).clamp(min=0).mm(w2)
#
#     # Compute and print the loss using operations on tensors.
#     # loss is a tensor of shape ()
#     # loss.item() gets the Python number held in that tensor
# loss = (y_pred - y).pow(2).sum()
# print(t, loss.item())
#
#     # Use autograd to compute the backward pass; this computes the gradient of loss with
#     # respect to every tensor with requires_grad=True, filling w1.grad and w2.grad.
# loss.backward()
# with torch.no_grad():
# w1 -= learning_rate * w1.grad
# w2 -= learning_rate * w2.grad
#
#         # manually zero the gradients after the backward pass
# w1.grad.zero_()
# w2.grad.zero_()
# In this example we define our own custom autograd Function to perform the ReLU nonlinearity, and use it to implement our two-layer network:
class MyReLU(torch.autograd.Function):
    """
    We can implement our own custom autograd Functions by subclassing torch.autograd.Function
    and implementing the forward and backward passes that operate on tensors.
    """
@staticmethod
def forward(ctx, x):
"""
        In the forward pass we receive a context object and a tensor containing the input;
        we must return a tensor containing the output, and we can use the context object
        to cache objects for use in the backward pass.
"""
ctx.save_for_backward(x)
return x.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
"""
        In the backward pass we receive the context object and a tensor containing the
        gradient of the loss with respect to the output produced during the forward pass.
        We can retrieve cached data from the context object, and must compute and return
        the gradient of the loss with respect to the input of the forward pass.
"""
x, = ctx.saved_tensors
grad_x = grad_output.clone()
grad_x[x < 0] = 0
return grad_x
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# N is the batch size; D_in is the input dimension;
# H is the hidden dimension; D_out is the output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# create random input and output tensors
x = torch.randn(N, D_in, device=device)
y = torch.randn(N, D_out, device=device)
# create random tensors for the weights
w1 = torch.randn(D_in, H, device=device, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, requires_grad=True)
learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y using operations on tensors;
    # we use our custom ReLU by calling MyReLU.apply
    y_pred = MyReLU.apply(x.mm(w1)).mm(w2)
    # compute and print the loss
    loss = (y_pred - y).pow(2).sum()
    print(t, loss.item())
    # use autograd to compute the backward pass
    loss.backward()
    with torch.no_grad():
        # update the weights using gradient descent
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad
        # manually zero the gradients after the backward pass
w1.grad.zero_()
w2.grad.zero_()
# For comparison with the PyTorch autograd example above, we use TensorFlow to fit a simple two-layer network:
import tensorflow as tf
import numpy as np
# First we set up the computational graph.
# N is the batch size; D_in is the input dimension;
# H is the hidden dimension; D_out is the output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create placeholders for the inputs and targets;
# they will be filled with real data when the graph is executed.
x = tf.placeholder(tf.float32, shape=(None, D_in))
y = tf.placeholder(tf.float32, shape=(None, D_out))
# Create Variables for the weights and initialize them with random data.
# A TensorFlow Variable persists its value across executions of the graph.
w1 = tf.Variable(tf.random_normal((D_in, H)))
w2 = tf.Variable(tf.random_normal((H, D_out)))
# Forward pass: compute the predicted y using TensorFlow tensor operations.
# Note that this code does not actually perform any numeric computation;
# it merely builds the computational graph that we will execute later.
h = tf.matmul(x, w1)
h_relu = tf.maximum(h, tf.zeros(1))
y_pred = tf.matmul(h_relu, w2)
# compute the loss using TensorFlow tensor operations
loss = tf.reduce_sum((y - y_pred) ** 2.0)
# compute the gradients of the loss with respect to w1 and w2
grad_w1, grad_w2 = tf.gradients(loss, [w1, w2])
# Update the weights using gradient descent. To actually update them we need to evaluate
# new_w1 and new_w2 when executing the graph. Note that in TensorFlow updating the weights
# is part of the computational graph, whereas in PyTorch it happens outside the graph.
learning_rate = 1e-6
new_w1 = w1.assign(w1 - learning_rate * grad_w1)
new_w2 = w2.assign(w2 - learning_rate * grad_w2)
# Now that the graph is built, we start a TensorFlow session to actually execute it.
with tf.Session() as sess:
    # run the graph once to initialize the Variables w1 and w2
    sess.run(tf.global_variables_initializer())
    # create numpy arrays holding the actual data for the inputs x and targets y
    x_value = np.random.randn(N, D_in)
    y_value = np.random.randn(N, D_out)
    for _ in range(500):
        # Run the graph many times. On each run we use the feed_dict argument to bind
        # x_value to x and y_value to y; each run evaluates the loss, new_w1 and new_w2,
        # whose values are returned as numpy arrays.
loss_value, _, _ = sess.run([loss, new_w1, new_w2],
feed_dict={x: x_value, y: y_value})
print(loss_value)
# In this example we use the nn package to implement the two-layer network:
import torch
# N is the batch size; D_in is the input dimension
# H is the hidden dimension; D_out is the output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# create random input and output tensors
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Use the nn package to define our model as a sequence of layers. nn.Sequential is a module
# that contains other modules and applies them in sequence to produce its output.
# Each Linear module computes its output from the input with a linear function and holds
# internal tensors for its weight and bias. After constructing the model we use the .to()
# method to move it to the desired device.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
# The nn package also contains definitions of commonly used loss functions;
# here we use mean squared error (MSE) as our loss function.
# Setting reduction='sum' means we compute the sum of squared errors rather than the mean;
# this keeps it consistent with the hand-rolled loss examples above, though in practice it is
# more common to use the mean squared error by setting reduction='elementwise_mean'.
loss_fn = torch.nn.MSELoss(reduction='sum')
learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y by passing x to the model.
    # Module objects override the __call__ operator, so they can be called like functions;
    # doing so passes a tensor of input data to the module and it returns a tensor of output data.
    y_pred = model(x)
    # Compute and print the loss. We pass tensors containing the predicted and true values
    # of y, and the loss function returns a tensor containing the loss.
    loss = loss_fn(y_pred, y)
    print(t, loss.item())
    # zero the gradients before running the backward pass
    model.zero_grad()
    # Backward pass: compute the gradient of the loss with respect to all learnable parameters.
    # Internally, the parameters of each module are stored in tensors with requires_grad=True,
    # so this call computes gradients for all learnable parameters of the model.
    loss.backward()
    # Update the weights using gradient descent.
    # Each parameter is a tensor, so we can access its value and gradient just as before.
with torch.no_grad():
for param in model.parameters():
param -= learning_rate * param.grad
# In practice, however, we often train networks with more sophisticated optimizers such as AdaGrad, RMSProp or Adam.
import torch
# N is the batch size; D_in is the input dimension
# H is the hidden dimension; D_out is the output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# create random input and output tensors
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# define the model and the loss function using the nn package
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
# Use the optim package to define an Optimizer that will update the weights of the model for us.
# Here we use Adam; the optim package contains many other optimization algorithms.
# The first argument to the Adam constructor tells the optimizer which tensors it should update.
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(500):
y_pred = model(x)
loss = loss_fn(y_pred,y)
print(t,loss.item())
    # Before the backward pass, use the optimizer to zero the gradients of all tensors it will update (the learnable weights of the model)
    optimizer.zero_grad()
    # backward pass: compute the gradient of the loss with respect to the model parameters
    loss.backward()
    # calling step() on the Optimizer updates all of its parameters
optimizer.step()
# Custom nn modules
# In this example we build the two-layer network as a subclass of nn.Module:
import torch
class TwoLayerNet(torch.nn.Module):
def __init__(self,D_in,H,D_out):
"""
        In the constructor we instantiate two nn.Linear modules and assign them as member variables.
"""
super(TwoLayerNet,self).__init__()
self.linear1 = torch.nn.Linear(D_in,H)
self.linear2 = torch.nn.Linear(H,D_out)
def forward(self,x):
"""
        In the forward function we accept a tensor of input data and must return a tensor of output data.
        We can use the modules defined in the constructor as well as arbitrary (differentiable) operations on tensors.
"""
h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear2(h_relu)
return y_pred
# N is the batch size; D_in is the input dimension;
# H is the hidden dimension; D_out is the output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# create random input and output tensors
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# construct our model by instantiating the class defined above
model = TwoLayerNet(D_in, H, D_out)
# Construct the loss function and the optimizer. The call to model.parameters() in the
# SGD constructor will contain the learnable parameters of the two nn.Linear modules
# that are members of the model.
loss_fn = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    y_pred = model(x)  # this automatically calls forward()
    # compute and print the loss
loss = loss_fn(y_pred, y)
print(t, loss.item())
    # zero the gradients, run the backward pass and update the weights
optimizer.zero_grad()
loss.backward()
optimizer.step()
# We implement a rather odd model: a fully-connected ReLU network where each forward pass picks a random number of hidden layers between 1 and 4, reusing the same weights several times to compute them.
import random
import torch
class DynamicNet(torch.nn.Module):
def __init__(self,D_in,H,D_out):
"""
        In the constructor we construct three nn.Linear instances that will be used in the forward pass.
"""
super(DynamicNet, self).__init__()
self.input_linear = torch.nn.Linear(D_in,H)
self.middle_linear = torch.nn.Linear(H,H)
self.output_linear = torch.nn.Linear(H,D_out)
def forward(self,x):
"""
        For the forward pass of the model we randomly choose 0, 1, 2 or 3 and reuse the
        middle_linear module that many times to compute the hidden layers.
        Since each forward pass builds a dynamic computational graph, we can use normal
        Python control flow such as loops or conditional statements when defining the forward pass.
        Here we also see that it is perfectly safe to reuse the same module many times when
        defining a computational graph. This is a big improvement over Lua Torch, where each module could be used only once.
"""
h_relu = self.input_linear(x).clamp(min= 0)
for _ in range(random.randint(0,3)):
h_relu = self.middle_linear(h_relu).clamp(min=0)
y_pred = self.output_linear(h_relu)
return y_pred
'''How are the parameters updated when the same module is reused? (see the sketch at the end of this file)'''
# N is the batch size; D_in is the input dimension
# H is the hidden dimension; D_out is the output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# create random input and output tensors
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# construct our model by instantiating the class defined above
model = DynamicNet(D_in, H, D_out)
# Construct our loss function and Optimizer. Training this strange model with vanilla
# stochastic gradient descent is hard, so we use SGD with momentum.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
for t in range(500):
y_pred = model(x)
    # compute and print the loss
    loss = criterion(y_pred, y)  # called loss_fn in the earlier examples
    print(t, loss.item())
    # zero the gradients, run the backward pass and update the weights
optimizer.zero_grad()
loss.backward()
optimizer.step()
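# A small illustrative sketch (sizes chosen arbitrarily) answering the note above the
# DynamicNet training code: when one module is reused several times in a single forward
# pass, autograd accumulates the gradient contributions of every use into the same
# .grad tensor, and the optimizer then updates each parameter once per step.
shared = torch.nn.Linear(4, 4)
inp = torch.randn(2, 4)
out = shared(shared(inp)).sum()  # the same module applied twice
out.backward()
print(shared.weight.grad.shape)  # one accumulated gradient tensor: torch.Size([4, 4])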
``` |
{
"source": "joselynzhao/wardrobe",
"score": 3
} |
#### File: wardrobe/draft/main.py
```python
import os
import os.path as osp
import codecs
item_id = 0
# global item_id
class ITEM():
def __init__(self,attr_list,attr_value):
# global item_id
# self.item_id = item_id
        # item_id+=1  # (this is where the id would be incremented)
self.attr_list = attr_list
self.attr_value = attr_value
def get_attr_value(self):
return self.attr_value
class GROUP():
def __init__(self,group_name,group_attr):
self.group_name = group_name
self.attributes = group_attr
self.item_list = []
        self.__load_data__()  # restore the item_list data from disk
def __update_len_item(self):
self.num_item = len(self.item_list)
return self.num_item
def __storage_items__(self):
if not os.path.exists(osp.join('database', self.group_name)):
os.mkdir(osp.join('database', self.group_name))
try:
with open(osp.join('database', self.group_name,'group_data.txt'),'w') as file:
for item in self.item_list:
file.write('^'.join(item.attr_value))
file.write('\n')
print("the update of items is finished")
except:
print("创建文件失败!")
def __load_data__(self):
try:
with open(osp.join('database',self.group_name,'group_data.txt'),'r') as group_data_file:
group_data =group_data_file.readlines()
for line in group_data:
                    attr_values = line.strip().split('^')  # '^' is used as a dedicated field delimiter
if len(self.attributes) != len(attr_values):
print("the item and values of the attribute are not matched, creating item failed!")
else:
self.item_list.append(ITEM(self.attributes,attr_values))
self.__update_len_item()
except:
print("the database of {} is empty!".format(self.group_name))
def add_item(self):
print("please raw_input the value one by one, if not value, enter ' '")
attr_value = []
for attr in self.attributes:
value = raw_input(str(attr)+':')
attr_value.append(value)
self.item_list.append(ITEM(self.attributes,attr_value))
self.__storage_items__()
def show_info(self):
print('-'*10+self.group_name+'-'*10)
print("\t\t".join(self.attributes))
for item in self.item_list:
print('\t\t'.join(item.attr_value))
class WARDROBE():
def __init__(self):
self.group_list = []
self.__load_groups__()
def __update_len_group__(self):
self.num_group = len(self.group_list)
return self.num_group
def __storage_group_(self):
with open(osp.join('database','group_info.txt'),'w') as file:
for group in self.group_list:
file.write(group.group_name+'^')
file.write('^'.join(group.attributes))
file.write('\n')
print("the update of group is finished")
def __load_groups__(self):
try:
group_file = open(osp.join('database','group_info.txt'),'r')
            group_info = group_file.readlines()  # the first field is group_name, the rest are attribute names
for line in group_info:
data = line.strip().split('^')
group_name = data[0]
group_attr = data[1:]
self.group_list.append(GROUP(group_name,group_attr))
self.__update_len_group__()
except:
print("the database is empty!")
def get_group_name_list(self):
return [group.group_name for group in self.group_list]
def __add_group__(self,group_name,group_attr):
self.group_list.append(GROUP(group_name, group_attr))
self.__update_len_group__()
self.__storage_group_()
print('group {} is created successfully'.format(group_name))
def F_add_group(self):
group_name = raw_input('please raw_input group name:')
group_attr = raw_input('please raw_input group attribute with " " as division:').split()
if group_name in self.get_group_name_list():
print('the group named {} is exsisted.'.format(group_name))
else:
self.__add_group__(group_name,group_attr)
order = raw_input('would you want add items for it? [y/n]')
if order == 'y':
self.F_add_item_for_group(group_name)
def __add_item__(self,group_name):
group = [group for group in self.group_list if group.group_name == group_name][0]
group.add_item()
def F_add_item_for_group(self,group_name=None):
group_name = raw_input('please raw_input group name:') if group_name==None else group_name
if group_name not in self.get_group_name_list():
print('the group named {} is not exsisted'.format(group_name))
while(True):
self.__add_item__(group_name)
order = raw_input('would you want add item continue? [y/n]:')
if order == 'n':
break
def F_show_info(self):
for group in self.group_list:
group.show_info()
if __name__=='__main__':
wardrobe = WARDROBE()
while(True):
order = raw_input('raw_input 1 for show, 2 for add group, 3 for get item, 0 to exit:')
if order == '1':
wardrobe.F_show_info()
elif order == '2':
wardrobe.F_add_group()
elif order == '3':
wardrobe.F_add_item_for_group()
elif order == '0':
break
else:
print('raw_input error!')
# mainly supports adding and viewing entries
``` |
{
"source": "JoseM1101/web-empresa",
"score": 2
} |
#### File: webempresa/contact/views.py
```python
from django.shortcuts import render, redirect
from django.urls import reverse
from django.core.mail import EmailMessage
from .forms import ContactForm
# Create your views here.
def contact(request):
contact_form = ContactForm()
if request.method == 'POST':
contact_form = ContactForm(data=request.POST)
if contact_form.is_valid():
name = request.POST.get('name', '')
email = request.POST.get('email', '')
content = request.POST.get('content', '')
email = EmailMessage(
'La cafettiera: nuevo mensaje',
'de {} <{}>\n\nEscribio:\n\n{}'.format(name,email,content),
'<EMAIL>',
['<EMAIL>'],
reply_to=[email]
)
try:
email.send()
return redirect(reverse('contact')+'?ok')
except:
return redirect(reverse('contact')+'?fail')
return render(request,'contact/contact.html', {'form':contact_form})
``` |
{
"source": "josema62/Memoria",
"score": 3
} |
#### File: public/scripts/csv_main_dev_as_knowledge_json.py
```python
import argparse
import csv
import json
import sys
######################################################################
## Parse input
######################################################################
def validate_content_by(heading, expected):
if not expected:
return # no validation
comparison = expected.split(',')
stripped = heading[0:len(comparison)] # allow extra fields
if stripped != comparison:
raise MergeError('Erroneous content. Expected = ' + expected + ', got = ' + ','.join(heading))
def parse_csv(filename, parse_action, expected_format=None):
def read_heading_from(r):
p = r.next()
while p == []:
p = r.next()
return p
with open(filename, 'rb') as csvfile:
r = csv.reader(csvfile, delimiter=',')
heading = read_heading_from(r)
validate_content_by(heading, expected_format)
return [parse_action(row) for row in r]
class StructuralElement(object):
def __init__(self, name, complexity):
self.name = name
self.complexity = complexity
def parts(self):
return self.name.split('/')
def parse_structural_element(csv_row):
name = csv_row[1][2:]
complexity = csv_row[4]
return StructuralElement(name, complexity)
def parse_author_color(csv_row):
author = csv_row[0]
color = csv_row[1]
return author,color
class Ownership(object):
def __init__(self, module, main_author, ownership):
self.module = module
self.main_author = main_author
self.ownership = ownership
def parse_ownership(csv_row):
module = csv_row[0]
main_author = csv_row[1]
ownership = csv_row[4]
return Ownership(module, main_author,ownership)
######################################################################
## Organizational information to augment the structure
######################################################################
class Knowledge(object):
DEFAULT_COLOR = "black"
def __init__(self, authors_colors, ownerships):
self._authors_colors = authors_colors
self._ownership = dict([(o.module, o) for o in ownerships])
def color_of(self, author):
if author in self._authors_colors:
return self._authors_colors[author]
return self.DEFAULT_COLOR
def owner_of(self, module_name):
if module_name in self._ownership:
o = self._ownership[module_name]
return o.main_author
return None
def degree_of_ownership_for(self, module_name):
if module_name in self._ownership:
o = self._ownership[module_name]
return o.ownership
return 0.0
######################################################################
## Building the structure of the system
######################################################################
def _matching_part_in(hierarchy, part):
return next((x for x in hierarchy if x['name']==part), None)
def _ensure_branch_exists(hierarchy, branch):
existing = _matching_part_in(hierarchy, branch)
if not existing:
new_branch = {'name':branch, 'children':[]}
hierarchy.append(new_branch)
existing = new_branch
return existing
def _add_leaf(hierarchy, module, knowledge, name):
owner = knowledge.owner_of(module.name)
new_leaf = {'name':name, 'children':[],
'size':module.complexity,
'weight':knowledge.degree_of_ownership_for(module.name),
'author_color':knowledge.color_of(owner)}
hierarchy.append(new_leaf)
return hierarchy
def _insert_parts_into(hierarchy, module, knowledge, parts):
""" Recursively traverse the hierarchy and insert the individual parts
of the module, one by one.
The parts specify branches. If any branch is missing, it's
created during the traversal.
The final part specifies a module name (sans its path, of course).
This is where we add size and weight to the leaf.
"""
if len(parts) == 1:
return _add_leaf(hierarchy, module, knowledge, name=parts[0])
next_branch = parts[0]
existing_branch = _ensure_branch_exists(hierarchy, next_branch)
return _insert_parts_into(existing_branch['children'],
module,
knowledge,
parts=parts[1:])
def generate_structure_from(modules, knowledge):
hierarchy = []
for module in modules:
parts = module.parts()
_insert_parts_into(hierarchy, module, knowledge, parts)
structure = {'name':'root', 'children':hierarchy}
return structure
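# A small illustrative walk-through (hypothetical module names): two modules sharing the
# branch "src" end up under a single "src" node, and each leaf carries its complexity as
# size and its degree of ownership as weight.
#   modules = [StructuralElement('src/a.py', 10), StructuralElement('src/b.py', 5)]
#   knowledge = Knowledge({'alice': 'red'}, [Ownership('src/a.py', 'alice', 0.8)])
#   generate_structure_from(modules, knowledge)
#   # => {'name': 'root', 'children': [{'name': 'src', 'children': [<two leaves>]}]}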
######################################################################
## Output
######################################################################
def write_json(result):
print json.dumps(result)
######################################################################
## Main
######################################################################
def run(args):
authors_colors = dict(parse_csv(args.authors,
expected_format='author,color',
parse_action=parse_author_color))
module_ownership = parse_csv(args.owners,
expected_format='entity,main-dev,added,total-added,ownership',
parse_action=parse_ownership)
structure_input = parse_csv(args.structure,
expected_format='language,filename,blank,comment,code',
parse_action=parse_structural_element)
knowledge = Knowledge(authors_colors, module_ownership)
knowledge_structure = generate_structure_from(structure_input, knowledge)
write_json(knowledge_structure)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generates a JSON document suitable for knowledge diagrams.')
parser.add_argument('--structure', required=True, help='A CSV file generated by cloc')
parser.add_argument('--owners', required=True, help='A CSV file generated by a Code Maat main-dev analysis')
parser.add_argument('--authors', required=True, help='A CSV file specifying the color to use for each author')
args = parser.parse_args()
run(args)
```
#### File: public/scripts/git_interactions.py
```python
import subprocess
import re
def _as_rev_range(start, end):
return start + '..' + end
def _run_git_cmd(git_arguments):
return subprocess.Popen(git_arguments, stdout=subprocess.PIPE).communicate()[0]
def _read_revisions_matching(git_arguments):
git_log = _run_git_cmd(git_arguments)
revs = []
# match a line like: d804759 Documented tree map visualizations
# ignore everything except the commit number:
rev_expr = re.compile(r'([^\s]+)')
for line in git_log.split("\n"):
m = rev_expr.search(line)
if m:
revs.append(m.group(1))
return revs[::-1]
def _git_cmd_for(rev_start, rev_end):
rev_range = rev_start + '..' + rev_end
return ['git', 'log', rev_range, '--oneline']
def read_revs(rev_start, rev_end):
""" Returns a list of all commits in the given range.
"""
return _read_revisions_matching(git_arguments=_git_cmd_for(rev_start, rev_end))
def read_revs_for(file_name, rev_start, rev_end):
return _read_revisions_matching(git_arguments=_git_cmd_for(rev_start, rev_end) + [file_name])
def read_diff_for(rev1, rev2):
return _run_git_cmd(['git', 'diff', rev1, rev2])
def read_file_diff_for(file_name, rev1, rev2):
return _run_git_cmd(['git', 'diff', rev1, rev2, file_name])
def read_version_matching(file_name, rev):
return _run_git_cmd(['git', 'show', rev + ':' + file_name])
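# A minimal usage sketch; it assumes the module is run from inside a git work tree and
# that the revision range 'HEAD~5'..'HEAD' exists in that repository.
if __name__ == "__main__":
    revs = read_revs('HEAD~5', 'HEAD')  # commits in the range, oldest first
    print(revs)
    if len(revs) >= 2:
        print(read_diff_for(revs[0], revs[1]))  # diff between the two oldest commits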
```
#### File: public/scripts/proximity.py
```python
import collections
from operator import itemgetter
import desc_stats
## Functions for calculating proximity/distance
def _pdistance(positions):
return sum([j-i for i,j in zip(positions[:-1], positions[1:])])
def calc_proximity(changes):
return dict([(name, _pdistance(change)) for name, change in changes.iteritems()])
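# Worked example (illustrative numbers): a revision that touches lines 1, 4 and 10 of a
# file gives a proximity of (4 - 1) + (10 - 4) = 9; tightly clustered changes score lower.
#   _pdistance([1, 4, 10])                # => 9
#   calc_proximity({'a.py': [1, 4, 10]})  # => {'a.py': 9}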
def record_change_to(file_name, change, acc):
if not change:
return
existing = []
if file_name in acc:
existing = acc[file_name]
existing.append(change)
acc[file_name]=existing
def _as_stats(all_proximities):
return [desc_stats.DescriptiveStats(name, proximities_for_one) for name, proximities_for_one in all_proximities.iteritems()]
def _group_by(one_file, proximity, all_grouped):
existing = []
if one_file in all_grouped:
existing = all_grouped[one_file]
existing.append(proximity)
return existing
def sum_proximity_stats(all_proximities):
""" Received all proximities as a list of dictionaries.
Each dictionary represents the proximities in the changed
in one revision.
Take this list and group all changes per item.
"""
all_grouped = {}
for one_rev_proximity in all_proximities:
for (one_file, proximity) in one_rev_proximity.iteritems():
all_grouped[one_file] = _group_by(one_file, proximity, all_grouped)
return _as_stats(all_grouped)
def sorted_on_proximity(proximity_stats):
return sorted(proximity_stats, key=lambda p: p.total, reverse=True)
``` |
{
"source": "JosemaAlvarez/MoSQITo",
"score": 3
} |
#### File: functions/oct3filter/square_and_smooth.py
```python
import numpy as np
from scipy import signal
def square_and_smooth(sig, center_freq, fs):
"""3rd order low-pass filtering (See ISO 532-1 section 6.3)
Parameters
----------
sig : numpy.ndarray
time signal sampled at 48 kHz [pa]
center_freq : float
center frequency of the third-octave band [Hz]
fs : int
sampling frequency [Hz]
Outputs
-------
sig : numpy.ndarray
squared and smoothed time signal
"""
# Frequency dependent time constant
if center_freq <= 1000:
tau = 2 / (3 * center_freq)
else:
tau = 2 / (3 * 1000)
# Squaring
sig = sig**2
# Three smoothing low-pass filters
a1 = np.exp(-1 / (fs * tau))
b0 = 1 -a1
# zi = signal.lfilter_zi([b0], [1 -a1])
for i in range(3):
sig = signal.lfilter([b0], [1, -a1], sig)
return sig
```
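A minimal usage sketch for the filter above (hypothetical 1 kHz test tone; the import path is assumed from the package layout):
```python
import numpy as np
from mosqito.functions.oct3filter.square_and_smooth import square_and_smooth

fs = 48000
t = np.arange(0, 0.1, 1.0 / fs)
tone = np.sin(2 * np.pi * 1000 * t)
# squared signal passed through the three smoothing low-pass filters
envelope = square_and_smooth(tone, center_freq=1000, fs=fs)
```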
#### File: functions/roughness_danielweber/a0_zwicker.py
```python
import numpy as np
def a0tab(bark_axis):
""" Zwicker coefficient for the transmission between outer and inner ear
See <NAME>, <NAME>: Psychoacoustics. Springer,Berlin, Heidelberg, 1990.
The coefficients are linearly interpolated from the values given in figure 8.18
Parameter
---------
bark_axis: numpy.array
frequency axis in Bark
Output
------
a0tab: numpy.array
coefficients a0 along the given bark axis
"""
xp = [0, 10, 12, 13, 14, 15, 16, 16.5, 17, 18, 18.5, 19, 20,
21, 21.5, 22, 22.5, 23, 23.5, 24, 25, 26]
yp = [0, 0, 1.15, 2.31, 3.85, 5.62, 6.92, 7.38, 6.92,4.23,2.31,0,-1.43,
-2.59,-3.57,-5.19, -7.41,-11.3,-20, -40, -130, -999]
a0tab = np.interp(bark_axis,xp,yp)
return a0tab
```
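A quick usage sketch for the interpolation above (import path assumed from the package layout):
```python
import numpy as np
from mosqito.functions.roughness_danielweber.a0_zwicker import a0tab

# outer-to-inner-ear transmission coefficients over a 0-24 Bark axis
bark_axis = np.arange(0, 24, 0.5)
a0 = a0tab(bark_axis)
```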
#### File: functions/sharpness/comp_sharpness.py
```python
import sys
sys.path.append('../..')
# Standard library import
import numpy as np
# Local imports
from mosqito.functions.loudness_zwicker.comp_loudness import comp_loudness
from mosqito.functions.sharpness.sharpness_aures import comp_sharpness_aures
from mosqito.functions.sharpness.sharpness_din import comp_sharpness_din
from mosqito.functions.sharpness.sharpness_bismarck import comp_sharpness_bismarck
from mosqito.functions.sharpness.sharpness_fastl import comp_sharpness_fastl
def comp_sharpness(is_stationary, signal, fs, method='din', skip=0):
""" Acoustic sharpness calculation according to different methods:
Aures, <NAME>, DIN 45692, Fastl
Parameters:
----------
is_stationary: boolean
True if the signal is stationary, false if it is time varying
signal: numpy.array
time history values
fs: integer
sampling frequency
method: string
'din' by default,'aures', 'bismarck','fastl'
skip : float
number of seconds to be cut at the beginning of the analysis
Outputs
------
S : float
sharpness value
"""
if method not in ('din', 'aures', 'bismarck', 'fastl'):
raise ValueError("ERROR: method must be 'din', 'aures', 'bismarck', 'fastl'")
loudness = comp_loudness(is_stationary, signal, fs)
if method == 'din':
S = comp_sharpness_din(loudness['values'], loudness['specific values'], is_stationary )
elif method == 'aures':
S = comp_sharpness_aures(loudness['values'], loudness['specific values'], is_stationary )
elif method == 'bismarck':
S = comp_sharpness_bismarck(loudness['values'], loudness['specific values'], is_stationary )
elif method == 'fastl':
S = comp_sharpness_fastl(loudness['values'], loudness['specific values'], is_stationary )
if is_stationary == False:
# Cut transient effect
time = np.linspace(0, len(signal) / fs, len(S))
cut_index = np.argmin(np.abs(time - skip))
S = S[cut_index:]
output = {
"name" : "sharpness",
"method" : method,
"values" : S,
"skip" : skip
}
return output
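# A minimal usage sketch (hypothetical, uncalibrated 48 kHz noise signal):
# import numpy as np
# sig = np.random.normal(0, 0.02, 48000)
# sharpness = comp_sharpness(True, sig, fs=48000, method='din')
# print(sharpness['values'])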
``` |
{
"source": "josemac95/umucv",
"score": 3
} |
#### File: code/bot/bot3.py
```python
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from io import BytesIO
from PIL import Image
import cv2 as cv
import skimage.io as io
updater = Updater('api token del bot')
def sendImage(bot, cid, frame):
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
image = Image.fromarray(frame, mode = 'RGB')
byte_io = BytesIO()
image.save(byte_io, 'PNG')
byte_io.seek(0)
bot.sendPhoto(chat_id=cid, photo=byte_io)
def hello(bot, update):
update.message.reply_text('Hello {}'.format(update.message.from_user.first_name))
def argu(bot, update, args):
print('arguments:')
for arg in args:
print(arg)
def work(bot, update):
file_id = update.message.photo[-1].file_id
path = bot.get_file(file_id)['file_path']
img = io.imread(path)
print(img.shape)
update.message.reply_text('{}x{}'.format(img.shape[1],img.shape[0]))
r = cv.cvtColor(cv.cvtColor(img, cv.COLOR_RGB2GRAY), cv.COLOR_GRAY2RGB)
sendImage(bot, update.message.chat_id, r)
updater.dispatcher.add_handler(CommandHandler('hello', hello))
updater.dispatcher.add_handler(CommandHandler('argu' , argu, pass_args=True))
updater.dispatcher.add_handler(MessageHandler(Filters.photo, work))
updater.start_polling()
updater.idle()
```
#### File: umucv/code/pose3D.py
```python
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtCore, QtGui
import cv2 as cv
import numpy as np
import numpy.linalg as la
from umucv.stream import autoStream
from umucv.htrans import htrans, Pose, kgen, rotation, desp, sepcam, jc, jr, col, row
from umucv.contours import extractContours, redu
def cameraTransf(M):
K,R,C = sepcam(M)
rt = jr(jc(R, -R @ col(C)),
row(0,0,0,1))
return la.inv(rt)
# 3D outline of a camera
def cameraOutline2(f, sc=0.3):
x = 1
y = x
z = f
ps = [ x, y, z,
-x, y, z,
-x,-y, z,
x,-y, z,
x, y, z,
0, 0, 0,
-x, y, z,
-x,-y, z,
0, 0, 0,
x, -y, z ]
ps = np.array(ps).reshape(-1,3)
return sc*ps
def polygons(cs,n,prec=2):
rs = [ redu(c,prec) for c in cs ]
return [ r for r in rs if len(r) == n ]
def rots(c):
return [np.roll(c,k,0) for k in range(len(c))]
def bestPose(K,view,model):
poses = [ Pose(K, v.astype(float), model) for v in rots(view) ]
sp = sorted(poses,key=lambda p: p.rms)
return sp[0]
marker = np.array(
[[0, 0, 0],
[0, 1, 0],
[0.5, 1, 0],
[0.5, 0.5, 0],
[1, 0.5, 0],
[1, 0, 0]])
stream = autoStream()
HEIGHT, WIDTH = next(stream)[1].shape[:2]
size = WIDTH,HEIGHT
print(size)
f = 1.7
K = kgen(size,f) # fov approx. 60 degrees
print(K)
## Create a GL View widget to display data
app = QtGui.QApplication([])
win = gl.GLViewWidget()
win.show()
win.setWindowTitle('pose')
win.setCameraPosition(distance=20)
## grid
g = gl.GLGridItem()
win.addItem(g)
ax = gl.GLAxisItem(glOptions='opaque')
ax.setSize(2,2,2)
win.addItem(ax)
ax.setTransform(QtGui.QMatrix4x4(*(rotation((1,0,0),0.0001,homog=True).flatten())))
ax.translate(0,0,-0.02)
axc = gl.GLAxisItem(glOptions='opaque')
axc.setSize(1,1,1)
#axc.translate(0,0,0.02)
win.addItem(axc)
# image
view = gl.GLImageItem(data=np.zeros([100,100,4]))
win.addItem(view)
# marker
gmark = gl.GLLinePlotItem(pos=np.vstack([marker,marker[0]]),color=(255,0,0,1),antialias=True,width=3)
gmark.setGLOptions('opaque')
gmark.translate(0,0,0.01)
win.addItem(gmark)
# camera
cam = gl.GLLinePlotItem(pos=np.array([[0,0,0]]),color=(255,255,255,1),antialias=True,width=2)
cam.setGLOptions('opaque')
win.addItem(cam)
camsize = 0.5
drawCam = cameraOutline2(f,camsize)
W2 = WIDTH/2
H2 = HEIGHT/2
A = desp((0,0,f*camsize)) @ np.diag([1,1,1,W2/camsize]) @ desp((-W2,-H2,0))
def img2tex(image):
x = image.transpose(1,0,2)
texture,_ = pg.makeARGB(x, useRGBA=True)
return texture
def update():
key, img = next(stream)
g = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
cs = extractContours(g, minarea=5, reduprec=2)
good = polygons(cs,6,3)
poses = []
for c in good:
p = bestPose(K,c,marker)
if p.rms < 2:
poses += [p.M]
cv.polylines(img,[c],True,(255,255,0),3)
if poses:
p = poses[0]
T = cameraTransf(p)
cam.setData(pos= htrans(T, drawCam ) )
view.setData(data=img2tex(img))
m = T @ A
view.setTransform(QtGui.QMatrix4x4(*(m.flatten())))
axc.setTransform(QtGui.QMatrix4x4(*(T.flatten())))
#print(p)
cv.imshow('input', img)
#key = cv.waitKey(1);
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(10)
QtGui.QApplication.instance().exec_()
``` |
{
"source": "josemagalan/UBU-TFG-IG-Study",
"score": 2
} |
#### File: ProyectoUBU/flaskr/views.py
```python
from flask import Blueprint, render_template,request
main = Blueprint('main', __name__)
@main.route('/')
def index():
return render_template('index.html')
@main.route('/sign')
def sign():
return render_template('sign.html')
@main.route('/sign', methods=['POST'])
def sign_post():
name = request.form.get('name')
comment = request.form.get('comment')
return f'Name: {name} Comment: {comment}'
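# A minimal app-factory sketch (hypothetical project layout) showing how this
# blueprint could be registered:
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(main)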
``` |
{
"source": "JoseMan90/sentence-clustering",
"score": 3
} |
#### File: sentence-clustering/scripts/SentenceClusterizer.py
```python
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
import string
from sklearn.decomposition import TruncatedSVD
from hdbscan.hdbscan_ import HDBSCAN
import pandas as pd
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import RandomizedSearchCV
from scripts.DisableCV import DisabledCV
def normalized_tokenizer(text):
"""
Returns the normalized (preprocessed and stemmed) tokens
:param text: sentence
:return: list of tokens
"""
punctuations = dict((ord(char), None) for char in string.punctuation)
stemmer = nltk.stem.snowball.SnowballStemmer("english")
tokens = nltk.word_tokenize(text.lower().translate(punctuations))
tokens = [stemmer.stem(item) for item in tokens]
return tokens
def get_word_vector_matrix(texts, dimensions=10):
"""
Calculates and returns the dimensionality-reduced word-vector matrix
:param texts: list of sentences
:param dimensions: number of dimensions to which the word matrix is reduced
:return: word-vector matrix
"""
print("Vectorizing sentences into TF-IDF vectors...")
vectorizer = TfidfVectorizer(tokenizer=normalized_tokenizer)
matrix = vectorizer.fit_transform(texts)
print("Word Vector Matrix : " + str(matrix.shape))
decomposer = TruncatedSVD(n_components=dimensions, n_iter=50, random_state=20)
reduced_matrix = decomposer.fit_transform(matrix)
print(decomposer.explained_variance_ratio_)
return reduced_matrix
def hdb_segment(vector_matrix, min_cluster_size=5, min_samples=2, metric="euclidean", cluster_selection_method="eom"):
"""
Segments the given data using the HDB clustering algorithm
:param vector_matrix: reduced word-vector matrix of the sentences
:param min_cluster_size: smallest grouping size considered a cluster
:param min_samples: number of samples in a neighbourhood for a core point
:param metric: distance metric used by HDBSCAN
:param cluster_selection_method: 'eom' or 'leaf'
:return: cluster labels
"""
print("Running HDB clustering...")
hdb_algo = HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,
cluster_selection_method=cluster_selection_method)
hdb_algo.fit(vector_matrix)
scores = pd.DataFrame({"label":hdb_algo.labels_, "probability":hdb_algo.probabilities_})
scores["confident"] = 0
scores.loc[scores["probability"]<0.05, "confident"] = 1
print(scores)
print(scores["confident"].mean())
return hdb_algo.labels_
def hdb_scorer(hdb_algo, X):
"""
Scores an HDBSCAN clustering by the fraction of points confidently assigned to a cluster
"""
hdb_algo.fit(X)
scores = pd.DataFrame({"label":hdb_algo.labels_, "probability":hdb_algo.probabilities_})
scores["confident"] = 0
scores.loc[scores["probability"]>=0.05, "confident"] = 1
scores.loc[scores["label"] == -1, "confident"] = 0
score = scores["confident"].sum()/scores["label"].count()
print("Returning Score : " + str(score))
return score
def hdb_segment_generalized(matrix, iterations=50):
parameter_grid = {
"min_cluster_size": range(5, 100),
"min_samples": range(2, 10),
"metric": ["euclidean"],
"cluster_selection_method": ["eom", "leaf"],
"allow_single_cluster": [True, False]
}
random_search = RandomizedSearchCV(estimator=HDBSCAN(), param_distributions=parameter_grid,
scoring=hdb_scorer, cv=DisabledCV(), n_jobs=-2, random_state=45,
n_iter=iterations, refit=True)
random_search.fit(matrix)
print(random_search.best_score_)
hdb = random_search.best_estimator_
print(pd.Series(hdb.labels_).value_counts(normalize=True))
return hdb.labels_
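# A minimal usage sketch (hypothetical sentences; assumes nltk's 'punkt' data is installed):
# sentences = ["The cat sat on the mat.", "A cat was on a mat.",
# "Stock prices fell sharply.", "Markets dropped again."]
# matrix = get_word_vector_matrix(sentences, dimensions=2)
# labels = hdb_segment(matrix, min_cluster_size=2)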
``` |
{
"source": "josemanimala/lint-review",
"score": 2
} |
#### File: lintreview/tools/checkstyle.py
```python
from __future__ import absolute_import
import logging
import os
import jprops
import lintreview.docker as docker
from lintreview.review import IssueComment
from lintreview.tools import Tool, process_checkstyle
log = logging.getLogger(__name__)
class Checkstyle(Tool):
"""
Integrates with checkstyle.
When checkstyle is run a properties file will be generated, that
defines the following keys:
- config_loc
- samedir
- project_loc
- basedir
All of these keys will resolve to your repository's root directory.
"""
name = 'checkstyle'
def check_dependencies(self):
"""
See if checkstyle image exists
"""
return docker.image_exists('checkstyle')
def match_file(self, filename):
base = os.path.basename(filename)
name, ext = os.path.splitext(base)
return ext == '.java'
def process_files(self, files):
"""
Run code checks with checkstyle.
Only a single process is made for all files
to save resources.
"""
log.debug('Processing %s files with %s', files, self.name)
if 'config' not in self.options:
msg = ("We could not run `checkstyle` you did not set "
"the `config` option to a valid checkstyle XML file.")
return self.problems.add(IssueComment(msg))
props_path = os.path.join(self.base_path, '_lintreview.properties')
# Close the file before trying to read.
# There have been problems with reading properties while
# the file handle is still open.
with open(props_path, 'w+') as f:
self.setup_properties(f)
properties_filename = os.path.basename(f.name)
command = self.create_command(properties_filename, files)
output = docker.run('checkstyle', command, self.base_path)
# Cleanup the generated properties file.
os.remove(props_path)
# Only one line is generally a config error. Replay the error
# to the user.
lines = output.strip().split('\n')
# Checkstyle >=8.28 outputs non-xml summary data at the beginning.
if lines[0].startswith('Checkstyle ends with'):
lines = lines[1:]
if not lines[0].startswith('<'):
msg = ("Running `checkstyle` failed with:\n"
"```\n"
"%s\n"
"```\n"
"Ensure your config file exists and is valid XML.")
return self.problems.add(IssueComment(msg % (lines[0],)))
# Remove the last line if it is not XML
# Checkstyle may output text after the XML if there are errors.
if not lines[-1].strip().startswith('<'):
lines = lines[0:-1]
output = ''.join(lines)
process_checkstyle(self.problems, output, docker.strip_base)
def setup_properties(self, properties_file):
config_loc = os.path.dirname(docker.apply_base(self.options['config']))
project_loc = docker.apply_base('/')
properties = {
'config_loc': config_loc,
'samedir': config_loc,
'project_loc': project_loc,
'basedir': project_loc,
}
jprops.store_properties(properties_file, properties)
def create_command(self, properties_filename, files):
command = [
'checkstyle',
'-f', 'xml',
'-p', docker.apply_base(properties_filename),
'-c', docker.apply_base(self.options['config'])
]
command += files
return command
```
#### File: lintreview/tools/remarklint.py
```python
from __future__ import absolute_import
import logging
import os
import re
import lintreview.docker as docker
from lintreview.tools import Tool
log = logging.getLogger(__name__)
# matches: ' 1:4 warning Incorrect list-item indent: add 1 space list-item-indent remark-lint'
# matches: ' 18:71-19:1 error Missing new line after list item list-item-spacing remark-lint',
warning_pattern = re.compile(r'^ +(?P<line>\d+):(\d+)(-(\d+):(\d+))? (?P<text>.+)$')
filename_pattern = re.compile(r'^[\S]+.*$')
class Remarklint(Tool):
name = 'remarklint'
def check_dependencies(self):
"""See if the node image exists
"""
return docker.image_exists('nodejs')
def match_file(self, filename):
base = os.path.basename(filename)
name, ext = os.path.splitext(base)
return ext in ('.md', '.markdown')
def process_files(self, files):
"""
Run code checks with remark-lint.
Only a single process is made for all files
to save resources.
"""
log.debug('Processing %s files with %s', files, self.name)
command = self.create_command()
command += map(lambda f: docker.apply_base(f), files)
output = docker.run('nodejs', command, source_dir=self.base_path)
if not output:
return False
output = output.split("\n")
filename = None
# The output from remarklint is a unique format that looks like:
#
# >>> file.md
# >>> 1:1-1:8 warning Some warning
#
# We inspect each line to determine if it is a file or warning.
for line in output:
if filename_pattern.match(line):
# Remove the base path as remarklint is fed absolute paths.
filename = docker.strip_base(line)
else:
match = warning_pattern.match(line)
if match:
line = match.group('line')
text = match.group('text')
self.problems.add(filename, line, text)
def has_fixer(self):
"""
remarklint has a fixer that can be enabled through configuration.
"""
return bool(self.options.get('fixer', False))
def process_fixer(self, files):
"""Run remarklint, in fixer mode.
"""
command = self.create_fixer_command(files)
docker.run('nodejs', command, source_dir=self.base_path)
def create_command(self):
# Use the wrapper script for remarklint. See docker/run-remark.sh
# for more information.
return ['run-remark', '--no-stdout', '--no-color']
def create_fixer_command(self, files):
command = self.create_command()
command += map(lambda f: docker.apply_base(f), files)
command.append('-o')
return command
```
#### File: fixtures/flake8/no_errors.py
```python
def does_something(thing):
"""Do a thing and then return."""
return thing.buzz()
```
#### File: fixtures/pytype/has_errors.py
```python
import re
def get_username(email_address):
match = re.match(r'([^@]+)@example\.com', email_address)
return match.group(1)
class Foo(object):
__slots__ = (1, 2, 3)
def error(self):
x = {}
y = x["y"]
return y
```
#### File: lint-review/tests/test_config.py
```python
from __future__ import absolute_import
from unittest import TestCase
from lintreview.config import build_review_config, get_lintrc_defaults
from lintreview.config import load_config, ReviewConfig
sample_ini = """
[files]
ignore = test/CodeStandards/test/**
vendor/**
[tools]
linters = phpcs, pep8, jshint
[tool_jshint]
config = ./jshint.json
[tool_phpcs]
standard = test/CodeStandards
config = test/phpcs.xml
"""
defaults_ini = """
[tool_jshint]
config = /etc/jshint.json
"""
simple_ini = """
[tools]
linters = jshint
"""
review_ini = """
[tools]
linters = jshint
[review]
summary_comment_threshold = 25
fail_on_comments = False
apply_label_on_pass = lint ok
"""
fixer_ini = """
[tools]
linters = phps
[fixers]
enable = True
workflow = pull_request
"""
bad_ini = """
[herp]
derp=derplily
"""
# Simulate the application config
app_config = {
'SUMMARY_THRESHOLD': 100,
'OK_LABEL': 'no lint',
'PULLREQUEST_STATUS': True
}
class ReviewConfigTest(TestCase):
@staticmethod
def test_load_config():
res = load_config()
assert res['GITHUB_URL'].endswith, 'Exists and is stringy'
@staticmethod
def test_get_lintrc_defaults():
config = load_config()
res = get_lintrc_defaults(config)
assert res is None
def test_build_review_config(self):
config = build_review_config(sample_ini, {})
assert isinstance(config, ReviewConfig)
self.assertEqual(3, len(config.linters()))
def test_linter_listing_bad(self):
config = build_review_config(bad_ini)
res = config.linters()
self.assertEqual(res, [])
def test_linter_listing(self):
config = build_review_config(sample_ini)
res = config.linters()
expected = ['phpcs', 'pep8', 'jshint']
self.assertEqual(sorted(res), sorted(expected))
def test_linter_config_bad(self):
config = build_review_config(bad_ini)
res = config.linter_config('phpcs')
self.assertEqual(res, {})
def test_linter_config(self):
config = build_review_config(sample_ini)
res = config.linter_config('phpcs')
expected = {
'standard': 'test/CodeStandards',
'config': 'test/phpcs.xml'
}
self.assertEqual(res, expected)
res = config.linter_config('not there')
self.assertEqual(res, {})
def test_ignore_patterns(self):
config = build_review_config(sample_ini)
res = config.ignore_patterns()
expected = ['test/CodeStandards/test/**', 'vendor/**']
self.assertEqual(res, expected)
def test_ignore_patterns_missing(self):
config = ReviewConfig()
res = config.ignore_patterns()
self.assertEqual(res, [])
def test_load_ini__override(self):
config = ReviewConfig()
config.load_ini(defaults_ini)
config.load_ini(sample_ini)
res = config.linter_config('jshint')
expected = {
'config': './jshint.json',
}
self.assertEqual(res, expected)
def test_load_ini__multiple_merges_settings(self):
config = ReviewConfig()
config.load_ini(defaults_ini)
config.load_ini(simple_ini)
res = config.linter_config('jshint')
expected = {
'config': '/etc/jshint.json',
}
self.assertEqual(res, expected)
def test_fixers_enabled(self):
config = build_review_config(sample_ini)
self.assertEqual(False, config.fixers_enabled())
config = build_review_config(fixer_ini)
self.assertEqual(True, config.fixers_enabled())
def test_fixer_workflow(self):
config = build_review_config(sample_ini)
self.assertEqual('commit', config.fixer_workflow())
config = build_review_config(fixer_ini)
self.assertEqual('pull_request', config.fixer_workflow())
def test_getitem(self):
config = build_review_config(simple_ini, app_config)
self.assertEqual(app_config['SUMMARY_THRESHOLD'],
config['SUMMARY_THRESHOLD'])
def test_getitem__error(self):
config = build_review_config(simple_ini, app_config)
with self.assertRaises(KeyError):
config['UNKNOWN']
def test_get(self):
config = build_review_config(simple_ini, app_config)
self.assertEqual(app_config['SUMMARY_THRESHOLD'],
config.get('SUMMARY_THRESHOLD'))
self.assertEqual(None, config.get('unknown'))
self.assertEqual('default', config.get('unknown', 'default'))
def test_summary_threshold__undefined(self):
config = build_review_config(simple_ini)
self.assertEqual(None, config.summary_threshold())
def test_summary_threshold__app_config(self):
config = build_review_config(simple_ini, app_config)
self.assertEqual(app_config['SUMMARY_THRESHOLD'],
config.summary_threshold())
def test_summary_threshold__job_config(self):
config = build_review_config(review_ini, app_config)
self.assertEqual(25, config.summary_threshold())
def test_passed_review_label__undefined(self):
config = build_review_config(simple_ini)
self.assertEqual(None, config.passed_review_label())
def test_passed_review_label__app_config(self):
config = build_review_config(simple_ini, app_config)
self.assertEqual('no lint', config.passed_review_label())
def test_passed_review_label__job_config(self):
config = build_review_config(review_ini, app_config)
self.assertEqual('lint ok', config.passed_review_label())
def test_failed_review_status__undefined(self):
config = build_review_config(simple_ini)
self.assertEqual('failure', config.failed_review_status())
def test_failed_review_status__app_config(self):
config = build_review_config(simple_ini, {'PULLREQUEST_STATUS': True})
self.assertEqual('failure', config.failed_review_status())
config = build_review_config(simple_ini, {'PULLREQUEST_STATUS': False})
self.assertEqual('success', config.failed_review_status())
def test_failed_review_status__job_config(self):
config = build_review_config(review_ini, app_config)
self.assertEqual('success', config.failed_review_status())
ini = "[review]\nfail_on_comments = true"
config = build_review_config(ini, app_config)
self.assertEqual('failure', config.failed_review_status())
```
#### File: tests/tools/test_rubocop.py
```python
from __future__ import absolute_import
from unittest import TestCase
from lintreview.review import Comment, Problems
from lintreview.tools.rubocop import Rubocop
from tests import (
root_dir, read_file, read_and_restore_file, requires_image
)
class TestRubocop(TestCase):
fixtures = [
'tests/fixtures/rubocop/no_errors.rb',
'tests/fixtures/rubocop/has_errors.rb',
]
def setUp(self):
self.problems = Problems()
self.tool = Rubocop(self.problems, {}, root_dir)
def test_match_file(self):
self.assertFalse(self.tool.match_file('test.py'))
self.assertFalse(self.tool.match_file('dir/name/test.py'))
self.assertTrue(self.tool.match_file('test.rb'))
self.assertTrue(self.tool.match_file('dir/name/test.rb'))
@requires_image('ruby2')
def test_process_files__one_file_pass(self):
self.tool.process_files([self.fixtures[0]])
self.assertEqual([], self.problems.all(self.fixtures[0]))
@requires_image('ruby2')
def test_process_files__one_file_fail(self):
self.tool.process_files([self.fixtures[1]])
problems = self.problems.all(self.fixtures[1])
long_line = problems[1]
assert long_line.filename == self.fixtures[1]
assert long_line.line == 3
assert long_line.position == 3
assert 'C: Line is too long. [82/80]' in long_line.body
@requires_image('ruby2')
def test_process_files_two_files(self):
self.tool.process_files(self.fixtures)
linty_filename = self.fixtures[1]
self.assertEqual(3, len(self.problems.all(linty_filename)))
freshly_laundered_filename = self.fixtures[0]
self.assertEqual([], self.problems.all(freshly_laundered_filename))
@requires_image('ruby2')
def test_process_files_one_file_fail_display_cop_names(self):
options = {
'display_cop_names': 'True',
}
self.tool = Rubocop(self.problems, options, root_dir)
linty_filename = self.fixtures[1]
self.tool.process_files([linty_filename])
problems = self.problems.all(linty_filename)
long_line = problems[1]
assert long_line.filename == linty_filename
assert long_line.line == 3
assert long_line.position == 3
assert 'C: Layout/LineLength: Line is too long. [82/80]' in long_line.body
@requires_image('ruby2')
def test_process_files_one_file_fail_display_cop_names__bool(self):
options = {
'display_cop_names': True,
}
self.tool = Rubocop(self.problems, options, root_dir)
linty_filename = self.fixtures[1]
self.tool.process_files([linty_filename])
problems = self.problems.all(linty_filename)
long_line = problems[1]
assert long_line.filename == linty_filename
assert long_line.line == 3
assert long_line.position == 3
assert 'C: Layout/LineLength: Line is too long. [82/80]' in long_line.body
@requires_image('ruby2')
def test_process_files__invalid_rubocop_yml(self):
self.tool.process_files(['tests/fixtures/rubocop/badconfig/has_errors.rb'])
problems = self.problems.all()
assert 1 == len(problems)
assert 'Your rubocop configuration' in problems[0].body
assert 'expected key while parsing' in problems[0].body
def test_has_fixer__not_enabled(self):
tool = Rubocop(self.problems, {}, root_dir)
self.assertEqual(False, tool.has_fixer())
def test_has_fixer__enabled(self):
tool = Rubocop(self.problems, {'fixer': True}, root_dir)
self.assertEqual(True, tool.has_fixer())
@requires_image('ruby2')
def test_execute_fixer(self):
tool = Rubocop(self.problems, {'fixer': True}, root_dir)
original = read_file(self.fixtures[1])
tool.execute_fixer(self.fixtures)
updated = read_and_restore_file(self.fixtures[1], original)
assert original != updated, 'File content should change.'
self.assertEqual(0, len(self.problems.all()),
'No errors should be recorded')
@requires_image('ruby2')
def test_execute_fixer__fewer_problems_remain(self):
tool = Rubocop(self.problems, {'fixer': True}, root_dir)
# rubocop's fixer can correct most, but not all, problems in the fixture file
original = read_file(self.fixtures[1])
tool.execute_fixer(self.fixtures)
tool.process_files(self.fixtures)
read_and_restore_file(self.fixtures[1], original)
self.assertEqual(1, len(self.problems.all()),
'Most errors should be fixed')
self.assertIn('too long', self.problems.all()[0].body)
```
#### File: tests/tools/test_shellcheck.py
```python
from __future__ import absolute_import
from unittest import TestCase
from lintreview.review import Problems, Comment
from lintreview.tools.shellcheck import Shellcheck
from tests import root_dir, requires_image
class Testshellcheck(TestCase):
fixtures = [
'tests/fixtures/shellcheck/no_errors.sh',
'tests/fixtures/shellcheck/has_errors.sh',
]
def setUp(self):
self.problems = Problems()
self.tool = Shellcheck(self.problems, {}, root_dir)
def test_match_file(self):
self.assertTrue(self.tool.match_file('test.bash'))
self.assertTrue(self.tool.match_file('test.zsh'))
self.assertTrue(self.tool.match_file('test.ksh'))
self.assertTrue(self.tool.match_file('test.sh'))
self.assertTrue(self.tool.match_file('dir/name/test.sh'))
self.assertFalse(self.tool.match_file('dir/name/test.py'))
self.assertFalse(self.tool.match_file('test.py'))
self.assertFalse(self.tool.match_file('test.js'))
def test_match_file__executable(self):
res = self.tool.match_file('tests/fixtures/shellcheck/tool')
self.assertTrue(res)
@requires_image('shellcheck')
def test_check_dependencies(self):
self.assertTrue(self.tool.check_dependencies())
@requires_image('shellcheck')
def test_process_files__one_file_pass(self):
self.tool.process_files([self.fixtures[0]])
self.assertEqual([], self.problems.all(self.fixtures[0]))
@requires_image('shellcheck')
def test_process_files__one_file_fail(self):
self.tool.process_files([self.fixtures[1]])
problems = self.problems.all(self.fixtures[1])
self.assertEqual(3, len(problems))
fname = self.fixtures[1]
expected = Comment(
fname,
3,
3,
'a is referenced but not assigned.\nDouble quote to prevent '
'globbing and word splitting.')
self.assertEqual(expected, problems[0])
expected = Comment(
fname,
4,
4,
'BASE appears unused. Verify it or export it.\n'
'Use $(..) instead of legacy \`..\`.')
self.assertEqual(expected, problems[1])
expected = Comment(
fname,
6,
6,
("The order of the 2>&1 and the redirect matters. "
"The 2>&1 has to be last."))
self.assertEqual(expected, problems[2])
@requires_image('shellcheck')
def test_process_files_two_files(self):
self.tool.process_files(self.fixtures)
self.assertEqual([], self.problems.all(self.fixtures[0]))
problems = self.problems.all(self.fixtures[1])
self.assertEqual(3, len(problems))
@requires_image('shellcheck')
def test_process_files_with_config(self):
config = {
'shell': 'bash',
'exclude': 'SC2154,SC2069'
}
tool = Shellcheck(self.problems, config, root_dir)
tool.process_files([self.fixtures[1]])
problems = self.problems.all(self.fixtures[1])
self.assertEqual(2, len(problems),
'Changing standards changes error counts')
``` |
{
"source": "josemanuel179/practica3IA",
"score": 3
} |
#### File: src/dominio/transformacion_fichero.py
```python
import shutil
import os
def copiar_file_str(file_name, string):
try:
f = open(file_name, "a")
f.write(string)
except:
print("Error al copiar string en el fichero")
def copiar_file(file_name):
try:
shutil.copyfile(file_name, "../test_data/test.arff")
except:
print("Error al copiar el fichero en './test_data/test.arff'")
def borrar_file(file_name):
try:
os.remove(file_name)
except:
print("Error al eliminar fichero")
```
#### File: src/interfaz/interfaz.py
```python
from tkinter import Tk, BOTTOM
from tkinter import ttk
class Interfaz():
def __init__(self):
raiz = Tk()
raiz.geometry('300x200')
raiz.configure(bg = 'white')
raiz.title('Weka')
ttk.Button(raiz, text='Salir', command=raiz.destroy).pack(side=BOTTOM)
raiz.mainloop()
``` |
{
"source": "jose-manuel/npfc",
"score": 2
} |
#### File: lib/npfc/standardize.py
```python
import logging
from collections import Counter
from copy import deepcopy
from more_itertools import intersperse
import pkg_resources
# data handling
import json
import pandas as pd
from pandas import DataFrame
# chemoinformatics
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Mol
from rdkit.Chem import rdinchi
from rdkit.Chem import Descriptors
from rdkit.Chem import rdmolops
from rdkit.Chem.MolStandardize.metal import MetalDisconnector
from rdkit.Chem.MolStandardize.charge import Uncharger
from rdkit.Chem.MolStandardize.normalize import Normalizer
from rdkit.Chem.MolStandardize.tautomer import TautomerCanonicalizer
from rdkit.Chem.Scaffolds import MurckoScaffold
# graph
from networkx import Graph
# docs
from typing import Union
from typing import List
# dev library
from npfc.draw import depict_mol
from npfc import utils
from npfc.filter import Filter
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GLOBALS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
DEFAULT_ELEMENTS = {'H', 'B', 'C', 'N', 'O', 'F', 'P', 'S', 'Cl', 'Br', 'I'}
# DEFAULT_PROTOCOL = {'tasks': ['filter_empty',
# 'disconnect_metal',
# 'clear_mixtures',
# 'deglycosylate',
# 'filter_num_heavy_atom',
# 'filter_molecular_weight',
# 'filter_num_ring',
# 'filter_elements',
# 'clear_isotopes',
# 'normalize',
# 'uncharge',
# 'canonicalize',
# 'clear_stereo',
# ],
# 'filter_num_heavy_atom': 'num_heavy_atom > 3',
# 'filter_molecular_weight': 'molecular_weight <= 1000.0',
# 'filter_num_ring': 'num_ring > 0',
# 'filter_elements': f'elements in {", ".join(str(x) for x in DEFAULT_ELEMENTS)}',
# 'filter_unwanted: c1ccccc1',
# }
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CLASSES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
class FullUncharger(Uncharger):
"""A class derived from rdkit.Chem.MolStandardize.charge.Uncharger, so
instead of attempting to create zwitterions all possible charges are removed
from the molecule.
For instance:
>>> # Uncharger:
>>> [O-][N+](C)(C)C[O-] -> [O-][N+](C)(C)CO
>>> # FullUncharger:
>>> [O-][N+](C)(C)C[O-] -> O[N+](C)(C)CO
"""
def __init__(self):
"""Create an instance of FullUncharger.
.. todo:: This will remove charges from -2 to +2 only. This could be improved using more general smarts?
"""
# some smarts to use to find charges
self.q_pos_1 = Chem.MolFromSmarts("[*;+]")
self.q_pos_2 = Chem.MolFromSmarts("[*;+2]")
self.q_neg_1 = Chem.MolFromSmarts("[*;-]")
self.q_neg_2 = Chem.MolFromSmarts("[*;-2]")
logging.debug("Initialized a new FullUncharger object")
def full_uncharge(self, mol: Mol) -> Mol:
"""Neutralize molecule by adding/removing hydrogens.
Does not attempt to preserve zwitterions.
For now takes into account only charges of -2 and +2.
:param mol: the input molecule
:return: the uncharged molecule
"""
logging.debug("Uncharging a molecule")
mol = deepcopy(mol)
# Get atom ids for matches
p = [x[0] for x in mol.GetSubstructMatches(self.q_pos_1)] # +1
p += [x[0] for x in mol.GetSubstructMatches(self.q_pos_2)] # +2
n = [x[0] for x in mol.GetSubstructMatches(self.q_neg_1)] # -1
n += [x[0] for x in mol.GetSubstructMatches(self.q_neg_2)] # -2
# remove positive charges
for atom in [mol.GetAtomWithIdx(x) for x in p]:
# Remove hydrogen and reduce formal change until neutral or no more hydrogens
while atom.GetFormalCharge() > 0 and atom.GetNumExplicitHs() > 0:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() - 1)
atom.SetFormalCharge(atom.GetFormalCharge() - 1)
# remove negative charges
for atom in [mol.GetAtomWithIdx(x) for x in n]:
# Add hydrogen and increase formal change until neutral
while atom.GetFormalCharge() < 0:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
atom.SetFormalCharge(atom.GetFormalCharge() + 1)
# clean-up
mol.ClearComputedProps()
mol.UpdatePropertyCache()
Chem.GetSymmSSSR(mol)
# mol.ClearComputedProps() # not tested, but might solve the -O-H2 issue
# mol.UpdatePropertyCache()
return mol
class Standardizer(Filter):
"""A class for standardizing molecular structures. The standardization itself is based
on a protocol that the user can modify.
By default this protocol consists of 15 tasks applied to each molecule individually:
1) **initiate_mol**: check if the molecule passed the RDKit conversion
2) **filter_empty**: filter molecules with empty structures
3) **disconnect_metal**: break bonds involving metallic atoms, resulting in potentially several molecules per structure.
4) **clear_mixtures**: retrieve only the "best" molecule from a mixture, which might not always be the largest one.
5) **deglycosylate**: remove all external sugars-like rings from the molecule and return the remaining non-linear entity.
6) **filter_num_heavy_atom**: filter molecules with a heavy atom count not in the accepted range. By default: num_heavy_atom > 3.
7) **filter_molecular_weight**: filter molecules with a molecular weight not in the accepted range. By default: molecular_weight <= 1000.0.
8) **filter_num_ring**: filter molecules with a number of rings (Smallest Sets of Smallest Rings or SSSR) not in the accepted range. By default: num_ring > 0.
9) **filter_elements**: filter molecules with elements not considered as medchem. By default: elements in H, B, C, N, O, F, P, S, Cl, Br, I.
10) **clear_isotopes**: set all atoms to their most common isotope (i.e. 14C becomes 12C which is C).
11) **normalize**: always write the same functional groups in the same manner.
12) **uncharge**: remove all charges on a molecule when it is possible. This is different from rdkit.Chem.MolStandardize.charge module as there is no attempt for reaching the zwitterion.
13) **canonicalize**: enumerate the canonical tautomer.
14) **clear_stereo**: remove all remaining stereochemistry flags on the molecule.
15) **reset_mol**: convert the molecule to SMILES and back to discard potentially outdated flags on atoms and bonds.
Other steps are not part of this protocol but can be executed as well for convenience:
- **depict**: find the "best" possible 2D depiction of the molecule among Input/rdDepictor/Avalon/CoordGen methods
- **extract_murcko**: return the Murcko Scaffold from the molecule
- **clear_side_chains**: remove any exocyclic atom that is not part of a linker
- **reset_mol**: reset the molecule by converting to and then from smiles
This results in new columns in the input DataFrame:
- the 'mol' column: updated structure (only for the protocol)
- the 'status' column: either passed, filtered or error.
- the 'task' column: the latest task that was applied to the molecule.
The standardizer works either on a molecule (method: 'run') or on a DataFrame containing molecules ('run_df').
In the latter case, the inchikey is computed and can be used for identifying duplicate entries.
A timeout value is set by default and will be applied to each molecule individually to avoid the process being stuck on marginally difficult cases.
This value can be set either during the Standardizer object initialization or by defining it as an option in the protocol (priority is given to the latter if defined).
"""
def __init__(self,
protocol: str = None,
col_mol: str = 'mol',
col_id: str = 'idm',
elements_medchem: set = DEFAULT_ELEMENTS,
timeout: int = 10,
):
"""Create a Standardizer object.
:param protocol: either a JSON file or a dictionary. The resulting dictionary needs a 'tasks' key that lists all tasks to be executed.
:param col_mol: the column with the molecule for when running the run_df method
:param col_id: the column with the id for when running the run_df method
:param elements_medchem: the set of elements accepted by the filter_elements task
:param timeout: the maximum number of seconds allowed for processing a single molecule
"""
# filter
super(Standardizer, self).__init__()
# standardizer
self._elements_medchem = elements_medchem
self._col_id = col_id
self._col_mol = col_mol
if protocol is None:
self._protocol = json.load(open(pkg_resources.resource_filename('npfc', 'data/std_mols.json'), 'r'))
else:
if isinstance(protocol, str):
self._protocol = json.load(open(protocol, 'r'))
else:
self._protocol = protocol
# workers
self.metal_disconnector = MetalDisconnector()
self.normalizer = Normalizer()
self.full_uncharger = FullUncharger()
self.canonicalizer = TautomerCanonicalizer()
# display information on protocol
if logging.getLogger().level == logging.DEBUG:
logging.debug("Successfully instanciated a Standardizer object with protocol:")
[logging.debug("Task #%s: %s", str(i+1).zfill(2), task) for i, task in enumerate(self._protocol['tasks'])]
[logging.debug("Option %s %s", opt, value) for opt, value in self._protocol.items() if opt != 'tasks']
def __repr__(self):
return f"Standardizer ({len(self._protocol['tasks'])} tasks)"
def describe(self):
# init
pad = max(len(x) for x in self._protocol['tasks'])
head = 'STANDARDIZER={\n'
tail = '\n}'
# define a list of tasks with options in parenthesis
tasks = list(self._protocol['tasks'])
for i, task in enumerate(tasks):
if task in self._protocol.keys():
opt = self._protocol[task].replace(task.replace('filter_', ''), 'x')
tasks[i] = f"{task} ({opt})"
# concatenate all parts and intersperse the tasks with down arrows, with the step index on the left
return head + '\n'.join(intersperse('↓'.center(pad+10), [str(i+1).zfill(2).ljust(5) + x.center(pad) for i, x in enumerate(tasks)])) + tail
@property
def protocol(self):
return self._protocol
@protocol.setter
def protocol(self, protocol: str):
# input is a json file => convert it to a dict
if isinstance(protocol, str):
utils.check_arg_config_file(protocol)
with open(protocol) as f:
protocol = json.load(f)
# input is a dict
if isinstance(protocol, dict):
if 'tasks' not in protocol.keys():
raise ValueError("invalid protocol format (no 'tasks' key found)")
elif not isinstance(protocol['tasks'], list) and not isinstance(protocol['tasks'], tuple):
raise ValueError("invalid protocol format ('tasks' key is neither list or tuple)")
# update default protocol
self._protocol.update(protocol)
@property
def timeout(self) -> str:
return self._timeout
@timeout.setter
def timeout(self, value: int) -> None:
if not isinstance(value, int):
raise ValueError(f"Error! timeout should be a positive int (>1), not '{type(value)}'.")
elif value < 1:
raise ValueError(f"Error! timeout should be superior to 1 ({value})")
self._col_id = value
@property
def col_id(self) -> str:
return self._col_id
@col_id.setter
def col_id(self, value: str) -> None:
if value is None:
raise ValueError(f"Error! col_id cannot be '{value}'.")
self._col_id = value
@property
def col_mol(self) -> str:
return self._col_mol
@col_mol.setter
def col_mol(self, value: str) -> None:
if value is None:
raise ValueError(f"Error! col_mol cannot be '{value}'.")
self._col_mol = value
@property
def elements_medchem(self) -> set:
return self._elements_medchem
@elements_medchem.setter
def elements_medchem(self, value: set) -> None:
if not isinstance(value, set):
raise ValueError(f"Error! elements_medchem should be a set of strings, not '{value}' ({type(value)}).")
elif not all([isinstance(v, str) for v in value]):
raise ValueError(f"Error! elements_medchem should be a set of strings, not '{value}' ({type(value)}).")
self._elements_medchem = value
def clear_isotopes(self, mol: Mol) -> Mol:
"""Return a molecule without any isotopes.
:param mol: the input molecule
:return: the molecule without isotope
"""
mol = Mol(mol)
for a in mol.GetAtoms():
a.SetIsotope(0)
return mol
def clear_mixtures(self, mol: Mol) -> Mol:
"""Return the "best" molecule found in a molecular structure.
The "best" molecule is determined by the following criteria, sorted by priority:
1) contains only medchem elements
2) contains at least one ring
3) has the largest molecular weight of the mixture
To summarize:
.. math::
medchem > non linear > molecular weight
So the largest molecule of a mixture might not always be selected, for instance
a very long aliphatic chain would be dismissed to keep a benzene molecule instead.
This is implemented in such a way because our fragments used for substructure search contain at least one ring.
On the contrary, this long aliphatic chain would be kept in a mixture with a non-medchem molecule.
:param mol: the input molecule(s)
:return: the best molecule
"""
submols = Chem.GetMolFrags(mol, asMols=True, sanitizeFrags=True)
# no need to look further if we have only one submol!
if len(submols) < 2:
return mol
# otherwise, we have to compare the submols
# init
logging.debug("found %s submols", len(submols))
best_molecular_weight = -1.0 # so we are sure to update this on the first iteration
best_submol = None
best_is_medchem = False
best_is_non_linear = False
# begin
for i, submol in enumerate(submols):
# is_medchem
is_medchem = self.filter_mol(submol, f'elements in {", ".join(str(x) for x in self.elements_medchem)}')
is_non_linear = self.filter_mol(submol, "num_rings > 0")
# molecular_weight
molecular_weight = Descriptors.ExactMolWt(submol)
logging.debug("submol #%s: IM=%s, INL=%s, MW=%s", i, is_medchem, is_non_linear, molecular_weight)
# compare to the current best fragment
update_best = False
compute_diff = False # fall back to comparing molecular weights only when the other criteria tie
# 2 criteria more important than molecular weight: is_medchem > is_non_linear
if not best_is_medchem and is_medchem:
update_best = True
elif best_is_medchem and not is_medchem:
continue
elif not best_is_medchem and not is_medchem:
if not best_is_non_linear and is_non_linear:
update_best = True
elif best_is_non_linear and not is_non_linear:
continue
else:
compute_diff = True
else: # best_is_medchem and is_medchem
if not best_is_non_linear and is_non_linear:
update_best = True
elif best_is_non_linear and not is_non_linear:
continue
else:
compute_diff = True
# check molecular_weights only in case of doubt
if not update_best and compute_diff and molecular_weight > best_molecular_weight:
update_best = True
# update best with the properties of the current mol
if update_best:
best_is_medchem = is_medchem
best_is_non_linear = is_non_linear
best_submol = submol
best_molecular_weight = molecular_weight
return best_submol
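# For example (hypothetical mixture), the small medchem ring is kept over the heavier
# but non-medchem, linear fragment:
# clear_mixtures(Chem.MolFromSmiles('c1ccccc1.CCCCCCCCCCCC[Si](C)(C)C')) -> benzene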
def clear_side_chains(self, mol: Mol, debug: bool = False) -> Mol:
"""Clear the side chains of a molecule.
This method operates in 3 steps:
1. Remove quickly all atoms in side chains but the one attached to a ring, starting from the terminal atom. (would certainly fail in case of linear molecules)
2. Iterate over each remaining exocyclic atom to remove only atoms when it does not break the ring aromaticity. Single and double bonds can be broken, and the ring atoms which were attached to removed atoms are neutralized.
3. Remove eventual nitrogen radicals by Smiles editing.
.. warning:: I found only nitrogen radicals in my dataset, this might be insufficient on a larger scale.
.. warning:: I found a bug for this molecule 'O=C(O)C1OC(OCC2OC(O)C(O)C(O)C2O)C(O)C(O)C1O', where a methyl remains after processing.
:param mol: the molecule to simplify
:return: a simplified copy of the molecule
"""
# 1st peeling: fast, chunks of terminal chains
smarts = Chem.MolFromSmarts('[!#1;R0;D1]~[!#1;R0;D{1-2}]') # terminal exocyclic atom linked to another exocyclic atom, neighbour atom is not allowed more than 2 degrees, so branches (i.e. CC(=O)C) are not cut out
while mol.HasSubstructMatch(smarts):
mol = Chem.DeleteSubstructs(mol, smarts)
mol.ClearComputedProps()
mol.UpdatePropertyCache()
Chem.GetSymmSSSR(mol)
# 2nd peeling: slow, atom per atom of the remaining terminal atoms
rwmol = Chem.RWMol(mol)
smarts = Chem.MolFromSmarts('[!#1;R0;D1]') # remaining terminal exocyclic atoms require cautious handling
matches = sorted([item for sublist in rwmol.GetSubstructMatches(smarts) for item in sublist], reverse=True) # reverse order so that remaining atom indices from matches are still valid after removing an atom
for m in matches:
try:
# matches were flattened above, so m is already a single atom index
rwmol_tmp = deepcopy(rwmol)
neighbor = rwmol_tmp.GetAtomWithIdx(m).GetNeighbors()[0] # terminal atom so only 1 neighbor
rwmol_tmp.RemoveAtom(m)
neighbor.SetFormalCharge(0) # neutralize in case of previously quaternary nitrogens
neighbor.SetNumRadicalElectrons(0) # remove radicals; this does not work as expected
Chem.SanitizeMol(rwmol_tmp) # will fail in case of break in aromaticity
rwmol = rwmol_tmp # if it went ok
except Chem.rdchem.KekulizeException:
pass # we should not have tried to remove this atom, so just leave it be
# I could not figure out how to remove radicals, so I just convert the mol to Smiles and edit the string
return Chem.MolFromSmiles(Chem.MolToSmiles(rwmol).replace('[N]', 'N').replace('[n]', 'n'))
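# For example (hypothetical input), the exocyclic methyl is peeled off while the
# aromatic ring is kept intact:
# clear_side_chains(Chem.MolFromSmiles('Cc1ccccc1')) -> benzene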
def _is_sugar_like(self, ring_aidx: list, mol: Mol):
"""Indicate whether a ring (defined by its atom indices) in a molecule is sugar-like or not.
Several conditions are to be met for a ring to be considered sugar-like:
1. size: either 5 or 6 atoms
2. elements: 1 oxygen and the rest carbons
3. hybridization: ring atoms need have single bonds only
4. connection points (next to the ring oxygen): at least 1 has an oxygen as neighbor
5. substituents (not next to the ring oxygen): at least 1 (5-membered rings) or 2 (6-membered rings) have an oxygen as neighbor
:param ring_aidx: the molecule indices of the ring to investigate
:param mol: the molecule that contain the ring
:return: True if the ring complies to the 5 conditions above, False otherwise.
"""
# ring size: only 5-6 membered rings, rings are already fused when this function is called
ring_size = len(ring_aidx)
if ring_size != 5 and ring_size != 6:
return False
# access the actual atom objects more quickly
ring_atoms = [mol.GetAtomWithIdx(x) for x in ring_aidx] # ring atoms are in the same order as ring_aidx
# atom composition
elements = [x.GetAtomicNum() for x in ring_atoms]
element_counter = Counter(elements)
if not ((ring_size == 5 and element_counter[6] == 4 and element_counter[8] == 1) or (ring_size == 6 and element_counter[6] == 5 and element_counter[8] == 1)):
return False
# hybridization of carbon atoms (check if only single bonds attached to the ring)
carbon_atoms = [x for x in ring_atoms if x.GetAtomicNum() == 6]
if any([x for x in carbon_atoms if x.GetHybridization() != 4]): # to check if no H attached in case of the * position
return False
# to define connection points and substituents, we first need to identify the position of the ring oxygen
oxygen_aidx = [x for x in ring_atoms if x not in carbon_atoms][0].GetIdx() # only 1 oxygen in ring
# connection points: at least 1 needs an oxygen as neighbor
cps = []
cps_ok = False
for carbon_atom in carbon_atoms:
neighbors = carbon_atom.GetNeighbors()
# if the ring oxygen is next to this atom, this atom is a connection point
if any([n.GetIdx() == oxygen_aidx for n in neighbors]):
cps.append(carbon_atom)
# at least 1 of the connection points has to have an oxygen as side chain
if any([n.GetAtomicNum() == 8 and n.GetIdx() != oxygen_aidx for n in neighbors]):
cps_ok = True
if not cps_ok:
return False
# substituents
substituents = [x for x in carbon_atoms if x.GetIdx() not in [y.GetIdx() for y in cps]]
count_oxygens = 0
for substituent in substituents:
side_chain_atoms = [x for x in substituent.GetNeighbors() if x.GetIdx() not in ring_aidx]
if len(side_chain_atoms) > 0:
if not side_chain_atoms[0].GetAtomicNum() == 8: # do not check for the degree here because there are connections on substituents too!
return False
count_oxygens += 1
# at least 1 oxygen for 5-membered rings and 2 for 6-membered rings
if (ring_size == 6 and count_oxygens < 2) or (ring_size == 5 and count_oxygens < 1):
return False
return True
def deglycosylate(self, mol: Mol, mode: str = 'run') -> Union[Mol, Graph]:
"""Function to deglycosylate molecules.
Several rules are applied for removing Sugar-Like Rings (SLRs) from molecules:
1. Only external SLRs are removed, so a molecule with aglycan-SLR-aglycan is not modified
2. Only molecules with both aglycans and SLRs are modified (molecules made only of SLRs, or with no SLR at all, are left untouched)
3. Linear aglycans are considered to be part of linkers and are thus never returned as results
4. Glycosidic bonds are defined as either O or CO and can be part of a larger linear linker. So from the SLR side, either nothing or only 1 C is allowed before the glycosidic bond oxygen
5. Linker atoms until the glycosidic bond oxygen atom are appended to the definition of the SLR, so that any extra methyl is also removed.
.. image:: _images/std_deglyco_algo.svg
:align: center
:param mol: the input molecule
:param mode: either 'run' for actually deglycosylating the molecule or 'graph' for returning a graph of rings instead (useful for presentations or debugging)
:return: the deglycosylated molecule or a graph of rings
"""
if len(Chem.GetMolFrags(mol)) > 1:
raise ValueError("Error! Deglycosylation is designed to work on single molecules, not mixtures!")
if mode not in ('run', 'graph'):
raise AttributeError(f"Error! Unauthorized value for parameter 'mode'! ('{mode}')")
# avoid inplace modifications
mol = Chem.Mol(mol)
# define rings
rings = mol.GetRingInfo().AtomRings()
rings = utils.fuse_rings(rings)
# try to deglycosylate only if the molecule has at least 2 rings:
# - leave linear compounds out
# - leave sugars in case they are the only ring on the molecule
if len(rings) < 2:
return mol
# annotate sugar-like rings
are_sugar_like = [self._is_sugar_like(x, mol) for x in rings]
logging.debug('RINGS: %s', [(rings[i], are_sugar_like[i]) for i in range(len(rings))])
# remove sugars only when the molecule has some sugar rings and is not entirely composed of sugars
if not any(are_sugar_like) or all(are_sugar_like):
return mol
ring_atoms = set([item for sublist in rings for item in sublist])
# init sugar graph
G = Graph()
# init linker parts left and right of the glycosidic bond oxygen: one of the sides must contain either a single C or nothing
authorized_linker_parts = [[], ['C']] # R1-OxxxxR2 or R1-COxxxxR2 with xxxx being any sequence of linear atoms (same for R2->R1)
# define linker atoms as shortest path between 2 rings that do not include other rings
for i in range(len(rings)):
ring1 = rings[i]
for j in range(i+1, len(rings)):
ring2 = rings[j]
logging.debug('NEW RING PAIR -- R1: %s; R2: %s', ring1, ring2)
# shortest path between the two rings that do not include the current rings themselves
shortest_path = [x for x in Chem.GetShortestPath(mol, ring1[0], ring2[0]) if x not in ring1 + ring2]
# define the other ring atoms
other_ring_atoms = ring_atoms.symmetric_difference(set(ring1 + ring2))
# shortest path for going from the left (ring1) to the right (ring2)
shortest_path_elements = [mol.GetAtomWithIdx(x).GetSymbol() for x in shortest_path]
# in case ring1 (left) and/or ring2 (right) is sugar-like, append the side-chain atoms left and right
# of the oxygen to the corresponding ring atoms to avoid left-overs (the O itself is not removed)
glycosidic_bond = False
if 'O' in shortest_path_elements: # not expected to be common enough for a try/catch statement
# from the left side
aidx_oxygen_left = shortest_path_elements.index('O') # first O found in list
logging.debug('R1 -> R2 -- pos of O: %s; R1 is sugar_like: %s; linker part from R1: %s', aidx_oxygen_left, are_sugar_like[i], shortest_path_elements[:aidx_oxygen_left])
if are_sugar_like[i] and shortest_path_elements[:aidx_oxygen_left] in authorized_linker_parts:
glycosidic_bond = True
ring1 += shortest_path[:aidx_oxygen_left]
# from the right side
shortest_path_elements.reverse()
shortest_path.reverse()
aidx_oxygen_right = shortest_path_elements.index('O') # first O found in list
logging.debug('R2 -> R1 -- pos of O: %s; R2 is sugar_like: %s; linker part from R2: %s', aidx_oxygen_right, are_sugar_like[j], shortest_path_elements[:aidx_oxygen_right])
if are_sugar_like[j] and shortest_path_elements[:aidx_oxygen_right] in authorized_linker_parts:
glycosidic_bond = True
ring2 += shortest_path[:aidx_oxygen_right]
logging.debug('R1 and R2 are linked through a glycosidic bond: %s', glycosidic_bond)
# in case the 2 rings are directly connected, append a new edge to G
if not set(shortest_path).intersection(other_ring_atoms):
G.add_edge(i, j, atoms=''.join(shortest_path_elements), glycosidic_bond=glycosidic_bond)
# annotate nodes with the ring atoms (+ relevent linker atoms) and if they are sugar-like
G.nodes[i]['atoms'] = ring1
G.nodes[i]['sugar_like'] = are_sugar_like[i]
G.nodes[j]['atoms'] = ring2
G.nodes[j]['sugar_like'] = are_sugar_like[j]
# draw the graph
if mode == 'graph':
# colormap_nodes = [(0.7,0.7,0.7) if x['sugar_like'] else (1,0,0) for i, x in G.nodes(data=True)]
# return draw.fc_graph(G, colormap_nodes=colormap_nodes)
return G
# iterative recording of terminal sugar rings (atoms) that are linked with a glycosidic bond
ring_atoms_to_remove = []
nodes_to_remove = [node for node in G.nodes(data=True) if node[1]['sugar_like'] and G.degree(node[0]) == 1 and list(G.edges(node[0], data=True))[0][2]['glycosidic_bond']]
while len(nodes_to_remove) > 0:
# record atoms indices to remove from the molecule
[ring_atoms_to_remove.append(n[1]['atoms']) for n in nodes_to_remove]
# remove nodes from current layer for next iteration
[G.remove_node(n[0]) for n in nodes_to_remove]
nodes_to_remove = [node for node in G.nodes(data=True) if node[1]['sugar_like'] and G.degree(node[0]) == 1 and list(G.edges(node[0], data=True))[0][2]['glycosidic_bond']]
logging.debug('Ring atoms to remove: %s', ring_atoms_to_remove)
# edit the molecule
if ring_atoms_to_remove:
# flatten the atom indices of each ring to remove in reverse order so that atom indices do not change when removing atoms
ring_atoms_to_remove = sorted([item for sublist in ring_atoms_to_remove for item in sublist], reverse=True)
emol = Chem.EditableMol(mol)
[emol.RemoveAtom(x) for x in ring_atoms_to_remove]
mol = emol.GetMol()
logging.debug('Obtained fragments: %s', Chem.MolToSmiles(mol))
# clean-up
frags = Chem.GetMolFrags(mol, asMols=True)
# avoid counting the number of rings in each fragment if only 1 fragment left anyway
if len(frags) == 1:
logging.debug('Only one fragment obtained, returning it')
return frags[0]
# the substituents of the deleted terminal sugar-like rings remain in the structure,
# these are obligatory linear because they were not in the graph,
# so we just have to retrieve the one fragment that is not linear
logging.debug('Returning only the non-linear obtained fragment')
return [x for x in frags if Descriptors.rdMolDescriptors.CalcNumRings(x) > 0][0]
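# For instance (hypothetical glucoside), the terminal sugar-like ring of
# 'OCC1OC(Oc2ccccc2)C(O)C(O)C1O' is attached through a glycosidic bond, so it is
# removed and only the aromatic aglycan fragment is returned.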
def _run(self, mol: Mol) -> tuple:
"""Helper function for run.
Contains all tasks defined within the protocol. Since some operations are
expensive and could last a very long time for complex molecules (normalize, canonicalize),
a timeout value is set globally. The run function is the one that can catch the exception
raised by timeouts.
:param mol: the input molecule
:return: a tuple containing the molecule, its status and the further task name it reached
"""
# initiate_mol
if mol is None:
return (mol, 'error', 'initiate_mol')
# begin protocol
mol = deepcopy(mol) # do not modify the molecule in place
for task in self._protocol['tasks']:
# filter_empty
if task == 'filter_empty':
try:
if not mol.GetNumAtoms():
return ('', 'filtered', task)
except ValueError:
return ('', 'error', task)
# disconnect_metal
elif task == 'disconnect_metal':
try:
mol = self.metal_disconnector.disconnect(mol)
except ValueError:
return (mol, 'error', 'disconnect_metal')
# clear_mixtures
elif task == 'clear_mixtures':
try:
mol = self.clear_mixtures(mol)
except ValueError:
return (mol, 'error', task)
# deglycosylate
elif task == 'deglycosylate':
try:
mol = self.deglycosylate(mol)
except ValueError:
return (mol, 'error', task)
# filters
elif task.startswith('filter_'): # filter empty is tried before
try:
if not self.filter_mol(mol, self._protocol[task]):
return (mol, 'filtered', task)
except ValueError:
return (mol, 'error', task)
# sanitize
elif task == 'sanitize':
try:
Chem.SanitizeMol(mol)
except ValueError:
return (mol, 'error', task)
# clear_isotopes
elif task == 'clear_isotopes':
try:
mol = self.clear_isotopes(mol)
except ValueError:
return (mol, 'error', task)
# normalize
elif task == 'normalize':
try:
mol = self.normalizer.normalize(mol)
except ValueError:
return (mol, 'error', task)
# uncharge
elif task == 'uncharge':
try:
mol = self.full_uncharger.full_uncharge(mol)
except ValueError:
return (mol, 'error', task)
# canonicalize
elif task == 'canonicalize':
# canonicalize
try:
mol = self.canonicalizer.canonicalize(mol)
except (ValueError, RuntimeError):
return (mol, 'error', task)
# clear_stereo
elif task == 'clear_stereo':
try:
rdmolops.RemoveStereochemistry(mol)
except ValueError:
return (mol, 'error', task)
# extract Murcko Scaffolds
elif task == 'extract_murcko':
try:
mol = MurckoScaffold.GetScaffoldForMol(mol)
except ValueError:
return (mol, 'error', task)
# clear side chains
elif task == 'clear_side_chains':
try:
mol = self.clear_side_chains(mol)
except ValueError:
return (mol, 'error', task)
elif task == 'depict':
try:
mol = depict_mol(mol)
except ValueError:
return (mol, 'error', task)
elif task == 'reset_mol':
try:
mol = Chem.MolFromSmiles(Chem.MolToSmiles(mol))
except ValueError:
return (mol, 'error', task)
# something else?
else:
raise ValueError(f"Unknown task: {task}")
# the molecule passed the whole protocol!
return (mol, 'passed', 'standardize')
def run(self, mol: Mol, timeout: int = 10) -> tuple:
"""Execute the standardization protocol on a molecule.
Molecules that exceed the timeout value are filtered with task='timeout'.
As a final step of the protocol, InChiKeys ('inchikey') are computed for identifying molecules.
:param mol: the input molecule
:param timeout: the maximum number of seconds for processing a molecule
:return: a tuple containing the molecule, its status and the further task name it reached
"""
with utils.timeout(timeout):
return self._run(mol)
# in case of timeout
return (mol, 'filtered', 'timeout')
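# Usage sketch (assumption, not part of the original file): given an instance `std` of
# this class, a single molecule can be processed with a per-molecule time budget:
#   mol, status, task = std.run(Chem.MolFromSmiles('CCO'), timeout=10)
#   # status is one of 'passed', 'filtered' or 'error'; task names the last step reached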
def run_df(self, df: DataFrame, filter_unwanted: List[str] = []) -> tuple:
"""Apply the standardization protocol on a DataFrame, with the possibility of directly filtering duplicate entries as well.
This can be very useful as the standardization process can expose duplicate entries due to salts removal, neutralization,
canonical tautomer enumeration, and stereochemistry centers unlabelling
If a reference file is specified, duplicate removals becomes possible accross chunks.
:param df: the input DataFrame
:param timeout: the maximum number of seconds for processing a molecule
:param filter_unwanted: a list of molecules in SMILES format that should be filtered out at the end of the standardization
:return: three DataFrames separated by status:
- passed
- filtered
- error
.. note:: As a side effect, the output DataFrames get indexed by idm.
:param df: The DataFrame with molecules to standardize
:param return: a tuple of 3 DataFrames: standardized, filtered and error.
"""
df = df.copy() # do not modify df in place
# check filter_unwanted data (if any error, crash before running the protocol)
if len(filter_unwanted) > 0:
unwanted_molecules = [Chem.MolFromSmiles(x) for x in filter_unwanted]
num_errors = 0
for i, m in enumerate(unwanted_molecules):
if m is None:
logging.error('Error in unwanted structure #%s! (%s)', i, filter_unwanted[i])
num_errors += 1
if num_errors > 0:
raise ValueError('Some errors were found in the unwanted molecules, aborting standardization!')
unwanted_inchikeys = [rdinchi.MolToInchiKey(x) for x in unwanted_molecules]
logging.debug('Unwanted Structure List:\n\n%s\n', DataFrame({'smiles': filter_unwanted, 'inchikey': unwanted_inchikeys}))
# run standardization protocol
df.index = df[self.col_id]
df.loc[:, self.col_mol], df.loc[:, 'status'], df.loc[:, 'task'] = zip(*df[self.col_mol].map(self.run))
# flag any remaining None molecules at the end of the pipeline for filtering out
df.loc[:, 'status'] = df.apply(lambda x: x['status'] if x['mol'] is not None else 'error', axis=1)
df.loc[:, 'task'] = df.apply(lambda x: x['task'] if x['mol'] is not None else 'filter_empty_final', axis=1)
df.loc[:, 'mol'] = df['mol'].map(lambda x: x if x is not None else '')
# do not apply filter duplicates on molecules with errors or that were already filtered for x reasons
df_error = df[df['status'] == 'error']
df_filtered = df[df['status'] == 'filtered']
df = df[df['status'].str.contains('passed')]
# compute InChiKeys
logging.debug('Computing InChI Keys for standardized structures')
df.loc[:, 'inchikey'] = df.loc[:, self.col_mol].map(rdinchi.MolToInchiKey)
# filter unwanted compounds using InChI Keys
if len(filter_unwanted) > 0:
logging.debug('Filtered out unwanted structures using InChI Keys')
# identify unwanted compounds
df_unwanted = df[df['inchikey'].isin(unwanted_inchikeys)]
if len(df_unwanted) > 0:
# remove these from passed compounds
df = df[~df['idm'].isin(df_unwanted['idm'])]
# annotate unwanted and add them to filtered compounds
df_unwanted.loc[:, 'task'] = 'standardize'
df_unwanted.loc[:, 'status'] = 'filter_unwanted'
df_filtered = pd.concat([df_filtered, df_unwanted])
logging.debug('Number of structures filtered out by identity: %s', len(df_unwanted.index))
# tuple of dataframes
return (df, df_filtered, df_error)
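# DataFrame-level usage sketch (assumption; column names 'idm' and 'mol' follow the
# defaults suggested by the code above):
#   passed, filtered, errors = std.run_df(df_molecules, filter_unwanted=['c1ccccc1'])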
``` |
{
"source": "JoseManuelVargas/arandasoft-cpp-prueba-tecnica",
"score": 3
} |
#### File: arandasoft-cpp-prueba-tecnica/www/pruebas.py
```python
import unittest
import requests
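# Note: these are integration tests; they assume the binary-tree REST API from this
# repository is already running locally at http://localhost:5000 (see setUp below).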
class Pruebas(unittest.TestCase):
def setUp(self):
self.url = "http://localhost:5000/arbolbinario"
def test_error_creacion(self):
datos = {"nombre": "Arbol1 Py", "erroneo": [5, 3, 8, 4, 12, 3]}
respuesta = requests.post(self.url + "/crear", json=datos)
self.assertEqual(respuesta.status_code, 400)
r_json = respuesta.json()
self.assertTrue("error" in r_json)
self.assertTrue("mensaje" in r_json)
datos = {"nombre": "Arbol1", "valores": [5, 3, 8, 4, 12, 3]}
respuesta = requests.post(self.url + "/crearfalso", json=datos)
self.assertEqual(respuesta.status_code, 404)
def test_creacion_arbol(self):
datos = {"nombre": "Arbol1 Py", "valores": [5, 3, 8, 4, 12, 3]}
respuesta = requests.post(self.url + "/crear", json=datos)
self.assertEqual(respuesta.status_code, 200)
r_json = respuesta.json()
self.assertTrue("error" not in r_json)
self.assertTrue("nombre" in r_json)
self.assertEqual(r_json["nombre"], datos["nombre"])
self.assertTrue("id" in r_json)
self.assertGreater(r_json["id"], 0)
def test_ancestro(self):
datos = {"nombre": "Arbol2 Py", "valores": [5, 3, 8, 4, 12, 3]}
respuesta = requests.post(self.url + "/crear", json=datos)
self.assertEqual(respuesta.status_code, 200)
r_json = respuesta.json()
self.assertTrue("error" not in r_json)
self.assertTrue("nombre" in r_json)
self.assertEqual(r_json["nombre"], datos["nombre"])
self.assertTrue("id" in r_json)
self.assertGreater(r_json["id"], 0)
id_arbol = r_json["id"]
datos = {"id": id_arbol, "nodo1": 3, "nodo2": 5}
respuesta = requests.post(self.url + "/ancestrocomun", json=datos)
self.assertEqual(respuesta.status_code, 200)
r_json = respuesta.json()
self.assertTrue("error" not in r_json)
self.assertTrue("ancestro" in r_json)
self.assertGreater(r_json["ancestro"], 0)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "JoseManuelVasquez/Leaf",
"score": 3
} |
#### File: backend/services/receipt.py
```python
from abc import ABC, abstractmethod
class Strategy(ABC):
@abstractmethod
def get_blocks(self, lines):
pass
class DefaultStrategy(Strategy):
def get_blocks(self, lines):
blocks = []
row = 0
column = 0
begin_row = 0
while len(lines) - 2 > row:
# Check end_col
col_pos = len(lines[row])
column = col_pos if column < col_pos else column
# If the next line is blank or is a separator line made up of at most two distinct
# characters (e.g. a run of '-'), a new block is created.
# Repeated-character lines and blank lines are treated as separators since they
# don't contain useful data.
condition = lines[row + 1] == '' or len("".join(set(lines[row + 1])).strip()) <= 2
while condition and len(lines) - 2 > row:
row += 1
# START Check end_col
col_pos = len(lines[row])
column = col_pos if column < col_pos else column
# END Check end_col
condition = lines[row + 1] == '' or len("".join(set(lines[row + 1])).strip()) <= 2
if not condition:
# The indexes start from 0
blocks.append({"begin_row": begin_row, "begin_col": 0, "end_row": row, "end_col": column - 1})
begin_row = row + 1
column = 0
row += 1
# Check end_col
col_pos = len(lines[row])
column = col_pos if column < col_pos else column
blocks.append({"begin_row": begin_row, "begin_col": 0, "end_row": row, "end_col": column - 1})
return blocks
# Change the name of the class if needed
class SomeStrategy(Strategy):
def get_blocks(self, data):
return data
class ReceiptService():
def __init__(self):
self._strategy = DefaultStrategy()
def set_strategy(self, strategy: Strategy):
self._strategy = strategy
def get_blocks(self, lines):
return self._strategy.get_blocks(lines)
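# Usage sketch (illustrative): `lines` is a list of strings, e.g. the rows of a parsed
# receipt; the default strategy returns 0-based block coordinates:
#   service = ReceiptService()
#   blocks = service.get_blocks(receipt_lines)  # [{'begin_row': ..., 'begin_col': 0, 'end_row': ..., 'end_col': ...}, ...]
#   service.set_strategy(SomeStrategy())        # strategies can be swapped at runtime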
``` |
{
"source": "josemarcaetano/whatsapp-attention-cascades",
"score": 2
} |
#### File: whatsapp-attention-cascades/attention_cascades/attention_cascades.py
```python
import sys
import os
from datetime import datetime
import json
from operator import itemgetter
import networkx as nx
import operator
from collections import OrderedDict
import re, math
from collections import Counter
import itertools
class AttentionCascades:
def __init__(self):
pass
def getDateFormatted(self, string_datetime):
a_datetime = None
string_datetime = string_datetime.replace("\"","")
string_datetime = string_datetime.replace("'", "")
try:
a_datetime = datetime.strptime(string_datetime, '%Y-%m-%d %H:%M:%S')
except Exception as e:
try:
a_datetime = datetime.strptime(string_datetime, '%Y-%m-%dT%H:%M:%SZ')
except Exception as e:
print("ERROR on formattring datetime:",a_datetime, flush=True)
return(a_datetime)
def buildDialoguesCollection(self, filename_messages_dialogues, filename_messages, filename_quote_messages,
filename_messages_quotes_references):
total_users_messages_quotes = 0
all_total_messages_quotes = 0
print("\t>>>>> Loading already inserted messages quotes ids", flush=True)
messages_dialogues_inserted_set = set()
if (os.path.exists(filename_messages_dialogues) is True):
with open(filename_messages_dialogues, 'r', encoding='utf-8') as file_groups:
for group in file_groups:
group = json.loads(group)
messages_dialogues_inserted_set.add(group["_id"])
print("\t>>>>> Building reference hash of messages", flush=True)
original_messages_hash = {}
line_number = 0
with open(filename_messages, 'r',
encoding='utf-8') as file_messages:
for message in file_messages:
message = json.loads(message)
line_number += 1
if (line_number % 100000 == 0):
print("\t\tOriginal messages hashed so far", line_number, '\tDatetime: ', datetime.now(),
flush=True)
original_messages_hash[message['_id']] = message
print("\t>>>>> Building reference hash of quote_messages", flush=True)
quote_messages_hash = {}
line_number = 0
with open(filename_quote_messages, 'r',
encoding='utf-8') as file_quote_messages:
for quote_message in file_quote_messages:
quote_message = json.loads(quote_message)
line_number += 1
if (line_number % 100000 == 0):
print("\t\tQuote messages hashed so far", line_number, '\tDatetime: ', datetime.now(),
flush=True)
if (quote_message['DEVICE'] not in quote_messages_hash):
quote_messages_hash[quote_message['DEVICE']] = {}
if (quote_message['IMPORT_DATE'] not in quote_messages_hash[quote_message['DEVICE']]):
quote_messages_hash[quote_message['DEVICE']][quote_message['IMPORT_DATE']] = {}
if (quote_message['group']['_id'] not in quote_messages_hash[quote_message['DEVICE']][quote_message['IMPORT_DATE']]):
quote_messages_hash[quote_message['DEVICE']][quote_message['IMPORT_DATE']][quote_message['group']['_id']] = {}
if (quote_message['quoted_row_id'] not in quote_messages_hash[quote_message['DEVICE']][quote_message['IMPORT_DATE']][quote_message['group']['_id']]):
quote_messages_hash[quote_message['DEVICE']][quote_message['IMPORT_DATE']][
quote_message['group']['_id']][quote_message['quoted_row_id']] = []
# Get complete message
if(quote_message['_id'] in original_messages_hash):
complete_quote_message = original_messages_hash[quote_message['_id']]
complete_quote_message['quoted_row_id'] = quote_message['quoted_row_id']
quote_messages_hash[quote_message['DEVICE']][quote_message['IMPORT_DATE']][quote_message['group']['_id']][quote_message['quoted_row_id']].append(complete_quote_message)
print("\t>>>>> Initiating insertion process", flush=True)
try:
with open(filename_messages_dialogues, 'a', encoding='utf-8') as file_messages_dialogues:
with open(filename_messages_quotes_references, 'r',
encoding='utf-8') as file_messages_quotes_references:
for message_quoted_reference in file_messages_quotes_references:
message_quoted_reference = json.loads(message_quoted_reference)
all_total_messages_quotes += 1
if (all_total_messages_quotes % 100000 == 0):
print("Dialogues processed so far", all_total_messages_quotes, '\tDatetime: ',
datetime.now(),
flush=True)
if (message_quoted_reference['DEVICE'] in quote_messages_hash and
message_quoted_reference['IMPORT_DATE'] in quote_messages_hash[message_quoted_reference['DEVICE']] and
message_quoted_reference['group']['_id'] in quote_messages_hash[message_quoted_reference['DEVICE']][
message_quoted_reference['IMPORT_DATE']]):
quote_info = quote_messages_hash[message_quoted_reference['DEVICE']][message_quoted_reference['IMPORT_DATE']][message_quoted_reference['group']['_id']]
found_quote_message = False
quote_message = None
if (message_quoted_reference['quoted_row_id'] in quote_info):
for quote_message in quote_info[message_quoted_reference['quoted_row_id']]:
if (quote_message['quoted_row_id'] == message_quoted_reference['quoted_row_id']):
found_quote_message = True
break
if (found_quote_message is True and quote_message is not None and message_quoted_reference['quoted_message_id'] in original_messages_hash):
# Check if the quote message was published after the original message (SANITY TEST)
publication_date_quoted_message = self.getDateFormatted(string_datetime=original_messages_hash[message_quoted_reference['quoted_message_id']]['publication_date'])
publication_date_quote_message = self.getDateFormatted(string_datetime=quote_message['publication_date'])
if(publication_date_quote_message > publication_date_quoted_message):
message_dialogue_id = str(message_quoted_reference['quoted_message_id'] + "/" +quote_message['_id'])
if(message_dialogue_id not in messages_dialogues_inserted_set):
original_message = original_messages_hash[
message_quoted_reference['quoted_message_id']]
messages_dialogue = {}
messages_dialogue['DEVICE'] = message_quoted_reference['DEVICE']
messages_dialogue['IMPORT_DATE'] = message_quoted_reference['IMPORT_DATE']
messages_dialogue['_id'] = message_dialogue_id
messages_dialogue['group'] = original_message['group']
messages_dialogue['original_message'] = original_message
messages_dialogue['quote_message'] = quote_message
json.dump(messages_dialogue, file_messages_dialogues)
file_messages_dialogues.write('\n')
messages_dialogues_inserted_set.add(messages_dialogue['_id'])
total_users_messages_quotes += 1
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno, '\tDatetime: ', datetime.now(),
flush=True)
finally:
print("Total messages:", all_total_messages_quotes, "\tTotal quote messages:", total_users_messages_quotes)
def getPredecessors(self, graph, node, node_list):
if (len(list(graph.predecessors(node))) == 0):
return (node_list)
else:
for a_node in graph.predecessors(node):
if(a_node is not None):
node_list.append(a_node)
node_list = self.getPredecessors(graph=graph, node=a_node, node_list=node_list)
return(node_list)
def getSuccessors(self, graph, node, node_list):
if (len(list(graph.successors(node))) == 0):
return (node_list)
else:
for a_node in graph.successors(node):
if(a_node is not None):
node_list.append(a_node)
node_list = self.getSuccessors(graph=graph, node=a_node, node_list=node_list)
return(node_list)
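# Note: getPredecessors/getSuccessors recursively collect every ancestor/descendant of
# `node`; on non-tree graphs the same node may appear more than once in the returned
# list, which is harmless here because the lists are only used to build subgraphs.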
def buildCascadesFiles(self, filename_messages_dialogues, path_graph_sources):
total_cascades = 0
import networkx as nx
# networkx is already imported at module level, so the import above is redundant;
# the block below only checks which DOT-writing backend is available (pygraphviz
# or pydot) and reports why the import fails if neither is installed
try:
import pygraphviz
from networkx.drawing.nx_agraph import write_dot
print("using package pygraphviz")
except ImportError:
try:
import pydot
from networkx.drawing.nx_pydot import write_dot
print("using package pydot")
except ImportError:
print()
print("Both pygraphviz and pydot were not found ")
print("see https://networkx.github.io/documentation/latest/reference/drawing.html")
print()
raise
try:
messages_originals_publications = {}
messages_originals_quotes = {}
messages_originals_list_id = set()
line_number = 0
# Create and populate auxiliary dictionaries
print("\tCreating and populating auxiliary dictionaries", '\tDatetime: ', datetime.now(),
flush=True)
with open(filename_messages_dialogues, 'r', encoding='utf-8') as file_dialogues:
for dialogue in file_dialogues:
dialogue = json.loads(dialogue)
line_number += 1
if (line_number % 100000 == 0):
print("\t\tDialogues processed so far", line_number, '\tDatetime: ', datetime.now(),
flush=True)
if (dialogue['group']['_id'] not in messages_originals_publications):
messages_originals_publications[dialogue['group']['_id']] = []
if(dialogue['original_message']['_id'] not in messages_originals_list_id):
#document = {"_id":dialogue['original_message']['_id'], "publication_date":dialogue['original_message']['publication_date']}
messages_originals_publications[dialogue['group']['_id']].append(dialogue['original_message'])
messages_originals_list_id.add(dialogue['original_message']['_id'])
if(dialogue['original_message']['_id'] not in messages_originals_quotes):
messages_originals_quotes[dialogue['original_message']['_id']] = []
# document = {"_id": dialogue['quote_message']['_id'],
# "publication_date": dialogue['quote_message']['publication_date']}
messages_originals_quotes[dialogue['original_message']['_id']].append(dialogue['quote_message'])
print("\tOrdering quote_message_lists", '\tDatetime: ', datetime.now(),
flush=True)
# Order quote_message_lists
line_number = 0
messages_originals_quotes_copy = messages_originals_quotes
for message_original_id, quote_messages_list in messages_originals_quotes_copy.items():
line_number += 1
if (line_number % 100000 == 0):
print("\t\tQuote messages sorted so far", line_number, "\tTotal to process: ",
len(messages_originals_quotes), '\tDatetime: ', datetime.now(),
flush=True)
if(len(quote_messages_list) > 1):
result = sorted(quote_messages_list, key=itemgetter('publication_date'), reverse=True)
messages_originals_quotes[message_original_id] = result
# Get oldest original_messages of each group
print("\tProcessing cascades", '\tDatetime: ', datetime.now(),
flush=True)
group_index = 0
for group_id, values in messages_originals_publications.items():
DG = nx.DiGraph()
cascades_roots = {}
group_index +=1
print("\t>>>>>>>>> Processing group [", group_index,"]\tID:", group_id, '\tDatetime: ', datetime.now(),
flush=True)
#print("Ordering a message_list of group of len", len(values), flush=True)
messages_original_ordered = sorted(values, key=itemgetter('publication_date'), reverse=True)
#print("Now processing ordered message_list", flush=True)
# Process cascades of each group
line_number = 0
for message_original in messages_original_ordered:
cascades_roots[message_original['_id']] = True
line_number += 1
if (line_number % 10000 == 0):
print("\t\tMessages processed so far", line_number, "\tTotal to process: ",
len(messages_original_ordered), '\tDatetime: ', datetime.now(),
flush=True)
DG.add_node(message_original['_id'],
publication_date=message_original['publication_date'],
type = message_original['type'],
group_id = message_original['group']['_id'],
group_category=message_original['group']['category'],
user_id = message_original['user']['_id']
)
#print("Getting messages_quote_list", flush=True)
messages_quote_list = messages_originals_quotes[message_original['_id']]
other_line_number = 0
for message_quote in messages_quote_list:
other_line_number += 1
if (other_line_number % 100 == 0):
print("\t\t>>>>Messages quotes processed so far", other_line_number, "\tTotal to process: ",
len(messages_quote_list), '\tDatetime: ', datetime.now(),
flush=True)
if(DG.has_node(message_quote['_id']) is False):
DG.add_node(message_quote['_id'],
publication_date=message_quote['publication_date'],
type=message_quote['type'],
group_id=message_quote['group']['_id'],
group_category=message_quote['group']['category'],
user_id=message_quote['user']['_id']
)
DG.add_edge(message_original['_id'],message_quote['_id'])
if(message_quote['_id'] in cascades_roots):
cascades_roots[message_quote['_id']] = False
print("\t\t>>>>Writing DOT files\tDatetime:", datetime.now(),flush=True)
index_line = 0
cascade_index = 0
for message_id, is_root in cascades_roots.items():
index_line += 1
if (index_line % 1000 == 0):
print("\t\t>>>>Cascades processed so far", index_line, "\tTotal to process: ",
len(cascades_roots), '\tDatetime: ', datetime.now(),
flush=True)
if(is_root is True and DG.has_node(message_id) is True):
node_list = self.getSuccessors(graph=DG, node=message_id, node_list=[message_id])
#print(node_list)
H = DG.subgraph(node_list)
dot_filename = str(path_graph_sources+group_id+"_"+message_id+".dot")
write_dot(H, dot_filename)
total_cascades += 1
cascade_index +=1
print("\t>>>>>>>>> Group [",group_id,'] had', str(cascade_index),'cascades\tDatetime: ',
datetime.now(),
flush=True)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno, '\tDatetime: ', datetime.now(),
flush=True)
# finally:
# print("Total messages:", all_total_messages_quotes, "\tTotal quote messages:", total_users_messages_quotes)
# Function to get the maximum width of a binary tree
def getMaxWidth(self, graph, root, maximum_depth):
maxWidth = 0
h = maximum_depth
width_by_depth_dict = {}
# Get width of each level and compare the width with maximum width so far
for i in range(0, h + 1):
width = self.getWidth(graph, root, i)
width_by_depth_dict[str(i)] = width
if (width > maxWidth):
maxWidth = width
return maxWidth, width_by_depth_dict
def getWidth(self, graph, root, level):
if root is None:
return 0
if level == 0:
return 1
elif level >= 1:
neighbors = graph.successors(root)
local_width = 0
for neighbor in neighbors:
local_width = self.getWidth(graph, neighbor, level - 1) + local_width
return(local_width)
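# Example (illustrative): for a cascade with edges a->b, a->c, b->d the per-depth widths
# are {'0': 1, '1': 2, '2': 1}, so getMaxWidth(graph, 'a', maximum_depth=2) returns (2, {...}).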
def bfs(self,graph, start):
queue_sizes, visited, queue = set(), set(), [start]
while queue:
vertex = queue.pop(0)
if vertex not in visited:
visited.add(vertex)
queue.extend(list(set(list(graph[vertex])) - set(visited)))
else:
queue_sizes.add(len(queue))
return queue_sizes
def buildCascadesAttributesFiles(self, filename_cascades_static_attributes,
filename_cascades_depth_oriented_attributes,
filename_cascades_unique_users_over_time, path_graph_sources,
filename_fake_news_messages_manual_final,filename_messages_in_cascades_mapping,
filename_messages_in_cascades_mapping_tsv,filename_text_messages_info):
try:
text_messages_info_dict = {}
print("\t>>>> Building dictionary of text messages info\tDatetime: ", datetime.now(), flush=True)
#{"_id":message['_id'], "total_urls":len(urls),"total_emojis":len(emojis), "total_words":len(words),"sentiment":sentiment}
with open(filename_text_messages_info, 'r', encoding='utf-8') as file_input:
for text_message_info in file_input:
text_message_info = json.loads(text_message_info)
text_messages_info_dict[text_message_info['_id']] = {"sentiment":text_message_info["sentiment"],
"total_urls":text_message_info["total_urls"],
"total_emojis": text_message_info["total_emojis"],
"total_words": text_message_info["total_words"]}
messages_with_fake_news = set()
messages_with_fake_news_dict = {}
print("\t>>>> Building dictionary of messages falsehood verdict\tDatetime: ", datetime.now(), flush=True)
# "document_id" "fact_check_url" "cosine_similarity" "length_message" "length_fake_news_text" "my_verdict_has_verdict" "cosine_similarity_str" "has_falsehood_str" "cosine_similarity_interval_str"
with open(filename_fake_news_messages_manual_final, 'r', encoding='utf-8') as file_input:
header = file_input.readline()
for info in file_input:
info = info.replace("\n", "").split("\t")
if(str(info[5].replace("\"","")) == "True"):
messages_with_fake_news.add(info[0].replace("\"",""))
messages_with_fake_news_dict[info[0]] = info[1].replace("\"","")
print("\t>>>> Creating cascades attributes files\tDatetime: ", datetime.now(), flush=True)
total_cascades = 0
with open(filename_cascades_static_attributes, 'w', encoding='utf-8') as file_cascades_static_attributes:
file_cascades_static_attributes.write(str("cascade_id\tgroup_id\tgroup_category\troot_message\t"
"total_messages\ttotal_users\thas_falsehood\tmaximum_depth\t"
"unique_users\tstructural_virality\tbreadth\tfact_check_url\n"))
with open(filename_cascades_depth_oriented_attributes, 'w',
encoding='utf-8') as file_cascades_depth_oriented_attributes:
file_cascades_depth_oriented_attributes.write(str(
"cascade_id\tgroup_id\tgroup_category\troot_message\ttotal_messages\ttotal_users\thas_falsehood\t"
"depth\ttime_passed\tbreadth\tunique_users\tfact_check_url\n"))
with open(filename_cascades_unique_users_over_time, 'w',
encoding='utf-8') as file_cascades_unique_users_over_time:
file_cascades_unique_users_over_time.write(str(
"cascade_id\tgroup_id\tgroup_category\troot_message\ttotal_messages\ttotal_users\t"
"has_falsehood\tunique_users\ttime_passed\tfact_check_url\n"))
with open(filename_messages_in_cascades_mapping_tsv, 'w',
encoding='utf-8') as file_messages_in_cascades_mapping_tsv:
file_messages_in_cascades_mapping_tsv.write(str(
"cascade_id\tmessage_id\tuser_id\tgroup_id\tgroup_category\tmessage_type\troot_message\tmessage_depth\ttime_passed_since_root\t"
"publication_date\tcascade_has_falsehood\tmessage_has_falsehood\turl_fact_checking_news\tsentiment\ttotal_words\ttotal_emojis\t"
"total_urls\n"))
with open(filename_messages_in_cascades_mapping, 'w',
encoding='utf-8') as file_messages_in_cascades_mapping:
for dirname, dirnames, filenames in os.walk(path_graph_sources):
cascade_index = 0
for graph_filename in filenames:
# print("\t\t\tCascade ", graph_filename, '\tDatetime: ', datetime.now(),
# flush=True)
# if (graph_filename == graph_example):
try:
cascade_index +=1
cascade_id = str(str(graph_filename.split(".dot")[0]))
total_cascades += 1
if (total_cascades % 10000 == 0):
print("\t\t\tCascades processed so far", total_cascades, '\tDatetime: ', datetime.now(), flush=True)
DG = nx.DiGraph(nx.nx_pydot.read_dot(str(str(dirname)+str(graph_filename))))
# (1) >>>>>>>>>> Depth
first_node = list(nx.topological_sort(DG))[0]
depths = nx.shortest_path_length(DG, first_node)
maximum_depth = sorted(depths.items(), key=operator.itemgetter(1), reverse=True)[0][1]
# Getting cascade credentials
group_id = DG.node[first_node]['group_id']
root_message = first_node
group_category = DG.node[first_node]['group_category']
# (2) >>>>>>>>>> Structural Virality
G = DG.to_undirected()
structural_virality = nx.average_shortest_path_length(G)
first_cascade_message_datetime = self.getDateFormatted(
DG.node[first_node]['publication_date'])
# (3) >>>>>>>>>> Size/Unique Users
users_in_cascade = set()
graph_nodes = DG.nodes()
for a_node in graph_nodes:
users_in_cascade.add(DG.node[a_node]['user_id'])
unique_users = len(users_in_cascade)
# Check if cascade has falsehood
has_falsehood = not set(messages_with_fake_news).isdisjoint(graph_nodes)
# Get fact checking URL
fact_check_url = ""
if(has_falsehood):
for a_node in graph_nodes:
if(a_node in messages_with_fake_news_dict):
fact_check_url = messages_with_fake_news_dict[a_node]
break
total_cascade_messages = len(graph_nodes)
total_cascade_users = unique_users
# @@@@@@@@@@@@@@@ Unique users over time
graph_nodes_publication_date_dict = {}
for a_node in graph_nodes:
if(DG.node[a_node]['publication_date'] not in graph_nodes_publication_date_dict):
graph_nodes_publication_date_dict[DG.node[a_node]['publication_date']] = []
graph_nodes_publication_date_dict[DG.node[a_node]['publication_date']].append(DG.node[a_node]['user_id'])
messages_sorted_by_publication_date = OrderedDict(sorted(graph_nodes_publication_date_dict.items()))
unique_users_over_time_list = []
for message_datetime_info, user_id_list in messages_sorted_by_publication_date.items():
for user_id in user_id_list:
if (user_id not in unique_users_over_time_list):
unique_users_over_time_list.append(user_id)
difference = self.getDateFormatted(message_datetime_info) - first_cascade_message_datetime
minutes = difference.total_seconds() / 60
# ######## WRITING UNIQUE USERS OVER TIME
file_cascades_unique_users_over_time.write(str(
str(cascade_id) + "\t" + str(group_id) + "\t" + str(
group_category) + "\t" + str(root_message) +
"\t" + str(total_cascade_messages) + "\t" + str(
total_cascade_users) + "\t" + str(has_falsehood) + "\t" +
str(len(unique_users_over_time_list)) + "\t" + str(minutes) + "\t" +
str(fact_check_url)+"\n"))
# (4) >>>>>>>>>> Breadth
max_breadth, breadth_by_depth_dict = self.getMaxWidth(graph=DG, root=first_node, maximum_depth=maximum_depth)
# ######## WRITING STATIC FEATURES
file_cascades_static_attributes.write(str(
str(cascade_id)+"\t"+str(group_id)+"\t"+str(group_category)+"\t"+str(root_message)+
"\t"+str(total_cascade_messages)+"\t"+str(total_cascade_users)+"\t"+str(has_falsehood)+"\t"+
str(maximum_depth)+"\t"+str(unique_users)+"\t"+
str(structural_virality)+"\t"+str(max_breadth)+ "\t" +
str(fact_check_url)+"\n"))
# *********** DEPTH ORIENTED FEATURES
# depths_info = sorted(depths.items(), key=operator.itemgetter(1), reverse=True)
depths_info = sorted(depths.items(), key=operator.itemgetter(1))
depth_datetimes = {}
unique_users_by_depth_dict = {}
for a_node, depth in depths_info:
# if(int(depth) >= 0 and int(depth) <= 4):
# a = 10
if(str(depth) not in depth_datetimes):
depth_datetimes[str(depth)] = []
if (str(depth) not in unique_users_by_depth_dict):
unique_users_by_depth_dict[str(depth)] = set()
depth_datetimes[str(depth)].append(self.getDateFormatted(DG.node[a_node]['publication_date']))
unique_users_by_depth_dict[str(depth)].add(DG.node[a_node]['user_id'])
for depth, publication_date_list in depth_datetimes.items():
first_message_at_depth = sorted(publication_date_list, reverse=False)[0]
difference = first_message_at_depth - first_cascade_message_datetime
minutes = difference.total_seconds() / 60
unique_users_at_depth = len(unique_users_by_depth_dict[depth])
breadth_at_depth = breadth_by_depth_dict[depth]
# ######## WRITING DEPTH ORIENTED FEATURES
file_cascades_depth_oriented_attributes.write(str(
str(cascade_id) + "\t" + str(group_id) + "\t" + str(
group_category) + "\t" + str(root_message) +
"\t" + str(total_cascade_messages) + "\t" + str(
total_cascade_users) + "\t" + str(has_falsehood) + "\t" +
str(depth) + "\t" + str(minutes) + "\t" +
str(breadth_at_depth) + "\t" + str(unique_users_at_depth) +"\t"+
fact_check_url+"\n"))
# ######## WRITING MAPPING FILE
for a_node in graph_nodes:
message_id = a_node
user_id = DG.node[a_node]['user_id'].replace("\"", "")
group_id = DG.node[a_node]['group_id'].replace("\"", "")
group_category = DG.node[a_node]['group_category'].replace("\"", "")
publication_date = DG.node[a_node]['publication_date'].replace("\"",
"")
message_type = DG.node[a_node]['type'].replace("\"", "")
message_depth = 0
for another_node, depth in depths_info:
if(a_node == another_node):
message_depth = str(depth)
break
time_passed_since_root = 0
for depth, publication_date_list in depth_datetimes.items():
if(depth == message_depth):
first_message_at_depth = \
sorted(publication_date_list, reverse=False)[0]
difference = first_message_at_depth - first_cascade_message_datetime
minutes = difference.total_seconds() / 60
time_passed_since_root = minutes
break
document = {}
document["_id"] = message_id
document["cascade_id"] = cascade_id
document["message_id"] = message_id
document["user_id"] = user_id
document["group_id"] = group_id
document["group_category"] = group_category
document["message_type"] = message_type
document["root_message"] = (first_node == a_node)
document["message_depth"] = message_depth
document["time_passed_since_root"] = time_passed_since_root
document["publication_date"] = publication_date
document['cascade_has_falsehood'] = has_falsehood
document['message_has_falsehood'] = (a_node in messages_with_fake_news)
document["url_fact_checking_news"] = fact_check_url
sentiment = None
total_words = None
total_emojis = None
total_urls = None
if(message_id in text_messages_info_dict):
sentiment = text_messages_info_dict[message_id]["sentiment"]
total_words = text_messages_info_dict[message_id]["total_words"]
total_emojis = text_messages_info_dict[message_id]["total_emojis"]
total_urls = text_messages_info_dict[message_id]["total_urls"]
document["sentiment"] = sentiment
document["total_words"] = total_words
document["total_emojis"] = total_emojis
document["total_urls"] = total_urls
json.dump(document, file_messages_in_cascades_mapping)
file_messages_in_cascades_mapping.write("\n")
file_messages_in_cascades_mapping_tsv.write(str(
cascade_id+"\t"+message_id+"\t"+user_id+"\t"+group_id+"\t"+group_category+"\t"+
message_type+"\t"+str((first_node == a_node))+"\t"+str(message_depth)+"\t"+str(time_passed_since_root)+"\t"+
str(publication_date)+"\t"+str(has_falsehood)+"\t"+str((a_node in messages_with_fake_news))+"\t"+
str(fact_check_url)+"\t"+str(sentiment)+"\t"+str(total_words)+"\t"+str(total_emojis)+"\t"+
str(total_urls)+"\n"))
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno,
'\tDatetime: ', datetime.now(),
flush=True)
print("ERROR PROCESSING CASCADE FILE:",str(str(dirname)+str(graph_filename)), flush=True)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno, '\tDatetime: ', datetime.now(),
flush=True)
def getIncomingMotifTemplateListByNodeTotal(self, total_nodes):
motif_incoming_star_edge_list = []
for i in range(2, total_nodes + 1):
motif_incoming_star_edge_list.extend([(i, 1)])
return (motif_incoming_star_edge_list)
def getOutgoingMotifTemplateListByNodeTotal(self, total_nodes):
motif_outgoing_star_edge_list = []
for i in range(2, total_nodes + 1):
motif_outgoing_star_edge_list.extend([(1, i)])
return (motif_outgoing_star_edge_list)
def getChainMotifTemplateListByNodeTotal(self, total_nodes):
motif_chain_edge_list = []
for i in range(1, total_nodes + 1):
motif_chain_edge_list.extend([(i, (i + 1))])
return (motif_chain_edge_list)
def getLoopMotifTemplateListByNodeTotal(self, total_nodes):
motif_loop_edge_list = []
initial_node = 1
for i in range(1, total_nodes):
motif_loop_edge_list.extend([(i, (i + 1))])
motif_loop_edge_list.extend([(total_nodes, (initial_node))])
return (motif_loop_edge_list)
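# Examples of the motif templates produced above (illustrative), for total_nodes=3:
#   incoming star: [(2, 1), (3, 1)]          outgoing star: [(1, 2), (1, 3)]
#   loop:          [(1, 2), (2, 3), (3, 1)]
#   chain:         [(1, 2), (2, 3), (3, 4)]  (the chain template spans total_nodes + 1
#                   nodes, which is why the caller below uses total_nodes = i + 1)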
def buildUserRelationCascadeMotifsFile(self, filename_cascades_static_attributes,
path_graph_sources, filename_user_relations_motifs,
is_user_cascades_by_cascades):
try:
cascade_info_dict = {}
with open(filename_cascades_static_attributes, 'r', encoding='utf-8') as file_cascades_static_attributes:
#cascade_id group_id group_category root_message total_messages total_users has_falsehood maximum_depth unique_users structural_virality breadth fact_check_url
header = file_cascades_static_attributes.readline()
for cascade_info_line in file_cascades_static_attributes:
cascade_info = cascade_info_line.replace("\n", "").split("\t")
if(cascade_info[0] not in cascade_info_dict):
cascade_info_dict[cascade_info[0]] = {"group_id":cascade_info[1], "group_category":cascade_info[2],
"root_message": cascade_info[3],
"total_messages": cascade_info[4],
"total_users":cascade_info[5], "has_falsehood":cascade_info[6]}
total_cascades = 0
motif_dyadic_edge_list = [(1, 2), (2, 1)]
motif_self_loop_edge_list = [(1, 1)]
with open(filename_user_relations_motifs, 'w', encoding='utf-8') as file_user_relations_motifs:
if(is_user_cascades_by_cascades is True):
file_user_relations_motifs.write(str("cascade_id\tgroup_id\tgroup_category\troot_message\t"
"total_messages\ttotal_users\tcascade_has_falsehood\tmotif_name\ttotal_nodes\ttotal_edges\ttotal_edge_weight_on_motifs\ttotal_motifs\n"))
else:
file_user_relations_motifs.write(str("group_id\tgroup_category\tmotif_name\ttotal_nodes\ttotal_edges\ttotal_edge_weight_on_motifs\ttotal_motifs\n"))
for dirname, dirnames, filenames in os.walk(path_graph_sources):
cascade_index = 0
for graph_filename in filenames:
try:
cascade_index +=1
cascade_id = str(str(graph_filename.split(".dot")[0]))
total_cascades += 1
if (total_cascades % 1000 == 0):
print("\t\t\tCascades processed so far", total_cascades, '\tDatetime: ', datetime.now(), flush=True)
DG = nx.DiGraph(nx.nx_pydot.read_dot(str(str(dirname)+str(graph_filename))))
motif_totals_dict = {"dyadic": {}, "self_loop": {}, "chain":{}, "loop":{},
"incoming_star":{}, "outgoing_star":{}}
V = DG.nodes()
# ------ DYADIC
motif_dyadic = nx.DiGraph(motif_dyadic_edge_list)
motif_name = "dyadic"
total_nodes = 2
motif_totals_dict[motif_name][str(total_nodes)] = {"total_edges":0, "total_edge_weight_on_motifs":0, "total_motifs":0}
for subV in itertools.combinations(V, total_nodes):
subG = nx.subgraph(DG, subV)
if nx.is_isomorphic(subG, motif_dyadic):
motif_totals_dict[motif_name][str(total_nodes)]['total_motifs']+=1
for node, edge_info in subG.adj.items():
for node_to, weight in edge_info.items():
#print(node_to, weight, flush=True)
motif_totals_dict[motif_name][str(total_nodes)]['total_edges'] += 1
motif_totals_dict[motif_name][str(total_nodes)]['total_edge_weight_on_motifs'] += int(weight['weight'])
# ------ SELF-LOOP
motif_self_loop = nx.DiGraph(motif_self_loop_edge_list)
motif_name = "self_loop"
total_nodes = 1
motif_totals_dict[motif_name][str(total_nodes)] = {"total_edges": 0,
"total_edge_weight_on_motifs": 0,
"total_motifs": 0}
for subV in itertools.combinations(V, total_nodes):
subG = nx.subgraph(DG, subV)
if nx.is_isomorphic(subG, motif_self_loop):
motif_totals_dict[motif_name][str(total_nodes)]['total_motifs'] += 1
for node, edge_info in subG.adj.items():
for node_to, weight in edge_info.items():
motif_totals_dict[motif_name][str(total_nodes)][
'total_edges'] += 1
motif_totals_dict[motif_name][str(total_nodes)][
'total_edge_weight_on_motifs'] += int(weight['weight'])
for i in range(1,len(DG.nodes())):
motif_chain_edge_list = self.getChainMotifTemplateListByNodeTotal(total_nodes=i)
# ------ CHAIN
motif_chain = nx.DiGraph(motif_chain_edge_list)
motif_name = "chain"
total_nodes = i+1
motif_totals_dict[motif_name][str(total_nodes)] = {"total_edges": 0,
"total_edge_weight_on_motifs": 0,
"total_motifs": 0}
for subV in itertools.combinations(V, total_nodes):
subG = nx.subgraph(DG, subV)
if nx.is_isomorphic(subG, motif_chain):
motif_totals_dict[motif_name][str(total_nodes)]['total_motifs'] += 1
for node, edge_info in subG.adj.items():
for node_to, weight in edge_info.items():
motif_totals_dict[motif_name][str(total_nodes)]['total_edges'] += 1
motif_totals_dict[motif_name][str(total_nodes)][
'total_edge_weight_on_motifs'] += int(weight['weight'])
if(i >= 3):
motif_loop_edge_list = self.getLoopMotifTemplateListByNodeTotal(total_nodes=i)
# ------ LOOP
motif_loop = nx.DiGraph(motif_loop_edge_list)
motif_name = "loop"
total_nodes = i
motif_totals_dict[motif_name][str(total_nodes)] = {"total_edges": 0,
"total_edge_weight_on_motifs": 0,
"total_motifs": 0}
for subV in itertools.combinations(V, total_nodes):
subG = nx.subgraph(DG, subV)
if nx.is_isomorphic(subG, motif_loop):
motif_totals_dict[motif_name][str(total_nodes)]['total_motifs'] += 1
for node, edge_info in subG.adj.items():
for node_to, weight in edge_info.items():
motif_totals_dict[motif_name][str(total_nodes)]['total_edges'] += 1
motif_totals_dict[motif_name][str(total_nodes)][
'total_edge_weight_on_motifs'] += int(weight['weight'])
motif_incoming_star_edge_list = self.getIncomingMotifTemplateListByNodeTotal(
total_nodes=i)
motif_outgoing_star_edge_list = self.getOutgoingMotifTemplateListByNodeTotal(
total_nodes=i)
# ------ INCOMING STAR
motif_incoming_star = nx.DiGraph(motif_incoming_star_edge_list)
motif_name = "incoming_star"
total_nodes = i
motif_totals_dict[motif_name][str(total_nodes)] = {"total_edges": 0,
"total_edge_weight_on_motifs": 0,
"total_motifs": 0}
for subV in itertools.combinations(V, total_nodes):
subG = nx.subgraph(DG, subV)
if nx.is_isomorphic(subG, motif_incoming_star):
motif_totals_dict[motif_name][str(total_nodes)]['total_motifs'] += 1
for node, edge_info in subG.adj.items():
for node_to, weight in edge_info.items():
motif_totals_dict[motif_name][str(total_nodes)]['total_edges'] += 1
motif_totals_dict[motif_name][str(total_nodes)][
'total_edge_weight_on_motifs'] += int(weight['weight'])
# ------ OUTGOING STAR
motif_outcoming_star = nx.DiGraph(motif_outgoing_star_edge_list)
motif_name = "outgoing_star"
total_nodes = i
motif_totals_dict[motif_name][str(total_nodes)] = {"total_edges": 0,
"total_edge_weight_on_motifs": 0,
"total_motifs": 0}
for subV in itertools.combinations(V, total_nodes):
subG = nx.subgraph(DG, subV)
if nx.is_isomorphic(subG, motif_outcoming_star):
motif_totals_dict[motif_name][str(total_nodes)]['total_motifs'] += 1
for node, edge_info in subG.adj.items():
for node_to, weight in edge_info.items():
motif_totals_dict[motif_name][str(total_nodes)]['total_edges'] += 1
motif_totals_dict[motif_name][str(total_nodes)][
'total_edge_weight_on_motifs'] += int(weight['weight'])
#print('Writing motif file\tDatetime: ', datetime.now(), flush=True)
for motif_name, motif_nodes_info in motif_totals_dict.items():
for motif_nodes, motif_info in motif_nodes_info.items():
if motif_info['total_motifs'] > 0:
group_id = cascade_info_dict[cascade_id]["group_id"]
group_category = cascade_info_dict[cascade_id]["group_category"]
if(is_user_cascades_by_cascades is True):
root_message = cascade_info_dict[cascade_id]["root_message"]
total_cascade_messages = cascade_info_dict[cascade_id]["total_messages"]
total_cascade_users = cascade_info_dict[cascade_id]["total_users"]
has_falsehood = cascade_info_dict[cascade_id]["has_falsehood"]
file_user_relations_motifs.write(str(
str(cascade_id) + "\t" + str(group_id) + "\t" + str(group_category) + "\t" + str(
root_message) +
"\t" + str(total_cascade_messages) + "\t" + str(total_cascade_users) + "\t" + str(
has_falsehood) + "\t" +
str(motif_name) + "\t" + str(motif_nodes) + "\t" + str(motif_info['total_edges']) + "\t" +
str(motif_info['total_edge_weight_on_motifs'])+ "\t" + str(motif_info['total_motifs']) + "\n"))
else:
file_user_relations_motifs.write(str(str(group_id) + "\t" + str(
group_category) + "\t" +
str(motif_name) + "\t" + str(motif_nodes) + "\t" + str(
motif_info['total_edges']) + "\t" +
str(motif_info['total_edge_weight_on_motifs']) + "\t" + str(
motif_info['total_motifs']) + "\n"))
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno,
'\tDatetime: ', datetime.now(),
flush=True)
print("ERROR PROCESSING CASCADE FILE:",str(str(dirname)+str(graph_filename)), flush=True)
#sys.exit(1)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno, '\tDatetime: ', datetime.now(),
flush=True)
def buildSimilarTextsFromFactCheckingSitesFile(self, filename_clean_verifiable_texts,
filename_clean_fake_news_texts_from_fact_checking,
filename_similar_texts_with_fact_checking_texts_primary,
filename_text_from_url_messages_dialogues):
try:
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
message_id_url_dict = {}
with open(filename_text_from_url_messages_dialogues, 'r',
encoding='utf-8') as file_text_from_url_messages_dialogues:
for message in file_text_from_url_messages_dialogues:
try:
message = json.loads(message)
message_id_url_dict[message['_id']] = message["message_id"]
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError (DID NOT STOP): ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno, '\tDatetime: ',
datetime.now(),
flush=True)
from itertools import combinations
# Get all possible variations from fake news texts
fake_news_texts_list_dict = {}
with open(filename_clean_fake_news_texts_from_fact_checking, 'r',
encoding='utf-8') as file_clean_fake_news_texts_from_fact_checking:
for message in file_clean_fake_news_texts_from_fact_checking:
try:
message = json.loads(message)
fact_check_url = message["url"].split("_")[len(message["url"].split("_")) - 2]
if(fact_check_url not in fake_news_texts_list_dict):
fake_news_texts_list_dict[fact_check_url] = []
fake_news_texts_list_dict[fact_check_url].append(message['text'])
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError (DID NOT STOP) on message:', message, '\tThe error:', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno, '\tDatetime: ',
datetime.now(),
flush=True)
fake_news_texts_dict = {}
real_total_fake_news_texts = 0
for url, text_list in fake_news_texts_list_dict.items():
all_forward_combinations = [' '.join(text_list[i:j]) for i, j in combinations(range(len(text_list) + 1), 2)]
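# (illustrative) combinations(range(n + 1), 2) enumerates every contiguous slice of
# text_list, e.g. for [t1, t2, t3]: 't1', 't1 t2', 't1 t2 t3', 't2', 't2 t3', 't3'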
fake_news_texts_dict[url] = []
for text in all_forward_combinations:
fake_news_texts_dict[url].append(text)
real_total_fake_news_texts +=1
WORD = re.compile(r'\w+')
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x] ** 2 for x in vec1.keys()])
sum2 = sum([vec2[x] ** 2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
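# (illustrative) e.g. get_cosine(text_to_vector('a b c'), text_to_vector('a b d'))
# returns ~0.67 (2/3), the cosine similarity of the two term-frequency vectors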
messages_dict = {}
total_documents_without_message_id = 0
real_total_verifiable_texts = 0
with open(filename_clean_verifiable_texts, 'r',
encoding='utf-8') as file_clean_verifiable_texts:
for message in file_clean_verifiable_texts:
message = json.loads(message)
message_id = message['message_id']
if (message_id == "" and message["source_text_type"] == "URL" and message['url'] in message_id_url_dict):
message_id = message_id_url_dict[message['url']]
if(message_id != ""):
if(message_id not in messages_dict):
messages_dict[message_id] = []
messages_dict[message_id].append(message['text'])
real_total_verifiable_texts +=1
else:
total_documents_without_message_id +=1
print("Total URL document without message_id:",total_documents_without_message_id, flush=True)
print("$$$$$$$$$$$$ Checking", real_total_verifiable_texts ,"messages/URLs from dialogues in",real_total_fake_news_texts,"fake news texts.\tDatetime: ",
datetime.now(),
flush=True)
line_number = 0
total_found = 0
with open(filename_similar_texts_with_fact_checking_texts_primary, 'w',
encoding='utf-8') as file_similar_texts_with_fact_checking_texts_primary:
file_similar_texts_with_fact_checking_texts_primary.write(str("document_id\tfact_check_url\tcosine_similarity\tlength_message\tlength_fake_news_text\n"))
for message_id, message_text_list in messages_dict.items():
line_number += 1
if(line_number % 1000 == 0):
print("\t>>> Processing message index", line_number, '\tDatetime: ',
datetime.now(),
flush=True)
found_similar_text = False
for message_text in message_text_list:
if (found_similar_text is False):
vector2 = text_to_vector(message_text)
if (len(vector2) >= 5):
for fake_news_url, fake_news_text_list in fake_news_texts_dict.items():
if(found_similar_text is False):
for fake_news_text in fake_news_text_list:
vector1 = text_to_vector(fake_news_text)
if (len(vector1) >= 5):
cosine_similarity = get_cosine(vector1, vector2)
if (cosine_similarity >= 0.5):
file_similar_texts_with_fact_checking_texts_primary.write(
str(str(message_id) +
"\t" + str(fake_news_url) + "\t" + str(
cosine_similarity) + "\t" + str(len(vector2)) + "\t" + str(
len(vector1)) + "\n"))
total_found += 1
print("\t\t>>> Writing similar text", cosine_similarity,
"\tTotal found so far: ", total_found,
"\tProcessing text index:", line_number, '\tDatetime: ',
datetime.now(), flush=True)
if (cosine_similarity >= 0.9):
found_similar_text = True
print(
"\t\t\t>>> Found similarity >=0.9... stopping iteration\tDatetime: ",
datetime.now(), flush=True)
break
levenshtein_distance = fuzz.ratio(message_text, fake_news_text)
if (levenshtein_distance >= 50):
file_similar_texts_with_fact_checking_texts_primary.write(str(str(message_id)+
"\t"+str(fake_news_url)+"\t"+str(levenshtein_distance)+"\t"+str(len(vector2))+"\t"+str(len(vector1))+"\n"))
total_found +=1
print("\t\t>>> Writing similar text", levenshtein_distance, "\tTotal found so far: ", total_found, "\tProcessing text index:", line_number , '\tDatetime: ',datetime.now(),flush=True)
if(levenshtein_distance >= 90):
found_similar_text = True
print("\t\t\t>>> Found similarity >=0.9... stopping iteration\tDatetime: ",datetime.now(), flush=True)
break
else:
break
else:
break
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno, '\tDatetime: ',
datetime.now(),
flush=True)
def lemmatize_text_portuguese(self, nlp_pt, text):
return " ".join(token.lemma_ for token in nlp_pt(text))
def getWordsLematizedWithoutStopWords(self, text, nlp_pt, stopwords_list=None):
text = self.lemmatize_text_portuguese(text=text, nlp_pt=nlp_pt)
text = re.sub(r"https?://\S+", "", text)
text = re.sub(r"\b\d+\b", "", text)
words = [word.lower() for word in re.findall(r"\w+", text)]
words = [word for word in words if word not in stopwords_list] if stopwords_list else words
words = [word for word in words if len(word) > 2]
return words
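# Pipeline summary: lemmatize with the spaCy 'pt' model -> strip URLs and standalone
# numbers -> lowercase word tokens -> drop stop words (when provided) -> keep only
# words longer than 2 characters.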
def getURLs(self, text):
urls = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
text)
return urls
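# (illustrative) e.g. getURLs('veja https://example.com/x agora') -> ['https://example.com/x']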
def buildCleanTextFromMessagesOnCascadesFile(self, filename_portuguese_stopwords, filename_messages_dialogues, filename_text_from_url_messages_dialogues,
filename_fake_news_texts_from_fact_checking, filename_clean_verifiable_texts,
filename_clean_fake_news_texts_from_fact_checking):
try:
# Lemmatizers
import spacy
nlp_pt = spacy.load("pt")
from langid.langid import LanguageIdentifier, model
identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)
messages_processed_ids = set()
messages_urls_dict = {}
line_number = 0
# Stopwords
stopwords_portuguese = set([line.rstrip() for line in open(filename_portuguese_stopwords, encoding="utf-8")])
print("\t>>>> Building clean verifiable texts file\tDatetime: ", datetime.now(), flush=True)
with open(filename_clean_verifiable_texts, 'w', encoding='utf-8') as file_clean_verifiable_texts:
with open(filename_messages_dialogues, 'r', encoding='utf-8') as file_messages_dialogues:
for message_dialogue in file_messages_dialogues:
message_dialogue = json.loads(message_dialogue)
line_number += 1
if (line_number % 10000 == 0):
print("\t>>> Processing dialogue index", line_number, '\tDatetime: ',
datetime.now(),
flush=True)
for message_source in ['original_message', 'quote_message']:
if message_dialogue[message_source]['_id'] not in messages_processed_ids and message_dialogue[message_source]['type'] == 'text':
new_text = message_dialogue[message_source]['text'].lower()
my_val = str(new_text).rstrip("\n").rstrip("\t").rstrip("\r")
my_val = str(my_val).replace('\r', " ").replace('\n', " ").replace('\t', " ")
new_text = my_val.strip()
messages_urls_dict[message_dialogue[message_source]['_id']] = self.getURLs(text=new_text)
langid_language_detected_result = identifier.classify(new_text)
langid_language_detected = langid_language_detected_result[0]
language_probability = langid_language_detected_result[1]
if(langid_language_detected == 'pt' and float(language_probability) >= 0.9):
text = new_text
words = self.getWordsLematizedWithoutStopWords(text=text, stopwords_list=stopwords_portuguese, nlp_pt = nlp_pt)
#words = self.get_words(text, stopwords_portuguese, None)
fullStr = ' '.join(words)
document = {"message_id":message_dialogue[message_source]['_id'], "text":fullStr, "source_text_type":"MESSAGE", "url":""}
json.dump(document, file_clean_verifiable_texts)
file_clean_verifiable_texts.write("\n")
with open(filename_text_from_url_messages_dialogues, 'r', encoding='utf-8') as file_text_from_url_messages_dialogues:
for text_url_info in file_text_from_url_messages_dialogues:
try:
text_url_info = json.loads(text_url_info)
new_text = text_url_info['text'].lower()
my_val = str(new_text).rstrip("\n").rstrip("\t").rstrip("\r")
my_val = str(my_val).replace('\r', " ").replace('\n', " ").replace('\t', " ")
new_text = my_val.strip()
text = new_text
words = self.getWordsLematizedWithoutStopWords(text=text, stopwords_list=stopwords_portuguese,
nlp_pt=nlp_pt)
# words = self.get_words(text, stopwords_portuguese, None)
fullStr = ' '.join(words)
my_message_id = ""
for message_id, url_list in messages_urls_dict.items():
for url in url_list:
if (url == text_url_info['_id']):
my_message_id = message_id
break
document = {"message_id": my_message_id, "text": fullStr, "source_text_type": "URL", "url": text_url_info['_id']}
json.dump(document, file_clean_verifiable_texts)
file_clean_verifiable_texts.write("\n")
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError DID NOT STOP: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno,
'\tDatetime: ',
datetime.now(),
flush=True)
print("\t>>>> Building clean texts from fact checking sites file\tDatetime: ", datetime.now(), flush=True)
with open(filename_clean_fake_news_texts_from_fact_checking, 'w', encoding='utf-8') as file_clean_fake_news_texts_from_fact_checking:
with open(filename_fake_news_texts_from_fact_checking, 'r', encoding='utf-8') as file_fake_news_texts_from_fact_checking:
all_documents = json.load(file_fake_news_texts_from_fact_checking)
for text_url_info in all_documents:
new_text = text_url_info['text'].lower()
my_val = str(new_text).rstrip("\n").rstrip("\t").rstrip("\r")
my_val = str(my_val).replace('\r', " ").replace('\n', " ").replace('\t', " ")
new_text = my_val.strip()
text = new_text
words = self.getWordsLematizedWithoutStopWords(text=text, stopwords_list=stopwords_portuguese,
nlp_pt=nlp_pt)
# words = self.get_words(text, stopwords_portuguese, None)
fullStr = ' '.join(words)
document = {"text": fullStr, "url": text_url_info['_id']}
json.dump(document, file_clean_fake_news_texts_from_fact_checking)
file_clean_fake_news_texts_from_fact_checking.write("\n")
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno, '\tDatetime: ',
datetime.now(),
flush=True)
def main():
try:
start_time = datetime.now()
attentionCascades = AttentionCascades()
DATA_DIRECTORY = "/home/<<user>>/data/"
path_graph_sources = str(DATA_DIRECTORY+"attention_cascades/cascades/dot/")
filename_messages_dialogues=str(DATA_DIRECTORY+"attention_cascades/dialogues.jsonl")
filename_messages =str(DATA_DIRECTORY+"attention_cascades/messages.jsonl")
filename_quote_messages = str(DATA_DIRECTORY+"attention_cascades/quote_messages.jsonl")
filename_messages_quotes_references =str(DATA_DIRECTORY+"attention_cascades/messages_quotes_references.jsonl")
filename_cascades_static_attributes = str(DATA_DIRECTORY+"attention_cascades/cascades_static_attributes.tsv")
filename_cascades_depth_oriented_attributes = str(DATA_DIRECTORY+"attention_cascades/cascades_depth_oriented_attributes.tsv")
filename_cascades_unique_users_over_time = str(DATA_DIRECTORY+"attention_cascades/cascades_unique_users_over_time_attributes.tsv")
filename_messages_in_cascades_mapping = str(DATA_DIRECTORY+"attention_cascades/messages_in_cascades_mapping.jsonl")
filename_messages_in_cascades_mapping_tsv = str(DATA_DIRECTORY+"attention_cascades/messages_in_cascades_mapping.tsv")
filename_text_messages_info= str(DATA_DIRECTORY+"attention_cascades/text_messages_info.jsonl")
path_user_relations_individual_graph_sources = str(DATA_DIRECTORY+"attention_cascades/user_relations_by_cascade_cascades/dot/")
filename_clean_verifiable_texts = str(DATA_DIRECTORY+"attention_cascades/clean_verifiable_texts_from_cascades.jsonl")
filename_clean_fake_news_texts_from_fact_checking = str(DATA_DIRECTORY+"attention_cascades/clean_texts_from_urls_from_fact_checking_sites.json")
filename_similar_texts_with_fact_checking_texts_primary = str(DATA_DIRECTORY+"attention_cascades/similar_texts_with_fact_checking_texts_primary.tsv")
filename_text_from_url_messages_dialogues = str(DATA_DIRECTORY+"attention_cascades/dialogues_text_from_url_messages_new.jsonl")
filename_fake_news_texts_from_fact_checking = str(DATA_DIRECTORY+"attention_cascades/texts_from_urls_from_fact_checking_sites.json")
filename_portuguese_stopwords = str(DATA_DIRECTORY+"attention_cascades/stopwords-portuguese.txt")
print(">>>>>>>> Running attentionCascades.buildDialoguesCollection method", flush=True)
attentionCascades.buildDialoguesCollection(filename_messages_dialogues=filename_messages_dialogues,
filename_messages=filename_messages,
filename_quote_messages=filename_quote_messages,
filename_messages_quotes_references=filename_messages_quotes_references)
print(">>>>>>>> Running attentionCascades.buildCascadesFiles method", flush=True)
attentionCascades.buildCascadesFiles(filename_messages_dialogues=filename_messages_dialogues,
path_graph_sources=path_graph_sources)
print(">>>>>>>> Running attentionCascades.buildCleanTextFromMessagesOnCascadesFile method", flush=True)
attentionCascades.buildCleanTextFromMessagesOnCascadesFile(filename_portuguese_stopwords=filename_portuguese_stopwords,
filename_messages_dialogues=filename_messages_dialogues,
filename_text_from_url_messages_dialogues=filename_text_from_url_messages_dialogues,
filename_fake_news_texts_from_fact_checking=filename_fake_news_texts_from_fact_checking,
filename_clean_verifiable_texts=filename_clean_verifiable_texts,
filename_clean_fake_news_texts_from_fact_checking=filename_clean_fake_news_texts_from_fact_checking)
print(">>>>>>>> Running attentionCascades.buildCascadesAttributesFiles method", flush=True)
attentionCascades.buildSimilarTextsFromFactCheckingSitesFile(filename_clean_verifiable_texts=filename_clean_verifiable_texts,
filename_clean_fake_news_texts_from_fact_checking=filename_clean_fake_news_texts_from_fact_checking,
filename_similar_texts_with_fact_checking_texts_primary=filename_similar_texts_with_fact_checking_texts_primary,
filename_text_from_url_messages_dialogues=filename_text_from_url_messages_dialogues)
print(">>>>>>>> Running attentionCascades.buildCascadesAttributesFiles method", flush=True)
filename_fake_news_messages_manual_final = str(DATA_DIRECTORY+"attention_cascades/fake_news_messages_manual_final.tsv")
attentionCascades.buildCascadesAttributesFiles(filename_cascades_static_attributes=filename_cascades_static_attributes,
filename_cascades_depth_oriented_attributes=filename_cascades_depth_oriented_attributes,
filename_cascades_unique_users_over_time=filename_cascades_unique_users_over_time,
path_graph_sources=path_graph_sources,
filename_fake_news_messages_manual_final=filename_fake_news_messages_manual_final,
filename_messages_in_cascades_mapping=filename_messages_in_cascades_mapping,
filename_messages_in_cascades_mapping_tsv=filename_messages_in_cascades_mapping_tsv,
filename_text_messages_info=filename_text_messages_info)
print(">>>>>>>> Running attentionCascades.buildUserRelationCascadeMotifsFile method",
flush=True)
filename_user_relations_motifs=str(DATA_DIRECTORY+"attention_cascades/user_cascades_motifs_by_cascade.tsv")
is_user_cascades_by_cascades = True
attentionCascades.buildUserRelationCascadeMotifsFile(filename_cascades_static_attributes=filename_cascades_static_attributes,
path_graph_sources=path_user_relations_individual_graph_sources, filename_user_relations_motifs=filename_user_relations_motifs,
is_user_cascades_by_cascades=is_user_cascades_by_cascades)
end_time = datetime.now()
print("\nStart time: %s\nFinal time: %s\nTime elapsed (seconds): %s\n" % (
start_time, end_time, (end_time - start_time).seconds))
print("\n\n###########################################################\tSCRIPT FINISHED.", flush=True)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print('\nError: ', e, '\tDetails: ', exc_type, fname, exc_tb.tb_lineno, '\tDatetime: ', datetime.now(),
flush=True)
if __name__ == '__main__':
# pass
main()
``` |
{
"source": "josemariamoreira/BrainSpace",
"score": 2
} |
#### File: brainspace/mesh/mesh_io.py
```python
from vtk import (vtkPLYReader, vtkPLYWriter, vtkXMLPolyDataReader,
vtkXMLPolyDataWriter, vtkPolyDataReader, vtkPolyDataWriter)
from .io_support import (vtkFSReader, vtkFSWriter, vtkGIFTIReader,
vtkGIFTIWriter)
from ..vtk_interface.pipeline import serial_connect, get_output
from ..vtk_interface.decorators import wrap_output
# 'fs' type for FreeSurfer geometry data (also read FreeSurfer ascii as .asc)
supported_types = ['ply', 'obj', 'vtp', 'vtk', 'asc', 'fs', 'gii']
supported_formats = ['binary', 'ascii']
@wrap_output
def _select_reader(itype):
if itype == 'ply':
reader = vtkPLYReader()
# elif itype == 'obj':
# reader = vtkOBJReader()
elif itype == 'vtp':
reader = vtkXMLPolyDataReader()
elif itype == 'vtk':
reader = vtkPolyDataReader()
elif itype in ['asc', 'fs']:
reader = vtkFSReader()
if itype == 'asc':
reader.SetFileTypeToASCII()
elif itype == 'gii':
reader = vtkGIFTIReader()
else:
raise TypeError('Unknown input type \'{0}\'.'.format(itype))
return reader
@wrap_output
def _select_writer(otype):
if otype == 'ply':
writer = vtkPLYWriter()
# elif otype == 'obj':
# writer = vtkOBJWriter()
elif otype == 'vtp':
writer = vtkXMLPolyDataWriter()
elif otype == 'vtk':
writer = vtkPolyDataWriter()
elif otype in ['asc', 'fs']:
writer = vtkFSWriter()
elif otype == 'gii':
writer = vtkGIFTIWriter()
else:
raise TypeError('Unknown output type \'{0}\'.'.format(otype))
return writer
def read_surface(ipth, itype=None, return_data=True, update=True):
"""Read surface data.
See `itype` for supported file types.
Parameters
----------
ipth : str
Input filename.
itype : {'ply', 'obj', 'vtp', 'vtk', 'fs', 'asc', 'gii'}, optional
Input file type. If None, it is deduced from `ipth`. Default is None.
    return_data : bool, optional
        Whether to return data instead of the filter. Default is True.
    update : bool, optional
        Whether to update the filter. When return_data=True, the filter is
        automatically updated. Default is True.
Returns
-------
output : BSAlgorithm or BSPolyData
Surface as a filter or BSPolyData.
Notes
-----
Function can read FreeSurfer geometry data in binary ('fs') and ascii
('asc') format. Gifti surfaces can also be loaded if nibabel is installed.
See Also
--------
:func:`write_surface`
"""
if itype is None:
itype = ipth.split('.')[-1]
reader = _select_reader(itype)
reader.filename = ipth
return get_output(reader, update=update, as_data=return_data)
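# Illustrative usage sketch (the file name is an assumed example, not shipped
# with the package):
#
#   surf = read_surface('lh.pial.gii')                    # BSPolyData by default
#   flt = read_surface('lh.pial.gii', return_data=False,
#                      update=False)                      # reader filter, not run yet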
def write_surface(ifilter, opth, oformat=None, otype=None):
"""Write surface data.
See `otype` for supported file types.
Parameters
----------
ifilter : BSAlgorithm or BSDataObject
Input filter or data.
opth : str
Output filename.
oformat : {'ascii', 'binary'}, optional
File format. Defaults to writer's default format.
Only used when writer accepts format. Default is None.
otype : {'ply', 'obj', 'vtp', 'vtk', 'fs', 'asc', 'gii'}, optional
File type. If None, type is deduced from `opth`. Default is None.
Notes
-----
Function can save data in FreeSurfer binary ('fs') and ascii ('asc')
format. Gifti surfaces can also be saved if nibabel is installed.
See Also
--------
:func:`read_surface`
"""
if otype is None:
otype = opth.split('.')[-1]
writer = _select_writer(otype)
writer.filename = opth
if otype not in ['vtp', 'tri', 'gii', 'obj']:
if oformat == 'ascii' or otype == 'asc':
writer.SetFileTypeToASCII()
else:
writer.SetFileTypeToBinary()
serial_connect(ifilter, writer, update=True, as_data=False, port=None)
def convert_surface(ipth, opth, itype=None, otype=None, oformat=None):
"""Convert between file types.
Parameters
----------
ipth : str
Input filename.
opth : str
Output filename.
itype : str, optional
Input file type. If None, type is deduced from input filename's
extension. Default is None.
otype : str, optional
Output file type. If None, type is deduced from output filename's
extension. Default is None.
oformat : {'ascii', 'binary'}
Output file format. Defaults to writer's default format.
Only used when writer accepts format. Default is None.
"""
reader = read_surface(ipth, itype=itype, return_data=False, update=False)
write_surface(reader, opth, oformat=oformat, otype=otype)
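# Illustrative usage sketch (paths are assumed examples): convert a FreeSurfer
# binary geometry file to an ASCII VTK surface.
#
#   convert_surface('lh.white', 'lh_white.vtk', itype='fs', oformat='ascii')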
``` |
{
"source": "josemariamoreira/NiftyNet",
"score": 2
} |
#### File: NiftyNet/tests/sampler_weighted_test.py
```python
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import tensorflow as tf
from niftynet.engine.image_window import N_SPATIAL
from niftynet.engine.sampler_weighted import WeightedSampler
from niftynet.engine.sampler_weighted import weighted_spatial_coordinates
from niftynet.io.image_reader import ImageReader
from niftynet.io.image_sets_partitioner import ImageSetsPartitioner
from niftynet.utilities.util_common import ParserNamespace
MULTI_MOD_DATA = {
'T1': ParserNamespace(
csv_file=os.path.join('testing_data', 'T1sampler.csv'),
path_to_search='testing_data',
filename_contains=('_o_T1_time',),
filename_not_contains=('Parcellation',),
interp_order=3,
pixdim=None,
axcodes=None,
spatial_window_size=(7, 10, 2),
loader=None
),
'FLAIR': ParserNamespace(
csv_file=os.path.join('testing_data', 'FLAIRsampler.csv'),
path_to_search='testing_data',
filename_contains=('FLAIR_',),
filename_not_contains=('Parcellation',),
interp_order=3,
pixdim=None,
axcodes=None,
spatial_window_size=(7, 10, 2),
loader=None
)
}
MULTI_MOD_TASK = ParserNamespace(image=('T1', 'FLAIR'),
sampler=('T1',))
MOD_2D_DATA = {
'ultrasound': ParserNamespace(
csv_file=os.path.join('testing_data', 'T1sampler2d.csv'),
path_to_search='testing_data',
filename_contains=('2d_',),
filename_not_contains=('Parcellation',),
interp_order=3,
pixdim=None,
axcodes=None,
spatial_window_size=(10, 9, 1),
loader=None
),
}
MOD_2D_TASK = ParserNamespace(image=('ultrasound',),
sampler=('ultrasound',))
DYNAMIC_MOD_DATA = {
'T1': ParserNamespace(
csv_file=os.path.join('testing_data', 'T1sampler.csv'),
path_to_search='testing_data',
filename_contains=('_o_T1_time',),
filename_not_contains=('Parcellation',),
interp_order=3,
pixdim=None,
axcodes=None,
spatial_window_size=(8, 2),
loader=None
),
'FLAIR': ParserNamespace(
csv_file=os.path.join('testing_data', 'FLAIRsampler.csv'),
path_to_search='testing_data',
filename_contains=('FLAIR_',),
filename_not_contains=('Parcellation',),
interp_order=3,
pixdim=None,
axcodes=None,
spatial_window_size=(8, 2),
loader=None
)
}
DYNAMIC_MOD_TASK = ParserNamespace(image=('T1', 'FLAIR'),
sampler=('FLAIR',))
data_partitioner = ImageSetsPartitioner()
multi_mod_list = data_partitioner.initialise(MULTI_MOD_DATA).get_file_list()
mod_2d_list = data_partitioner.initialise(MOD_2D_DATA).get_file_list()
dynamic_list = data_partitioner.initialise(DYNAMIC_MOD_DATA).get_file_list()
def get_3d_reader():
reader = ImageReader(['image', 'sampler'])
reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
return reader
def get_2d_reader():
reader = ImageReader(['image', 'sampler'])
reader.initialise(MOD_2D_DATA, MOD_2D_TASK, mod_2d_list)
return reader
def get_dynamic_window_reader():
reader = ImageReader(['image', 'sampler'])
reader.initialise(DYNAMIC_MOD_DATA, DYNAMIC_MOD_TASK, dynamic_list)
return reader
class WeightedSamplerTest(tf.test.TestCase):
def test_3d_init(self):
sampler = WeightedSampler(reader=get_3d_reader(),
data_param=MULTI_MOD_DATA,
batch_size=2,
windows_per_image=10,
queue_length=10)
with self.test_session() as sess:
coordinator = tf.train.Coordinator()
sampler.run_threads(sess, coordinator, num_threads=2)
out = sess.run(sampler.pop_batch_op())
self.assertAllClose(out['image'].shape, (2, 7, 10, 2, 2))
sampler.close_all()
def test_2d_init(self):
sampler = WeightedSampler(reader=get_2d_reader(),
data_param=MOD_2D_DATA,
batch_size=2,
windows_per_image=10,
queue_length=10)
with self.test_session() as sess:
coordinator = tf.train.Coordinator()
sampler.run_threads(sess, coordinator, num_threads=2)
out = sess.run(sampler.pop_batch_op())
self.assertAllClose(out['image'].shape, (2, 10, 9, 1))
sampler.close_all()
def test_dynamic_init(self):
sampler = WeightedSampler(reader=get_dynamic_window_reader(),
data_param=DYNAMIC_MOD_DATA,
batch_size=2,
windows_per_image=10,
queue_length=10)
with self.test_session() as sess:
coordinator = tf.train.Coordinator()
sampler.run_threads(sess, coordinator, num_threads=2)
out = sess.run(sampler.pop_batch_op())
self.assertAllClose(out['image'].shape, (1, 8, 2, 256, 2))
def test_ill_init(self):
with self.assertRaisesRegexp(KeyError, ""):
sampler = WeightedSampler(reader=get_3d_reader(),
data_param=MOD_2D_DATA,
batch_size=2,
windows_per_image=10,
queue_length=10)
def test_close_early(self):
sampler = WeightedSampler(reader=get_2d_reader(),
data_param=MOD_2D_DATA,
batch_size=2,
windows_per_image=10,
queue_length=10)
sampler.close_all()
class WeightedCoordinatesTest(tf.test.TestCase):
def assertCoordinatesAreValid(self, coords, sampling_map):
for coord in coords:
for i in range(len(coord.shape)):
self.assertTrue(coord[i] >= 0)
self.assertTrue(coord[i] < sampling_map.shape[i])
def test_3d_coordinates(self):
img_size = (32, 16, 17, 1, 1)
win_size = (10, 16, 15)
sampling_map = np.zeros(img_size)
coords = weighted_spatial_coordinates(
32, img_size, win_size, sampling_map)
self.assertAllEqual(coords.shape, (32, N_SPATIAL))
self.assertCoordinatesAreValid(coords, sampling_map)
# testing high weight location (10, 8, 7, 0, 0)
sampling_map[10, 8, 7, 0, 0] = 1.0
coords = weighted_spatial_coordinates(
32, img_size, win_size, sampling_map)
self.assertAllEqual(coords.shape, (32, N_SPATIAL))
self.assertTrue(np.all(coords == [10, 8, 7]))
def test_2d_coordinates(self):
img_size = (32, 17, 1, 1, 1)
win_size = (31, 3, 1)
sampling_map = np.zeros(img_size)
coords = weighted_spatial_coordinates(
64, img_size, win_size, sampling_map)
self.assertAllEqual(coords.shape, (64, N_SPATIAL))
self.assertCoordinatesAreValid(coords, sampling_map)
        # testing high weight location (15, 1, 0, 0, 0)
sampling_map[15, 1, 0, 0, 0] = 1.0
coords = weighted_spatial_coordinates(
64, img_size, win_size, sampling_map)
self.assertAllEqual(coords.shape, (64, N_SPATIAL))
self.assertTrue(np.all(coords == [15, 1, 0]))
def test_1d_coordinates(self):
img_size = (32, 1, 1, 1, 1)
win_size = (15, 1, 1)
sampling_map = np.zeros(img_size)
coords = weighted_spatial_coordinates(
10, img_size, win_size, sampling_map)
self.assertAllEqual(coords.shape, (10, N_SPATIAL))
self.assertCoordinatesAreValid(coords, sampling_map)
sampling_map[20, 0, 0] = 0.1
coords = weighted_spatial_coordinates(
10, img_size, win_size, sampling_map)
self.assertAllEqual(coords.shape, (10, N_SPATIAL))
self.assertTrue(np.all(coords == [20, 0, 0]))
sampling_map[9, 0, 0] = 0.1
coords = weighted_spatial_coordinates(
10, img_size, win_size, sampling_map)
self.assertAllEqual(coords.shape, (10, N_SPATIAL))
self.assertTrue(np.all((coords == [20, 0, 0]) | (coords == [9, 0, 0])))
if __name__ == "__main__":
tf.test.main()
``` |
{
"source": "jose-mariano/cadastro-de-pessoas",
"score": 2
} |
#### File: src/views/__init__.py
```python
from src.views.CLI import CLI
from src.views.GUI import GUI
def getInterface(interfaceType, controller):
types = {
"CLI": CLI,
"GUI": GUI,
"default": GUI
}
if (interfaceType not in types.keys()):
return types["default"](controller)
return types[interfaceType](controller)
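# Illustrative usage sketch ('controller' stands for whatever controller object
# the application provides; unknown interface types fall back to the GUI):
#
#   view = getInterface('CLI', controller)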
``` |
{
"source": "jose-mariano/jogo-da-forca-web",
"score": 2
} |
#### File: jose-mariano/jogo-da-forca-web/root.py
```python
from flask import Blueprint, render_template, redirect, url_for, request
from datas import *
root_pages = Blueprint('root_pages', __name__, template_folder='templates')
createDatabase()
@root_pages.route('/viewWords')
def viewWords():
words = selectAllJoin()
return render_template('view-words.html', words=words)
@root_pages.route('/addWords')
def addWords():
category_words = select('SELECT * FROM tbl_category_words')
return render_template('add-items.html', categoryWords=category_words)
@root_pages.route('/viewNewItems', methods=["GET", "POST"])
def viewNewItems():
if request.method == "POST":
newCategory = request.form['newCategory']
newWord = request.form['newWords']
categories = validateAndAddNewCategories(newCategory)
words = validateAndAddNewWords(newWord)
return render_template('view-new-items.html', addWords=words["add"], notAddWords=words["notAdd"], addCategories=categories["add"], notAddCategories=categories["notAdd"])
else:
return redirect(url_for('root_pages.addWords'))
``` |
{
"source": "jose-mariano/sistema-de-cadastro-com-python",
"score": 3
} |
#### File: sistema-de-cadastro-com-python/manipulationData/database.py
```python
import sqlite3 as sql
# Database functions
def addExtension(name, extension):
nameCopy = name[:].replace('.', ' .').split()
lenNameCopy = len(nameCopy)
if lenNameCopy < 2 or nameCopy[lenNameCopy - 1] != extension:
name += extension
return name
def loadDatabase(name):
nameDb = addExtension(name, '.db')
db = sql.connect(nameDb)
return db
class Database:
def __init__(self, name):
self.name = name
self.database = loadDatabase(name)
self.console = self.database.cursor()
self.console.execute("CREATE TABLE IF NOT EXISTS tbl_people (id integer PRIMARY KEY AUTOINCREMENT, name text, date_birth text, gender text, marital_status text)")
self.save()
def insertInto(self, name, dateOfBirth, gender, maritalStatus):
data = (name, dateOfBirth, gender, maritalStatus)
self.console.execute("INSERT INTO tbl_people (name, date_birth, gender, marital_status) VALUES (?,?,?,?)", data)
self.save()
def select(self, command=None):
if command == None:
self.console.execute("SELECT * FROM tbl_people")
else:
try:
self.console.execute(command)
            except Exception:
return self.select()
return self.console.fetchall()
def save(self):
self.database.commit()
def close(self):
self.database.close()
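# Illustrative usage sketch (assumes it is fine to create 'people.db' in the
# working directory; values are made up):
#
#   db = Database('people')
#   db.insertInto('Maria', '1990-05-12', 'F', 'single')
#   rows = db.select()
#   db.close()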
```
#### File: sistema-de-cadastro-com-python/manipulationData/__init__.py
```python
def modifyDate(date):
oldDate = date[:].split('-')
return '{}/{}/{}'.format(oldDate[2], oldDate[1], oldDate[0])
def removeSpaces(string):
return string.replace(' ', '')
def leapYear(year):
year = int(year)
if year % 4 == 0 and year % 100 != 0:
return True
elif year % 100 == 0 and year % 400 == 0:
return True
else:
return False
def validateDateOfBirth(dateOfBirth):
from datetime import date
# date -> year/month/day
dateOfBirth = dateOfBirth.split('-')
if len(dateOfBirth) != 3:
return False
else:
if len(removeSpaces(dateOfBirth[0])) == 0 or len(removeSpaces(dateOfBirth[1])) == 0 or len(removeSpaces(dateOfBirth[2])) == 0:
return False
dateOfBirth = [int(item) for item in dateOfBirth]
currentYear = date.today().year
if dateOfBirth[0] > currentYear:
return False
elif dateOfBirth[1] < 1 or dateOfBirth[1] > 12:
return False
else:
totalDaysOfTheMonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if leapYear(dateOfBirth[0]):
totalDaysOfTheMonth[1] = 29
if dateOfBirth[2] < 1 or dateOfBirth[2] > totalDaysOfTheMonth[dateOfBirth[1] - 1]:
return False
return True
def validateName(name):
nameWithoutSpaces = name[:].replace(' ', '')
if name.strip() != '' and nameWithoutSpaces.isalpha() and len(nameWithoutSpaces) >= 3:
return True
else:
return False
# Erros
def errors(data):
if not validateName(data[0]):
return 'name'
elif not validateDateOfBirth(data[1]):
return 'dateOfBirth'
elif data[2] == '':
return 'gender'
elif data[3] == '':
return 'maritalStatus'
else:
return None
# Treatment of data
def treatData(data):
birthYear = removeSpaces(data['birthYear'])
birthMonth = removeSpaces(data['birthMonth'])
birthday = removeSpaces(data['birthday'])
dateOfBirth = '{}-{}-{}'.format(birthYear, birthMonth, birthday)
return (data['name'].strip(), dateOfBirth, data['gender'], data['maritalStatus'])
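# Illustrative input for treatData (values are made up):
#
#   data = {'name': ' Ana ', 'birthYear': '1995', 'birthMonth': '07',
#           'birthday': '03', 'gender': 'F', 'maritalStatus': 'single'}
#   treatData(data)  # -> ('Ana', '1995-07-03', 'F', 'single')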
``` |
{
"source": "JoseMariaRomeroARK/Movil-Music-Mananger-M3",
"score": 3
} |
#### File: JoseMariaRomeroARK/Movil-Music-Mananger-M3/ManejoArchivos.py
```python
def buscarDirectorio():
from tkinter import Tk, filedialog
Tk().withdraw()
direc = filedialog.askdirectory()
return direc
def buscarArchivo():
from tkinter import Tk, filedialog
Tk().withdraw()
filename = filedialog.askopenfile()
return filename.name
``` |
{
"source": "josemariasosa/music-theory",
"score": 2
} |
#### File: josemariasosa/music-theory/root.py
```python
import json
import inspect
import numpy as np
class RootNote(object):
""" Available scales:
aeolian_dominant
algerian
arabic
balinese_pelog
byzantine
chinese
diminished
dominant_diminished
egyptian
eight_tones_spanish
enigmatic
geez
harmonic_major
harmonic_minor
hirajoshi
hungarian_gypsy
japanese
lydian_dominant
major
major_bebop
major_pentatonic
major_pentatonic_blues
melodic_major
melodic_minor
minor
minor_bebop
minor_pentatonic
minor_pentatonic_blues
natural_major
natural_minor
neapolitan_minor
nine_tone
octatonic_half_whole
octatonic_whole_half
oriental
romanian_minor
spanish_gypsy
super_locrian
symmetrical_augmented
whole_tone
yo
"""
def __init__(self, root):
# 1. Define the list of 12 notes.
self.notes = self.getNotes()
self.num_notes = list(range(12))
# 2. Define the root position.
self.root = root.lower()
self.root_position = self.getRootPosition(self.root)
# 3. Import the patterns.
self.pats = self.importPatterns()
# 4. Default output notation.
self.default_notation = self.setDefaultNotation()
# --------------------------------------------------------------------------
def __str__(self):
message = "The root note is {}!".format(self.root.title())
return message
# --------------------------------------------------------------------------
def getNotes(self):
file_name = "files/notes.json"
with open(file_name, 'r') as f:
notes = json.load(f)['notes']
return notes
# --------------------------------------------------------------------------
def getRootPosition(self, root):
position = [x["position"] for x in self.notes if root in x['match']]
if len(position) == 1:
return position[0]
else:
print("Given note was not found, try again!")
exit()
# --------------------------------------------------------------------------
def importPatterns(self):
file_name = "files/patterns.json"
with open(file_name, 'r') as f:
patterns = json.load(f)['patterns']
return patterns
# --------------------------------------------------------------------------
def setDefaultNotation(self):
if len(self.root) == 1:
default_notation = 'sharp'
elif self.root[-1] == 'b':
default_notation = 'flat'
elif '#' in self.root:
default_notation = 'sharp'
else:
default_notation = 'sharp'
return default_notation
# --------------------------------------------------------------------------
def positionPattern(self, scale_name):
        # 1. Define the steps pattern.
step_pattern = self.pats['scales'][scale_name]
# 2. Resort the 12 notes into a basic pattern.
basic = (self.num_notes[self.root_position:]
+ self.num_notes[:self.root_position])
basic = basic + [basic[0]]
# 3. Get the cumulative steps.
step_pattern = [int(x * 2) for x in step_pattern]
accum_steps = list(np.cumsum(step_pattern))
# 4. Calculate the scale values.
scale = []
for index, value in enumerate(basic):
if index == 0:
scale.append(value)
elif index in accum_steps:
scale.append(value)
return scale
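    # Worked example (illustrative; assumes the 'major' step pattern in
    # files/patterns.json is [1, 1, 0.5, 1, 1, 1, 0.5] and the root is C at
    # position 0): the cumulative semitone offsets are [2, 4, 5, 7, 9, 11, 12],
    # so the returned positions are [0, 2, 4, 5, 7, 9, 11, 0] -> C D E F G A B C.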
# --------------------------------------------------------------------------
def replacePositionNotes(self, scale):
return [self.notes[x][self.default_notation] for x in scale]
# --------------------------------------------------------------------------
@property
def aeolian_dominant(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def algerian(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def arabic(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def balinese_pelog(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def byzantine(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def chinese(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def diminished(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def dominant_diminished(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def egyptian(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def eight_tones_spanish(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def enigmatic(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def geez(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def harmonic_major(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def harmonic_minor(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def hirajoshi(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def hungarian_gypsy(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def japanese(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def lydian_dominant(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def major(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def major_bebop(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def major_pentatonic(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def major_pentatonic_blues(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def melodic_major(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def melodic_minor(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def minor(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def minor_bebop(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def minor_pentatonic(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def minor_pentatonic_blues(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def natural_major(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def natural_minor(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def neapolitan_minor(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def nine_tone(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def octatonic_half_whole(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def octatonic_whole_half(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def oriental(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def romanian_minor(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def spanish_gypsy(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def super_locrian(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def symmetrical_augmented(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def whole_tone(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
@property
def yo(self):
scale_name = inspect.stack()[0][3]
positions = self.positionPattern(scale_name)
return {
'positions': positions,
'notes': self.replacePositionNotes(positions),
'root': self.root,
'scale_name': scale_name
}
# --------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def main():
note = RootNote("G")
print(note)
print('\nThe major scale is: ')
print(note.major['notes'])
print('\nThe symmetrical augmented scale is: ')
print(note.symmetrical_augmented['notes'])
print('\nThe japanese scale is: ')
print(note.japanese['notes'])
# ------------------------------------------------------------------------------
if __name__ == '__main__':
main()
```
#### File: josemariasosa/music-theory/tabs.py
```python
import json
class RootNote(object):
def __init__(self):
# 1. Define the list of 12 notes.
self.notes = self.getNotes()
self.num_notes = list(range(12))
# 2. Define the root position.
# self.root = root.lower()
# self.root_position = self.getRootPosition(self.root)
# 3. Import the patterns.
self.pats = self.importPatterns()
# 4. Default output notation.
self.default_notation = self.setDefaultNotation()
# --------------------------------------------------------------------------
def __str__(self):
message = "The root note is {}!".format(self.root.title())
return message
# --------------------------------------------------------------------------
def getNotes(self):
file_name = "files/notes.json"
with open(file_name, 'r') as f:
notes = json.load(f)['notes']
return notes
# --------------------------------------------------------------------------
def getRootPosition(self, root):
position = [x["position"] for x in self.notes if root in x['match']]
if len(position) == 1:
return position[0]
else:
print("Given note was not found, try again!")
exit()
# --------------------------------------------------------------------------
def importPatterns(self):
file_name = "files/patterns.json"
with open(file_name, 'r') as f:
patterns = json.load(f)['patterns']
return patterns
# --------------------------------------------------------------------------
def setDefaultNotation(self):
return 'sharp'
if len(self.root) == 1:
default_notation = 'sharp'
elif self.root[-1] == 'b':
default_notation = 'flat'
elif '#' in self.root:
default_notation = 'sharp'
else:
default_notation = 'sharp'
return default_notation
# --------------------------------------------------------------------------
def positionPattern(self, scale_name):
        # 1. Define the steps pattern.
step_pattern = self.pats['scales'][scale_name]
# 2. Resort the 12 notes into a basic pattern.
basic = (self.num_notes[self.root_position:]
+ self.num_notes[:self.root_position])
basic = basic + [basic[0]]
# 3. Get the cumulative steps.
step_pattern = [int(x * 2) for x in step_pattern]
accum_steps = list(np.cumsum(step_pattern))
# 4. Calculate the scale values.
scale = []
for index, value in enumerate(basic):
if index == 0:
scale.append(value)
elif index in accum_steps:
scale.append(value)
return scale
# --------------------------------------------------------------------------
def replacePositionNotes(self, scale):
return [self.notes[x][self.default_notation] for x in scale]
# --------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def stepDistance(from_note, to_note):
if to_note > from_note:
semitone = to_note - from_note
elif to_note < from_note:
semitone = (to_note + 12) - from_note
else:
semitone = 0
if semitone % 2 == 0:
return int(semitone / 2)
else:
return semitone / 2
def stepOperations(given_note, tones):
semitone = int(tones * 2)
results = given_note + semitone
if results > 11:
while results > 11:
results -= 12
elif results < 0:
while results < 0:
results += 12
return results
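# Worked examples (illustrative): stepOperations works in semitones modulo 12,
# so a whole step up from B (11) wraps to C# (1), and a half step down from
# C (0) wraps to B (11).
#
#   stepOperations(11, 1)     # -> 1
#   stepOperations(0, -0.5)   # -> 11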
def buildString(numeric_open_note, frets=20):
frets = frets + 1 # Adding the open note.
string = []
for fret in range(frets):
string.append(stepOperations(numeric_open_note, fret/2))
# string.extend(RootNote().replacePositionNotes([stepOperations(numeric_open_note, fret/2)]))
return string
e_string = buildString(4)
a_string = buildString(9)
d_string = buildString(2)
g_string = buildString(7)
strings = [
e_string,
a_string,
d_string,
g_string
]
def findFretPosition(string, note):
    return [x[0] for x in enumerate(string) if x[1] == note]
for string in strings:
fret_positions = findFretPosition(string, 0)
print(string)
print(fret_positions)
print('--------------------')
def majorChordTab(note):
text = "G |-----0------|\nD |-----2------|\nA |-----3------|\nE |------------|"
return text
# eS =
# text = """
# G |-----0------|
# D |-----2------|
# A |-----3------|
# E |------------|
# """
# G |------------|
# D |--------5---|
# A |-----7------|
# E |--8---------|
# [12, 10, 8, 7, ]
# print (text)
# import json
# import math
# import numpy as np
# from root import RootNote
# from pprint import pprint
# class TabGenerator(object):
# """ Generate Tabs.
# """
# def __init__(self):
# # --------------------------------------------------------------------------
# # ------------------------------------------------------------------------------
# def main():
# D_minor = RootNote("c#").major
# TabGenerator(D_minor).simple()
# # ------------------------------------------------------------------------------
# if __name__ == '__main__':
# main()
``` |
{
"source": "josemarimanio/django-adminlte2-templates",
"score": 2
} |
#### File: django-adminlte2-templates/adminlte2_templates/core.py
```python
from django.conf import settings
from adminlte2_templates import constants
try:
# Python 3
from urllib.parse import urlencode
except ImportError:
# Python 2.7
from urllib import urlencode
try:
# Supports >=Django 2.0
from django.shortcuts import reverse
except ImportError:
    # Supports <=Django 1.11
from django.core.urlresolvers import reverse
def get_settings(variable, django_setting=None):
"""
Get the settings variable from Django ``settings``. If no settings variable
is found, get the default value from the adminlte2_templates ``constants`` module.
:param variable: Settings variable name
:type variable: str
:param django_setting: Django settings variable to look for before trying to get ``variable``
:type django_setting: str, optional
:return: Settings value
"""
return getattr(settings, django_setting if django_setting else variable, getattr(constants, variable, None))
```
#### File: tests/paginator/tests.py
```python
from django.contrib.sites.models import Site
from django.template.exceptions import TemplateSyntaxError
from django.test import Client
from django.test import TestCase
from adminlte2_templates import constants as const
from adminlte2_templates.core import reverse
class PaginatorTestCase(TestCase):
"""
Test cases for {% paginator %} template tag
Testing these cases:
* Sanity check
* 'adjacent_pages', 'align', 'no_margin' params
* Invalid 'adjacent_pages', 'align' param values
"""
URL_PATTERN_INDEX = 'paginator:index'
URL_PATTERN_INVALID_ADJACENT_PAGES = 'paginator:invalid_adjacent_pages'
URL_PATTERN_INVALID_ALIGN = 'paginator:invalid_align'
def setUp(self):
self.client = Client()
for n in range(5):
n = str(n)
Site.objects.create(domain=n, name=n)
def get_response_page(self, page):
return self.client.get(reverse(self.URL_PATTERN_INDEX) + '?page=' + str(page))
def get_response_invalid_adjacent_pages(self):
return self.client.get(reverse(self.URL_PATTERN_INVALID_ADJACENT_PAGES))
def get_response_invalid_align(self):
return self.client.get(reverse(self.URL_PATTERN_INVALID_ALIGN))
#
# Sanity check
#
def test_template_used(self):
self.assertTemplateUsed(self.get_response_page(1), const.PAGINATOR_TEMPLATE_NAME)
def test_response_page_1(self):
self.assertContains(self.get_response_page(1),
'''<div id="check"><nav>
<ul id="pagination" class="pagination">
<li class="active"><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=2"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_response_page_2(self):
self.assertContains(self.get_response_page(2),
'''<div id="check"><nav>
<ul id="pagination" class="pagination">
<li><a href="?page=1"><small>First</small></a></li>
<li><a href="?page=1"><i class="fa fa-caret-left"></i></a></li>
<li><a href="?page=1">1</a></li>
<li class="active"><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=4">4</a></li>
<li><a href="?page=3"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_response_page_3(self):
self.assertContains(self.get_response_page(3),
'''<div id="check"><nav>
<ul id="pagination" class="pagination">
<li><a href="?page=1"><small>First</small></a></li>
<li><a href="?page=2"><i class="fa fa-caret-left"></i></a></li>
<li><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li class="active"><a href="?page=3">3</a></li>
<li><a href="?page=4">4</a></li>
<li><a href="?page=5">5</a></li>
<li><a href="?page=4"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_response_page_4(self):
self.assertContains(self.get_response_page(4),
'''<div id="check"><nav>
<ul id="pagination" class="pagination">
<li><a href="?page=1"><small>First</small></a></li>
<li><a href="?page=3"><i class="fa fa-caret-left"></i></a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li class="active"><a href="?page=4">4</a></li>
<li><a href="?page=5">5</a></li>
<li><a href="?page=6">6</a></li>
<li><a href="?page=5"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_response_page_last(self):
self.assertContains(self.get_response_page('last'),
'''<div id="check"><nav>
<ul id="pagination" class="pagination">
<li><a href="?page=1"><small>First</small></a></li>
<li><a href="?page=5"><i class="fa fa-caret-left"></i></a></li>
<li><a href="?page=4">4</a></li>
<li><a href="?page=5">5</a></li>
<li class="active"><a href="?page=6">6</a></li>
</ul>
</nav></div>''', html=True)
#
# 'adjacent_pages' parameter to 3
#
def test_param_adjacent_pages_page_1(self):
self.assertContains(self.get_response_page(1),
'''<div id="param-adjacent-pages"><nav>
<ul id="pagination" class="pagination">
<li class="active"><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=4">4</a></li>
<li><a href="?page=2"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_param_adjacent_pages_page_2(self):
self.assertContains(self.get_response_page(2),
'''<div id="param-adjacent-pages"><nav>
<ul id="pagination" class="pagination">
<li><a href="?page=1"><small>First</small></a></li>
<li><a href="?page=1"><i class="fa fa-caret-left"></i></a></li>
<li><a href="?page=1">1</a></li>
<li class="active"><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=4">4</a></li>
<li><a href="?page=5">5</a></li>
<li><a href="?page=3"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_param_adjacent_pages_page_3(self):
self.assertContains(self.get_response_page(3),
'''<div id="param-adjacent-pages"><nav>
<ul id="pagination" class="pagination">
<li><a href="?page=1"><small>First</small></a></li>
<li><a href="?page=2"><i class="fa fa-caret-left"></i></a></li>
<li><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li class="active"><a href="?page=3">3</a></li>
<li><a href="?page=4">4</a></li>
<li><a href="?page=5">5</a></li>
<li><a href="?page=6">6</a></li>
<li><a href="?page=4"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_param_adjacent_pages_page_4(self):
self.assertContains(self.get_response_page(4),
'''<div id="param-adjacent-pages"><nav>
<ul id="pagination" class="pagination">
<li><a href="?page=1"><small>First</small></a></li>
<li><a href="?page=3"><i class="fa fa-caret-left"></i></a></li>
<li><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li class="active"><a href="?page=4">4</a></li>
<li><a href="?page=5">5</a></li>
<li><a href="?page=6">6</a></li>
<li><a href="?page=5"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_param_adjacent_pages_last(self):
self.assertContains(self.get_response_page('last'),
'''<div id="param-adjacent-pages"><nav>
<ul id="pagination" class="pagination">
<li><a href="?page=1"><small>First</small></a></li>
<li><a href="?page=5"><i class="fa fa-caret-left"></i></a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=4">4</a></li>
<li><a href="?page=5">5</a></li>
<li class="active"><a href="?page=6">6</a></li>
</ul>
</nav></div>''', html=True)
#
# 'align' parameter choices
#
def test_param_align_initial(self):
self.assertContains(self.get_response_page(1),
'''<div id="param-align-initial"><nav>
<ul id="pagination" class="pagination">
<li class="active"><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=2"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_param_align_center(self):
self.assertContains(self.get_response_page(1),
'''<div id="param-align-center"><nav class="text-center">
<ul id="pagination" class="pagination">
<li class="active"><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=2"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_param_align_left(self):
self.assertContains(self.get_response_page(1),
'''<div id="param-align-left"><nav class="pull-left">
<ul id="pagination" class="pagination">
<li class="active"><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=2"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_param_align_right(self):
self.assertContains(self.get_response_page(1),
'''<div id="param-align-right"><nav class="pull-right">
<ul id="pagination" class="pagination">
<li class="active"><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=2"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
#
# 'no_margin' parameter
#
def test_param_no_margin_false(self):
self.assertContains(self.get_response_page(1),
'''<div id="param-no-margin-false"><nav>
<ul id="pagination" class="pagination">
<li class="active"><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=2"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
def test_param_no_margin_true(self):
self.assertContains(self.get_response_page(1),
'''<div id="param-no-margin-true"><nav>
<ul id="pagination" class="pagination no-margin">
<li class="active"><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=2"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
#
# Test all parameters
#
def test_param_all(self):
self.assertContains(self.get_response_page(1),
'''<div id="all-params"><nav>
<ul id="pagination" class="pagination">
<li class="active"><a href="?page=1">1</a></li>
<li><a href="?page=2">2</a></li>
<li><a href="?page=3">3</a></li>
<li><a href="?page=2"><i class="fa fa-caret-right"></i></a></li>
<li><a href="?page=last"><small>Last</small></a></li>
</ul>
</nav></div>''', html=True)
#
# Invalid parameter
#
def test_invalid_param_adjacent_pages(self):
self.assertRaises(TypeError, self.get_response_invalid_adjacent_pages)
def test_invalid_param_align(self):
self.assertRaises(TemplateSyntaxError, self.get_response_invalid_align)
``` |
{
"source": "josemarin7/Python-OpenCV-Recognition-via-Camera",
"score": 3
} |
#### File: camera-opencv/02-image_process/canny_edge_detect.py
```python
import cv2
import sys
import numpy as np
def nothing(x):
pass
cv2.namedWindow('canny_demo')
cv2.createTrackbar('threshold', 'canny_demo', 0, 100, nothing)
cv2.createTrackbar('increase_ratio', 'canny_demo', 0, 5, nothing)
try:
imagePath = sys.argv[1]
image = cv2.imread(imagePath)
except:
image = cv2.imread("lena512rgb.png")
while True:
threshold = cv2.getTrackbarPos('threshold', 'canny_demo')
ratio = cv2.getTrackbarPos('increase_ratio', 'canny_demo')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.GaussianBlur(gray, (5, 5), 0)
edges = cv2.Canny(edges, threshold, threshold * ratio, apertureSize=3)
cv2.imshow('canny_demo', edges)
if cv2.waitKey(1) & 0xFF == ord("q"):
cv2.imwrite("canny_demo.png", edges)
break
cv2.destroyAllWindows()
```
#### File: camera-python/05-streaming/app-route.py
```python
from flask import Flask, render_template, Response
app = Flask(__name__)
@app.route("/")
def index():
return render_template('link.html')
@app.route("/foo")
def foo():
extns = ['Flask', 'Jinja2', 'Awesome']
return render_template('bar.html', extns=extns)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True)
```
#### File: camera-python/05-streaming/camera_pi.py
```python
import cv2
class Camera(object):
def __init__(self):
if cv2.__version__.startswith('2'):
PROP_FRAME_WIDTH = cv2.cv.CV_CAP_PROP_FRAME_WIDTH
PROP_FRAME_HEIGHT = cv2.cv.CV_CAP_PROP_FRAME_HEIGHT
        else:
            # OpenCV 3.x and later expose the CAP_PROP_* constants directly
            PROP_FRAME_WIDTH = cv2.CAP_PROP_FRAME_WIDTH
            PROP_FRAME_HEIGHT = cv2.CAP_PROP_FRAME_HEIGHT
self.video = cv2.VideoCapture(0)
#self.video = cv2.VideoCapture(1)
#self.video.set(PROP_FRAME_WIDTH, 640)
#self.video.set(PROP_FRAME_HEIGHT, 480)
self.video.set(PROP_FRAME_WIDTH, 320)
self.video.set(PROP_FRAME_HEIGHT, 240)
def __del__(self):
self.video.release()
def get_frame(self):
success, image = self.video.read()
ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
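# Illustrative usage sketch (requires a connected camera at index 0):
#
#   cam = Camera()
#   jpeg_bytes = cam.get_frame()   # one JPEG-encoded frame as bytes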
``` |
{
"source": "josemarioqv/FS0741-DynamicalSystems-FractalGeometry",
"score": 3
} |
#### File: FS0741-DynamicalSystems-FractalGeometry/Homework#2/script.py
```python
import pandas as pd
# Functions Definitions
def F(x):
return x**2 - 2
def G(x):
return x**2 - 2.0001
def D(x):
return (2*x) % 1
# Dict with functions
function = {'F': F, 'G': G, 'D': D}
# Main script
seeds = pd.read_csv('seeds.csv')
columns = list(seeds)
for column in columns:
orbit = pd.DataFrame(columns=seeds[column])
for seed in seeds[column]:
this_orbit = [seed]
for iteration in range(99):
this_orbit.append(function[column](this_orbit[-1]))
orbit[seed] = pd.Series(this_orbit).values
orbit.to_csv(column+'.csv')
```
#### File: FS0741-DynamicalSystems-FractalGeometry/Second_Exam/fern.py
```python
import numpy as np
import random
import pyglet
class Fern():
def __init__(self):
self.W = np.array([[[0., 0.], [0., 0.4]],
[[0.85, 0.04], [-0.04, 0.85]],
[[0.2, -0.26], [0.23, 0.22]],
[[-0.15, 0.28], [0.25, 0.24]]])
self.B = np.array([[0., 0.01],
[1.6, 0.85],
[1.6, 0.07],
[0.44, 0.07]])
self.X = np.array([0.5, 0.6])
def update(self):
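        # choose one of the four affine maps at random (weights 0.01/0.85/0.07/0.07) and apply it to the current point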
i = random.choices(population=[0, 1, 2, 3],
weights=[0.01, 0.85, 0.07, 0.07])[0]
self.X = np.dot(self.W[i], self.X) + self.B[i]
def draw(self):
point = self.X*35
point = tuple(point.astype(int))
print(point)
pyglet.graphics.draw(1, pyglet.gl.GL_POINTS, ('v2i', point),
('c3B', (40, 200, 40)))
class Window(pyglet.window.Window):
def __init__(self):
# pyglet window initialization
super().__init__()
self.set_size(400, 400)
pyglet.clock.schedule_interval(self.update, 0.001)
# initialization
self.fern = Fern()
def on_draw(self):
self.fern.draw()
def update(self, dt):
self.fern.update()
pass
if __name__ == '__main__':
window = Window()
pyglet.app.run()
```
#### File: FS0741-DynamicalSystems-FractalGeometry/Second_Exam/tree.py
```python
import numpy as np
import random
import pyglet
class Tree():
def __init__(self):
self.W = np.array([[[0., 0.], [0., 0.5]],
[[0.42, -0.42], [0.42, 0.42]],
[[0.42, 0.42], [-0.42, 0.42]]])
self.B = np.array([[0., 0.],
[0., 0.2],
[0., 0.2]])
self.X = np.array([2, 2])
def update(self):
i = random.choices(population=[0, 1, 2],
weights=[0.05, 0.4, 0.4])[0]
self.X = np.dot(self.W[i], self.X) + self.B[i]
def draw(self):
point = self.X*800
point = tuple(point.astype(int))
print(point)
pyglet.graphics.draw(1, pyglet.gl.GL_POINTS, ('v2i', point),
('c3B', (40, 200, 40)))
class Window(pyglet.window.Window):
def __init__(self):
# pyglet window initialization
super().__init__()
self.set_size(400, 400)
pyglet.clock.schedule_interval(self.update, 0.001)
# initialization
self.tree = Tree()
def on_draw(self):
self.tree.draw()
def update(self, dt):
self.tree.update()
pass
if __name__ == '__main__':
window = Window()
pyglet.app.run()
``` |
{
"source": "jose-marquez89/cryptovest",
"score": 3
} |
#### File: jose-marquez89/cryptovest/db_model.py
```python
import os
import pymysql
from dotenv import load_dotenv
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, BigInteger, String, Float, ForeignKey
def load_engine():
"""Create the database engine"""
load_dotenv()
DB_UNAME = os.environ["DB_UNAME"]
DB_PWORD = os.environ["DB_PWORD"]
DB_HOST = os.environ["DB_HOST"]
DB_NAME = os.environ["DB_NAME"]
engine = create_engine(f'mysql+pymysql://{DB_UNAME}:{DB_PWORD}@{DB_HOST}/{DB_NAME}')
return engine
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(BigInteger, primary_key=True, nullable=False)
name = Column(String(32), unique=True)
password = Column(String(32))
transaction = relationship("Ledger")
def __repr__(self):
return "<User(name={}, password={}>".format(self.name, self.password)
class Ledger(Base):
__tablename__ = 'ledger'
# TODO: add standardized columns based on coinbase csv
id = Column(BigInteger, primary_key=True, nullable=False)
source = Column(String(20))
asset = Column(String(20))
txn_type = Column(String(20))
amount = Column(Float)
price_at_txn = Column(Float)
user_id = Column(BigInteger, ForeignKey('user.id'), nullable=False)
def __repr__(self):
return "<Ledger(asset={}, amount={}, user_id={})>".format(self.asset, self.amount,self.user_id)
if __name__ == "__main__":
engine = load_engine()
Base.metadata.create_all(engine)
```
#### File: cryptovest/pages/index.py
```python
import flask
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
from db_users import get_user_id
details = dbc.Col(
id='index-action-call',
md=4
)
photo = dbc.Col(
[
html.Img(src='assets/nick-chong-charts.jpg',
className='img-fluid'),
html.P(
children=[
"Photo Credit: ",
html.A("<NAME>", href="https://twitter.com/n1ckchong?utm_medium=referral&utm_source=unsplash")
]
)
],
md=8
)
layout = dbc.Row([details, photo])
@app.callback(Output('index-action-call', 'children'),
Input('index-action-call', 'children'))
def set_link(_):
user = flask.request.cookies.get('logged-in-user')
if user:
action = [
html.H3("Keep Track Of Your Crypto Investments"),
html.P("Update your ledger to get your latest portfolio analysis."),
dcc.Link(dbc.Button('Get Started', color='primary'), href="#")
]
else:
action = [
html.H3("Keep Track Of Your Crypto Investments"),
html.P("Create an account and start tracking your crypto transactions free of charge."),
dcc.Link(dbc.Button('Create Account', color='primary'), href="/new-account")
]
return action
``` |
{
"source": "jose-marquez89/DS-Unit-3-Sprint-2-SQL-and-Databases",
"score": 3
} |
#### File: module3-nosql-and-document-oriented-databases/mongodb/rpgsql_to_mongo.py
```python
import os
import urllib
import sqlite3
import pprint
import logging
import pymongo
from dotenv import load_dotenv
log_format = "%(asctime)s - %(levelname)s %(message)s"
logging.basicConfig(level=logging.DEBUG, format=log_format)
logging.disable(logging.CRITICAL)
load_dotenv()
escape = urllib.parse.quote_plus
MONGO_USER = os.getenv("MONGO_USER", default="OOPS")
MONGO_PASSWORD = os.getenv("MONGO_PASSWORD", default="<PASSWORD>")
MONGO_CLUSTER = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")
# Mongo
connection_uri = f"mongodb+srv://{MONGO_USER}:{escape(MONGO_PASSWORD)}"\
f"@{MONGO_CLUSTER}.mongodb.net/test?retryWrites=true"\
"&w=majority"
client = pymongo.MongoClient(connection_uri)
mongoDB = client.rpg
logging.info("Connection @: " + connection_uri)
# SQLite
db_dirname = "module1-introduction-to-sql"
db_filename = 'rpg_db.sqlite3'
db_path = os.path.join(os.path.dirname(__file__),
"..", "..", db_dirname, db_filename)
logging.info(os.path.abspath(db_path))
sqlite_connection = sqlite3.connect(db_path)
cursor = sqlite_connection.cursor()
def table_to_list(table_name, engine):
"""
Take sqlite table and return a mongoDB bulk insertable list
table_name: name of table to acquire from sqlite db
engine: cursor from sqlite3 connection
"""
query = f"""
SELECT * FROM {table_name}
"""
result = engine.execute(query).fetchall()
column_headers = list(map(lambda x: x[0], engine.description))
insertable_list = []
for tup in result:
document = {}
for i in range(len(tup)):
document[column_headers[i]] = tup[i]
insertable_list.append(document)
return insertable_list
table_query = """
SELECT
name
FROM
sqlite_master
WHERE
type ='table' AND
name NOT LIKE 'sqlite_%';
"""
tables = cursor.execute(table_query).fetchall()
sqlite_tables = []
for name in tables:
sqlite_tables.append(name[0])
if __name__ == "__main__":
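    # copy each non-empty SQLite table into a MongoDB collection of the same name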
for table in sqlite_tables:
if table_to_list(table, cursor) == []:
logging.info(f"Empty list @ table {table}")
else:
logging.info(f"Inserting {table}...")
collection = mongoDB[table]
collection.insert_many(table_to_list(table, cursor))
logging.info("Complete.")
``` |
{
"source": "jose-marquez89/lambdata-josemarquez89",
"score": 3
} |
#### File: lambdata-josemarquez89/jose_lambdata/dftools.py
```python
import pandas as pd
from sklearn.model_selection import train_test_split
from scipy.stats import chi2_contingency as csc
from IPython.display import display
class DataFrameOperator:
"""Perform pandas.DataFrame operations"""
def __init__(self, dataframe):
self.dataframe = dataframe
def decompose_time(self, feature):
"""Splits a datetime column into year, month
and day, adds these columns directly to dataframe"""
if self.dataframe[feature].dtype != '<M8[ns]':
try:
self.dataframe[feature] = pd.to_datetime(
self.dataframe[feature]
)
except TypeError:
print("Error: Not a recognized datetime type")
self.dataframe['year'] = self.dataframe[feature].dt.year
self.dataframe['month'] = self.dataframe[feature].dt.month
self.dataframe['day'] = self.dataframe[feature].dt.day
return self.dataframe
def auto_split(self):
"""
Automatically splits into a 64% train, 16% validate and
20% test set, returns train, val, test
"""
df = self.dataframe.copy()
train, test = train_test_split(df,
test_size=0.2,
random_state=42)
train, val = train_test_split(train,
test_size=0.2,
random_state=42)
return train, val, test
def get_chi2(dataframe, f1, f2):
"""Displays a contingency table and prints a chi square report"""
observed = pd.crosstab(dataframe[f1], dataframe[f2])
cs, pv, dof, expected = csc(observed)
display(observed)
print(f"chi^2: {cs}")
print(f"p-value: {pv}")
print(f"dof: {dof}")
if __name__ == "__main__":
test_df = pd.read_csv('test_data.csv')
op = DataFrameOperator(test_df)
train, val, test = op.auto_split()
print(train.shape, val.shape, test.shape, '\n')
print("DATAFRAME BEFORE DECOMPOSE:\n")
print(test_df.head(), '\n')
print("DATAFRAME AFTER DECOMPOSE:\n")
test_df = op.decompose_time('date_recorded')
print(test_df.head())
``` |
{
"source": "jose-marquez89/naive-bayes-from-scratch",
"score": 3
} |
#### File: naive-bayes-from-scratch/nayes/nayes.py
```python
import numpy as np
class MultiNayes:
"""
Multinomial Naive Bayes algorithm.
Paramaters
----------
alpha : float, default=1.0
Smoothing paramater, can be set to smaller values
(0 for no smoothing)
"""
def __init__(self, alpha=1.0):
self.alpha = alpha
self.fitted = False
def label_binarizer(self, y, classes=None, bin_labels=None):
"""convert labels into an array of shape
(length of y, number of classes). This
will assist in getting the log priors and probabilities"""
if classes is None:
classes = np.unique(y)
bin_labels = np.zeros((y.shape[0], classes.shape[0]))
self.classes = classes
self.bin_labels = bin_labels
if bin_labels.shape[0] < 1:
return None
x = np.where(classes == y[0])
bin_labels[0][x] = 1
return self.label_binarizer(y[1:], classes, bin_labels[1:])
def fit(self, X, y):
# if X is not np.ndarray, convert from csr with `toarray()`
if type(X) is not np.ndarray:
X = X.toarray()
self.label_binarizer(y)
n_classes = self.classes.shape[0]
n_features = X.shape[1]
# initialize counter arrays
self.class_count = np.zeros(n_classes)
self.feature_count = np.zeros((n_classes, n_features))
# count classes and features by getting
# dot product of transposed binary labels
# they are automatically separated into their
# appropriate arrays
self.feature_count += np.dot(self.bin_labels.T, X)
self.class_count += self.bin_labels.sum(axis=0)
# add smoothing
if self.alpha > 0.0:
self.feature_count += self.alpha
smoothed_class_count = self.feature_count.sum(axis=1)
# get conditional log probabilities
self.feat_log_probs = (np.log(self.feature_count) -
np.log(smoothed_class_count.reshape(-1, 1)))
else:
print(
f"Alpha is {self.alpha}. A value this small will cause "
"result in errors when feature count is 0"
)
self.feat_log_probs = np.log(
self.feature_count /
self.feature_count
.sum(axis=1)
.reshape(-1, 1)
)
# get log priors
self.class_log_priors = (np.log(self.class_count) -
np.log(self.class_count
.sum(axis=0)
.reshape(-1, 1)))
self.fitted = True
def predict(self, X):
"""Predict target from features of X"""
# check if model has fit data
if not self.fitted:
print("The classifier has not yet "
"been fit. Not executing predict")
if type(X) is not np.ndarray:
X = X.toarray()
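        # joint log-likelihood per class: feature counts weighted by per-feature log probabilities, plus the class log prior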
scores = np.dot(X, self.feat_log_probs.T) + self.class_log_priors
predictions = self.classes[np.argmax(scores, axis=1)]
return predictions
def accuracy(self, y_pred, y):
points = (y_pred == y).astype(int)
score = points.sum() / points.shape[0]
return score
if __name__ == "__main__":
X_train = np.array([[1, 2, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 2, 1, 0, 0],
[2, 3, 0, 0, 0, 0],
[0, 0, 0, 0, 3, 1],
[0, 0, 0, 0, 1, 2]])
y_train = np.array([1, 2, 2, 1, 3, 3])
X_test = np.array([[1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 2, 3]])
clf = MultiNayes()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(y_pred)
``` |
{
"source": "jose-marquez89/pbi-transplant",
"score": 3
} |
#### File: jose-marquez89/pbi-transplant/tools.py
```python
import os
import json
import sys
def transplant_bookmarks(source, target):
"""Copy over bookmarks"""
source_config = json.loads(source['config'])
target_config = json.loads(target['config'])
source_bookmarks = source_config['bookmarks']
target_config['bookmarks'] = source_bookmarks
target['config'] = json.dumps(target_config)
return target
# TODO: Zip functionality
if __name__ == "__main__":
    # layout file paths are taken from the command line: python tools.py <source> <target>
    source, target = sys.argv[1], sys.argv[2]
    with open(source, 'rb') as s:
        source_json = json.load(s)
    with open(target, 'rb') as t:
        target_json = json.load(t)
    output = transplant_bookmarks(source_json, target_json)
    with open("CopiedLayout", "w") as f:
        json.dump(output, f)
``` |
{
"source": "jose-marquez89/sftp-azure-poc",
"score": 3
} |
#### File: jose-marquez89/sftp-azure-poc/connection.py
```python
import os
import pysftp
from azure.storage.blob import BlobClient
CONTAINER_NAME = os.environ["CONTAINER"]
CONNECTION = os.environ["AzureWebJobsStorage"]
HOST_NAME = os.environ["HOST_NAME"]
USER_NAME = os.environ["USER_NAME"]
PASSWORD = os.environ["PASSWORD"]
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
def read_and_upload():
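    # stream files from the SFTP server and upload them to Azure Blob Storage without writing them to local disk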
with pysftp.Connection(HOST_NAME,
username=USER_NAME,
password=PASSWORD,
cnopts=cnopts) as sftp:
with sftp.open("readme.txt", "r") as readme:
# upload blob
blob_conn = BlobClient.from_connection_string(
conn_str=CONNECTION,
container_name=CONTAINER_NAME,
blob_name="demo-sftp/sftp_readme.txt"
)
blob_conn.upload_blob(readme, overwrite=True)
with sftp.cd('pub/example'):
for file in sftp.listdir():
blob_conn = BlobClient.from_connection_string(
conn_str=CONNECTION,
container_name=CONTAINER_NAME,
blob_name=f"demo-sftp-images/{file}"
)
with sftp.open(file) as image_file:
blob_conn.upload_blob(image_file, overwrite=True)
``` |
{
"source": "jose-marquez89/tech-job-landscape",
"score": 3
} |
#### File: tech-job-landscape/python_scrape/test_functions.py
```python
import unittest
import scrape
class TestScrapeFunctions(unittest.TestCase):
def test_build_url(self):
url = scrape.build_url("indeed",
"/jobs?q=Data+Scientist&l=Texas&start=10",
join_next=True)
expected = ("https://www.indeed.com/"
"jobs?q=Data+Scientist&l=Texas&start=10")
url2 = scrape.build_url("indeed", job="Data Scientist", state="Texas")
expected2 = ("https://www.indeed.com/"
"jobs?q=Data%20Scientist&l=Texas&start=0")
self.assertEqual(url, expected)
self.assertEqual(url2, expected2)
def test_fetch_page(self):
fpl = scrape.fetch_page_listings
job_data = fpl("indeed",
job="Data Scientist",
state="Texas")
self.assertNotEqual(len(job_data), 0)
self.assertIsInstance(job_data, tuple)
self.assertIsInstance(job_data[0][0], dict)
self.assertIsInstance(job_data[1], str)
job_data = fpl("indeed",
next_page="/jobs?q=Data+Scientist"
"&l=Texas&start=10")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jose-marquez89/twitoff",
"score": 3
} |
#### File: twitoff/TwitOff/basilica_service.py
```python
import os
import basilica
from dotenv import load_dotenv
load_dotenv()
BASILICA_KEY = os.getenv("BASILICA_KEY")
def basiliconn():
connection = basilica.Connection(BASILICA_KEY)
return connection
if __name__ == "__main__":
sentences = ["Hello world", "What's up"]
embeddings = [
i for i in basiliconn().embed_sentences(sentences,
model="twitter")
]
both = list(zip(sentences, embeddings))
def to_dict(zipped):
new = {}
for s, e in zipped:
new[s] = e
return new
print(to_dict(both))
```
#### File: TwitOff/routes/home_routes.py
```python
from flask import Blueprint, render_template, jsonify
from TwitOff.models import db
home_routes = Blueprint("home_routes", __name__)
@home_routes.route("/")
def index():
return render_template("home.html")
@home_routes.route("/reset")
def reset_db():
db.drop_all()
db.create_all()
return jsonify({"message": "DB RESET OK"})
```
#### File: twitoff/TwitOff/twitter_service.py
```python
import os
from dotenv import load_dotenv
import tweepy
load_dotenv()
TWITTER_API_KEY = os.getenv("TWITTER_API_KEY")
TWITTER_API_SECRET = os.getenv("TWITTER_API_SECRET")
TWITTER_ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET")
def twitter_api():
auth = tweepy.OAuthHandler(TWITTER_API_KEY,
TWITTER_API_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN,
TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
return api
if __name__ == "__main__":
api = twitter_api()
user = api.get_user("tferriss")
print("USER", user)
print(user.screen_name)
print(user.name)
print(user.followers_count)
``` |