{
"source": "jhonasn/templatizator",
"score": 3
}
#### File: templatizator/domain/container.py
```python
from templatizator.domain.repository import ConfigurationRepository, \
VariableRepository, TemplateRepository, TemplateFileRepository, \
ConfigurableRepository, ConfigurableFileRepository
from templatizator.domain.service import ProjectService, \
ConfigurationService, VariableService, TemplateService, ConfigurableService
from templatizator.domain.application import ProjectApplication, \
VariableApplication, TemplateApplication, ConfigurableFileApplication
from templatizator.domain.helper import Event
# pylint: disable=too-few-public-methods
class Container:
'''Static container class that holds the important instances available
for presentation layer
'''
def __init__(self):
raise Exception('Static class is not instantiable')
@staticmethod
def configure():
'''Instantiate events, and DDD layers'''
# events
project_changed_event = Event()
configuration_changed_event = Event()
# repository layer
configuration_repository = ConfigurationRepository()
variable_repository = VariableRepository()
template_repository = TemplateRepository()
template_file_repository = TemplateFileRepository()
configurable_repository = ConfigurableRepository()
configurable_file_repository = ConfigurableFileRepository()
# service layer
        # the order matters: it determines how the services subscribe to and publish the events
variable_service = VariableService(
variable_repository,
project_changed_event
)
template_service = TemplateService(
template_repository,
template_file_repository,
project_changed_event
)
configurable_service = ConfigurableService(
configurable_repository,
configurable_file_repository,
project_changed_event
)
project_service = ProjectService(
configuration_repository, variable_repository, template_repository,
configurable_repository, template_file_repository,
configurable_file_repository, configuration_changed_event,
project_changed_event
)
configuration_service = ConfigurationService(
configuration_repository,
configuration_changed_event
)
# application layer
Container.project_application = ProjectApplication(
project_service,
configuration_service
)
Container.variable_application = VariableApplication(variable_service)
Container.template_application = TemplateApplication(template_service)
Container.configurable_file_application = ConfigurableFileApplication(
configurable_service
)
```
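A minimal usage sketch (assuming the `templatizator` package is installed and the repositories can be built with no arguments, as above): the presentation layer calls `Container.configure()` once and then reads the application singletons from the class attributes it sets.
```python
from templatizator.domain.container import Container

Container.configure()  # wires repositories, services and applications once

# the presentation layer only touches the application layer instances
project_app = Container.project_application
variable_app = Container.variable_application
template_app = Container.template_application
configurable_app = Container.configurable_file_application
```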
#### File: templatizator/domain/domain.py
```python
from abc import ABC, abstractmethod
# Just one method contract required
# pylint: disable=too-few-public-methods
class Serializable(ABC):
'''Base class for serializable classes (all domains must be)'''
@abstractmethod
def serialize(self):
'''Contract method to serialize object'''
raise NotImplementedError('Object serialize method not implemented')
class Node(Serializable):
'''Base class for node graph.
Commonly used as a file tree in this project
'''
def __init__(self, path=None, name=None):
self.path = path
self.name = name
self.children = []
self.parent = None
def add_child(self, child):
'''Add child to this node and set parent to the child'''
self.children.append(child)
child.parent = self
def remove_child(self, child):
'''Remove child from this node'''
self.children.remove(child)
def remove(self):
'''Remove itself from its parent'''
self.parent.remove_child(self)
        del self  # removes only the local name; the node is collected once unreferenced
def serialize(self):
'''Serialize relevant node attributes'''
return {'path': self.path}
class Directory(Node):
'''Represents a directory into a file tree'''
def __init__(self, path=None, name=None, opened=False):
super().__init__(path, name)
self.open = opened
def serialize(self):
'''Serialize relevant attributes'''
obj = super().serialize()
obj['open'] = self.open
return obj
class File(Node):
'''Represents a file into a file tree'''
def __init__(self, path=None, name=None, save=True):
super().__init__(path, name)
self.save = save
def serialize(self):
obj = super().serialize()
obj['save'] = self.save
return obj
class Template(File):
'''Represents a template file into a file tree'''
pass
class ConfigurableFile(File):
    '''Represents a configurable file in a file tree.
    Typical configurables are xml or json files, but any file can be one;
    the configurable receives the templates through placeholder template
    lines, laid out in whatever way is desired
    '''
pass
class Project(Directory):
'''Represents the project directory into the file tree'''
def __init__(self, path=None, name=None, path_name=None, selected=False):
super().__init__(path, name, True)
# friendly project name
self.path_name = path_name
self.selected = selected
def serialize(self):
'''Serialize relevant attributes'''
obj = super().serialize()
del obj['open']
obj['path_name'] = self.path_name
obj['selected'] = self.selected
return obj
class Variable(Serializable):
'''Project variables that will be replacing placeholders in file names
or file contents
'''
def __init__(self, name=None, value=None):
self.name = name
self.value = value
def serialize(self):
'''Serialize relevant attributes'''
return {'name': self.name, 'value': self.value}
```
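A short, self-contained illustration of the node tree and serialization defined above (assuming the module is importable as `templatizator.domain.domain`):
```python
from templatizator.domain.domain import Directory, File, Variable

root = Directory(path='/project', name='project', opened=True)
src = Directory(path='/project/src', name='src')
main_py = File(path='/project/src/main.py', name='main.py', save=True)

root.add_child(src)      # also sets src.parent = root
src.add_child(main_py)   # also sets main_py.parent = src

print(root.serialize())      # {'path': '/project', 'open': True}
print(main_py.serialize())   # {'path': '/project/src/main.py', 'save': True}
print(Variable('ext', 'py').serialize())  # {'name': 'ext', 'value': 'py'}

main_py.remove()             # detaches the file from src.children
print(len(src.children))     # 0
```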
#### File: templatizator/domain/service.py
```python
from abc import ABC
from templatizator.domain.domain import Variable
from templatizator.domain.infrastructure import ProjectNotSet
from templatizator.domain.helper import OS
class ConfigurationService:
'''Handle configuration rules'''
def __init__(self, repository, configuration_changed_event):
self.repository = repository
self.event = configuration_changed_event
def change_path(self, path):
'''Changes repository path and notify it through event'''
# save configuration path
self.repository.change_path(path)
self.event.publish(path)
def get_path(self):
'''Returns configuration path'''
return self.repository.path
class ProjectService:
'''Handle project rules'''
    # Project has no repository, so it's not necessary to call the base class
# pylint: disable=super-init-not-called,too-many-arguments
def __init__(self, configuration_repository, variable_repository,
template_repository, configurable_repository,
template_file_repository, configurable_file_repository,
configuration_changed_event, project_change_event):
self.configuration_repository = configuration_repository
self.variable_repository = variable_repository
self.template_repository = template_repository
self.configurable_repository = configurable_repository
self.template_file_repository = template_file_repository
self.configurable_file_repository = configurable_file_repository
self.event = project_change_event
configuration_changed_event.subscribe(self.configuration_changed)
path = self.configuration_repository.get_project_path()
if path:
self.event.publish(path)
def get_home_path(self):
'''Get home path'''
return self.configuration_repository.get_home_path()
def change_path(self, path):
'''Changes repository path and notify it through event'''
project_path = self.configuration_repository.change_project(path)
self.event.publish(project_path)
def configuration_changed(self, path):
        '''Configuration path change listener: changes the path when a
        notification arrives from the configuration and notifies the other
        services through the event
        '''
self.configuration_repository.path = path
path = self.configuration_repository.get_project_path()
self.event.publish(path)
def find_node(self, filetree, path):
'''Find node instance of informed path into the filetree'''
return self.configuration_repository.find_node(filetree, path)
def get_filetree(self):
'''Get filetree graph and fills it with templates and configurables'''
filetree = self.configuration_repository.get_filetree()
templates = self.template_repository.get()
configurables = self.configurable_repository.get()
for template in templates:
parent = self.find_node(
filetree,
self.configuration_repository.get_parent_path(template.path)
)
if parent:
parent.add_child(template)
for configurable in configurables:
parent = self.find_node(
filetree,
self.configuration_repository.get_parent_path(
configurable.path
)
)
if parent:
parent.add_child(configurable)
return filetree
def replace_variables(self, text):
'''Replaces placeholders in the passed text with
recorded application variables
'''
new_text = text
for var in self.variable_repository.get():
new_text = new_text.replace(f'[{var.name}]', var.value)
return new_text
def save_into_project(self):
'''Save configured templates and configurables
into the project folder
'''
local_path = self.configuration_repository.get_project_path()
if not local_path:
raise ProjectNotSet
from re import sub, findall
# save templates into the project
prev_name = self.template_file_repository.name
for template in self.template_repository.get():
if template.save:
self.template_file_repository.path = local_path
self.template_file_repository.name = template.name
content = self.template_file_repository.get()
content = self.replace_variables(content)
self.template_file_repository.path = \
self.configuration_repository.get_parent_path(
template.path)
self.template_file_repository.name = \
self.replace_variables(template.name)
self.template_file_repository.save(content)
self.template_file_repository.path = local_path
self.template_file_repository.name = prev_name
# save configurable files into the project
prev_name = self.configurable_file_repository.name
for configurable in self.configurable_repository.get():
if configurable.save:
self.configurable_file_repository.path = local_path
self.configurable_file_repository.name = configurable.name
content = self.configurable_file_repository.get()
templates = self.template_repository.get()
# first remount content replacing template.All placeholders
# by each template
template_all_props = ['name', 'path', 'relative_path']
template_all_props = list(map(
lambda p: {'prop': p, 'template': f'[template.All.{p}]'},
template_all_props))
found_templates = [{'found': f, 'inline': True} for f in
findall(r'\<\[.*?\]\>', content)]
found_templates.extend([{'found': f, 'inline': False} for f in
list(filter(
lambda s: '<[' not in s
and ']>' not in s,
findall(r'.*\[.*\].*', content)))])
# create templates with relative_path
project_path = self.get_filetree().path
def map_template(template):
template.name = self.replace_variables(template.name)
template.path = self.replace_variables(OS.get_default_path(
template.path))
template = template.__dict__
template['relative_path'] = \
template['path'].replace(project_path, '')
# remove first slash
template['relative_path'] = template['relative_path'][1:]
return template
templates = list(map(map_template, templates))
for template_found in found_templates:
found, is_inline = template_found.values()
templates_in = list(filter(
lambda p: p['template'] in found, template_all_props))
replacement = ''
for prop in templates_in:
for template in templates:
result = found[2:-2] if is_inline else found
prp, templ = prop.values()
template = template[prp]
replacement += result.replace(templ, template)
replacement += '\n' if not is_inline else ''
replacement += found
index = content.index(found)
content = content[:index] + replacement + \
content[index + len(found):]
# save new content into the template
self.configurable_file_repository.name = configurable.name
self.configurable_file_repository.save(content)
# save new content to the configurable in project
# removing the template.All placeholders
self.configurable_file_repository.path = \
self.configuration_repository.get_parent_path(
configurable.path)
content = sub(r'\<\[.*?\]\>', lambda m: '', content)
content = sub(r'(?<=\n).*\[template\.All\.(\w+)\].*\n',
lambda m: '', content)
self.configurable_file_repository.save(content)
self.configurable_file_repository.path = local_path
self.configurable_file_repository.name = prev_name
class VariableService:
'''Handle variable rules'''
def __init__(self, repository, project_change_event):
self.repository = repository
project_change_event.subscribe(self.project_changed)
def get(self):
'''Get project variables'''
return self.repository.get()
@staticmethod
def get_defaults():
'''Returns default application variables'''
return [Variable('ext', 'py')]
def save_defaults(self):
        '''Saves the default variables if they weren't deleted before
        and a project is set
        '''
if not self.repository.exists() and self.repository.path:
for var in VariableService.get_defaults():
self.add(var)
def add(self, variable):
'''Add variable'''
if not self.repository.path:
raise ProjectNotSet
self.repository.add(variable)
def change(self, old_name, variable):
'''Updates variable'''
variables = self.repository.get()
db_variable = self.repository.first(lambda v: v.name == old_name,
variables)
if isinstance(db_variable, Variable):
db_variable.name = variable.name
db_variable.value = variable.value
self.repository.save(variables)
def remove(self, name):
'''Removes variable by name'''
self.repository.remove(lambda v: v.name == name)
def project_changed(self, path):
        '''Project path change listener that changes the repository path when
        the project path changes
        '''
self.repository.path = path
self.save_defaults()
class FileService(ABC):
'''Base service class for file model handlers'''
def __init__(self, repository, file_repository):
self.repository = repository
self.file_repository = file_repository
def create_child(self, parent, name):
'''Add child node into the parent and get correct child path'''
return self.repository.create_child(parent, name)
def add(self, file_node, content):
'''Add file node with content in the hard disk'''
self.repository.add(file_node)
self.file_repository.name = file_node.name
self.file_repository.save(content)
def save(self, file_node):
'''Save file node state'''
self.repository.update(file_node, file_node.name)
def save_file(self, file_node, new_name, content):
'''Write file node in the hard disk and rename if necessary'''
if not new_name:
new_name = file_node.name
self.repository.update(file_node, new_name)
self.file_repository.save_file(file_node.name, new_name, content)
def remove(self, template):
'''Removes file node from collection and its file'''
self.repository.remove_node(template)
self.file_repository.name = template.name
self.file_repository.drop()
class TemplateService(FileService):
'''Handle template rules'''
def __init__(self, repository, file_repository, project_change_event):
super().__init__(repository, file_repository)
project_change_event.subscribe(self.project_changed)
def get(self, template):
'''Get file content'''
self.file_repository.name = template.name
return self.file_repository.get()
def get_all(self):
'''Get all templates'''
return self.repository.get()
def project_changed(self, path):
        '''Project path change listener that changes the repository path when
        the project path changes
        '''
self.file_repository.path = path
self.repository.path = path
def get_path(self, template):
'''Get template file path'''
self.repository.name = template.name
path = OS.get_default_path(self.repository.full_path)
# reset repository name
del self.repository.name
return path
class ConfigurableService(FileService):
'''Handle configurable rules'''
def __init__(self, repository, file_repository, project_change_event):
super().__init__(repository, file_repository)
project_change_event.subscribe(self.project_changed)
def get(self, configurable):
'''Get file content'''
previous_path = self.file_repository.path
is_new = not self.repository.filter(
lambda c: c.path == configurable.path
)
if is_new:
self.file_repository.path = configurable.path
else:
self.file_repository.name = configurable.name
content = self.file_repository.get()
self.file_repository.path = previous_path
return content
def project_changed(self, path):
        '''Project path change listener that changes the repository path when
        the project path changes
        '''
self.repository.path = path
self.file_repository.path = path
def get_filename(self, path):
'''Get filename from entire path'''
return self.repository.get_basename(path)
def is_child(self, parent_path, filename):
        '''Verify whether filename is an existing file inside the parent_path folder'''
return self.repository.is_child(parent_path, filename)
```
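The placeholder convention used throughout `save_into_project` is plain square brackets: `replace_variables` swaps every `[variable_name]` occurrence for the variable's value, both in file names and file contents. A standalone sketch of that substitution (the real method pulls the variables from `variable_repository`):
```python
def replace_variables(text, variables):
    """Replace [name] placeholders with their values, mirroring ProjectService."""
    for name, value in variables.items():
        text = text.replace(f'[{name}]', value)
    return text

variables = {'name': 'person', 'ext': 'py'}
print(replace_variables('[name]_repository.[ext]', variables))  # person_repository.py
```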
#### File: templatizator/presentation/helper.py
```python
import re
from templatizator.domain.helper import OS
_NONBMP = re.compile(r'[\U00010000-\U0010FFFF]')
def _surrogatepair(match):
char = match.group()
assert ord(char) > 0xffff
encoded = char.encode('utf-16-le')
return (
chr(int.from_bytes(encoded[:2], 'little')) +
chr(int.from_bytes(encoded[2:], 'little')))
def get_tkinter_unicode(text):
'''Convert unicode icon to unicode pair that is readable to tkinter'''
return _NONBMP.sub(_surrogatepair, text.upper())
def is_unicode_available(text):
'''Verify if unicode passed text is available to use in this os'''
if OS.is_windows:
return True
try:
print(_NONBMP.sub(_surrogatepair, text.upper()))
except UnicodeError:
return False
return True
```
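tkinter cannot render characters outside the Basic Multilingual Plane directly, so the helper rewrites each such character as its UTF-16 surrogate pair. A self-contained check of the same transformation (note that `get_tkinter_unicode` additionally upper-cases its input):
```python
import re

_NONBMP = re.compile(r'[\U00010000-\U0010FFFF]')

def _surrogatepair(match):
    char = match.group()
    encoded = char.encode('utf-16-le')
    return (chr(int.from_bytes(encoded[:2], 'little')) +
            chr(int.from_bytes(encoded[2:], 'little')))

converted = _NONBMP.sub(_surrogatepair, '\U0001F4C1 open')
print([hex(ord(c)) for c in converted[:2]])  # ['0xd83d', '0xdcc1'] - the surrogate pair for U+1F4C1
```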
#### File: templatizator/presentation/widgets.py
```python
import tkinter as tk
class Tooltip:
'''
Create a tooltip for a given widget.
Inform col to add tooltip to a treeview column
'''
def __init__(self, widget, text='widget info', col=None, before=None):
self.widget = widget
self.text = text
self.tooltip_window = None
self.col = col
self.before = before
if col:
self.widget.bind('<Motion>', self.enter)
else:
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.close)
def enter(self, event=None):
        '''Shows the tooltip when the mouse enters'''
point = {'x': 0, 'y': 0}
point['x'] += self.widget.winfo_rootx() + 25
point['y'] += self.widget.winfo_rooty() + 20
if self.col:
self.close(event)
col = self.widget.identify_column(event.x)
iid = self.widget.identify('item', event.x, event.y)
if (col != self.col and self.col != '#') or not iid:
# do not show tooltip
return
if self.before:
show = self.before(col, iid, self)
if not show:
return
point['x'] += event.x
point['x'] -= 15
point['y'] += event.y
point['y'] -= 10
# creates a toplevel window
self.tooltip_window = tk.Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tooltip_window.wm_overrideredirect(True)
self.tooltip_window.wm_geometry("+%d+%d" % (point['x'], point['y']))
label = tk.Label(self.tooltip_window, text=self.text, justify='left',
background='#f7f7da', relief='solid', borderwidth=1,
font=("times", "8", "normal"))
label.pack(ipadx=1)
# pylint: disable=unused-argument
def close(self, event=None):
'''Closes tooltip when mouse leaves'''
if self.tooltip_window:
self.tooltip_window.destroy()
```
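A minimal usage sketch for the plain-widget case (assuming a running Tk interpreter and that the class above is importable from `templatizator.presentation.widgets`); passing `col='#1'` instead would attach the tooltip to a treeview column through the `<Motion>` binding:
```python
import tkinter as tk
from templatizator.presentation.widgets import Tooltip

root = tk.Tk()
button = tk.Button(root, text='Save')
button.pack()

# shows on <Enter>, hides on <Leave>
Tooltip(button, text='Save the configured templates into the project')

root.mainloop()
```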
#### File: templatizator/tests/test_configurable_application.py
```python
from os.path import join, basename, dirname, exists, normpath
from json import dumps
from pytest import fixture
from templatizator.domain.container import Container
from tests import configuration_path, project_path, configure_paths, \
delete_configuration_folders
from tests.file_application_test_helper import FileApplicationTestHelper
from tests.test_variable_application import add_variables
from tests.test_template_application import add_templates, templates_add
class TestConfigurableApplication:
initial_configurable_content_object = {'name': 'Test project',
'version': '1.0.0'}
configurable_content_object = {'name': 'Test project', 'version': '1.0.0',
'files': ['[template.All.name]'], 'paths': ['[template.All.path]'],
'relative_paths': ['[template.All.relative_path]']}
@classmethod
def setup_method(cls):
delete_configuration_folders()
@staticmethod
def get_configurables():
from templatizator.domain.domain import ConfigurableFile
path = join(project_path, 'package.json')
path2 = join(project_path, 'multiline_package.json')
return [ConfigurableFile(path), ConfigurableFile(path2)]
@fixture
def application(self):
configure_paths()
add_variables()
add_templates()
return Container.configurable_file_application
@fixture
def repository(self):
from templatizator.domain.repository import ConfigurableRepository
repository = ConfigurableRepository()
repository.path = join(Container.project_application.configuration_path,
basename(project_path))
return repository
@fixture
def configurables(self, application, repository):
obj = TestConfigurableApplication.configurable_content_object
configurables = TestConfigurableApplication.get_configurables()
for configurable in configurables:
configurable.name = application.get_filename(configurable.path)
content = dumps(obj, indent=2 \
if configurable.name == 'multiline_package.json' else None)
# apply inline template
# and add comma at the end of template lines on multiline_package
sufix = ','
prefix = ''
template = '{}"[template.All.{}]"{}'
replace = lambda prop: content.replace(
template.format('', prop, ''),
template.format(prefix, prop, sufix))
if configurable.name == 'package.json':
sufix = ', ]>'
prefix = '<['
content = replace('name')
content = replace('path')
content = replace('relative_path')
application.add(configurable, content)
return repository.get()
def test_get(self, application):
for configurable in TestConfigurableApplication.get_configurables():
content = application.get(configurable)
obj = TestConfigurableApplication.\
initial_configurable_content_object
content_result = dumps(obj, indent=2 \
if basename(configurable.path) == 'multiline_package.json' \
else None)
assert content == content_result
def test_get_created(self, application, configurables):
for configurable in configurables:
content = application.get(configurable)
init_obj = TestConfigurableApplication.\
initial_configurable_content_object
content_result = dumps(init_obj, indent=2 \
if configurable.name == 'multiline_package.json' else None)
assert content != content_result
def test_create_child(self, application, configurables):
FileApplicationTestHelper.test_create_child(application, configurables)
def test_save(self, application, repository, configurables):
FileApplicationTestHelper.test_save(application, configurables,
repository.get)
def test_save_file(self, application, repository, configurables):
FileApplicationTestHelper.test_save_file(application, configurables,
repository.get)
def test_remove(self, application, repository, configurables):
FileApplicationTestHelper.test_remove(application, configurables,
repository.get)
def test_get_filename(self, application, configurables):
assert configurables[0].name == 'package.json'
assert configurables[1].name == 'multiline_package.json'
def test_is_child(self, application, configurables):
for configurable in configurables:
assert exists(configurable.path)
assert exists(join(project_path, configurable.name))
assert application.is_child(project_path, configurable.path)
assert not application.is_child(dirname(project_path),
configurable.path)
assert not application.is_child(join(project_path, 'application'),
configurable.path)
def test_save_into_project(self, application, configurables):
Container.project_application.save_into_project()
expected_content = f'''{{
"name": "[pname]",
"version": "1.0.0",
"files": [
[files] <["[template.All.name]", ]>
],
"paths": [
[paths] <["[template.All.path]", ]>
],
"relative_paths": [
[relative_paths] <["[template.All.relative_path]", ]>
]
}}'''
indentation = ' '
files = {'files': '', 'paths': '', 'relative_paths': ''}
for template_info in templates_add:
directory, name = template_info.values()
name = name.replace('[name]', 'person').replace('[ext]', 'py')
path = normpath(join(project_path, directory, name))
rpath = normpath(join(directory, name))
files['files'] += f'{indentation}"{name}",\n'
files['paths'] += f'{indentation}"{path}",\n'
files['relative_paths'] += f'{indentation}"{rpath}",\n'
def replace_content(content, files, is_inline=False):
if is_inline:
files = {k: v.replace(' ', '').replace('\n', ' ')
for k, v in files.items()}
content = content.replace('[pname]', 'Test project')
content = content.replace('[files]', files['files'])\
.replace('[paths]', files['paths'])\
.replace('[relative_paths]', files['relative_paths'])
return content
expected_content_inline = expected_content.replace('\n', '')\
.replace(' ', '').replace(':', ': ').replace(',', ', ')
expected_content = expected_content.replace('<[', '').replace(' ]>', '')
expected_content = replace_content(expected_content, files)
expected_content_inline = replace_content(expected_content_inline,
files, True)
from re import sub
for configurable in configurables:
path = join(project_path, configurable.name)
assert exists(path)
is_inline = configurable.name == 'package.json'
content = application.get(configurable)
content_result = expected_content_inline if is_inline \
else expected_content
# test configurable template content
assert content == content_result
with open(path) as f:
content = f.read()
# remove inline template
regex = r'\<\[.*?\]\>' if is_inline else r'\n.*?\[.*?\].*?\n'
replacement = '' if is_inline else '\n'
content_result = sub(regex, lambda m: replacement, content_result)
# test configurable content result in project
assert content == content_result
def test_not_save_into_project(self, application, configurables):
variables = Container.variable_application.get()
FileApplicationTestHelper.test_not_save_into_project(application,
configurables, variables,
Container.project_application.save_into_project)
# def test_add(self):
# already tested on configurables fixture
```
#### File: templatizator/tests/test_presentation_window.py
```python
from unittest.mock import Mock, MagicMock
from templatizator.presentation.container import Container
from tests import container
namespace = 'templatizator.presentation.window'
def test_initialization(container):
win = Container.window
assert bool(win.application)
assert bool(win.template_application)
assert bool(win.configurable_application)
assert bool(win.variables)
assert bool(win.editor)
assert bool(win.window)
assert bool(win.treeview)
def test_icons(container, monkeypatch):
'''Test methods get_filetree_icon, get_filetree_action_icon
and get_filetree_checkbox
'''
from templatizator.presentation.window import ICONS, ICONS_UGLY, ICON_ADD,\
ICON_REMOVE, ICON_CHECKED, ICON_UNCHECKED
from templatizator.domain.domain import Directory, Template, \
ConfigurableFile
win = Container.window
# do not use helper to convert unicode icon
monkeypatch.setattr(f'{namespace}.get_tkinter_unicode', lambda i: i)
win.pretty_icons = True
node = Directory(opened=True)
assert node.open == True
icon = win.get_filetree_icon(node)
assert icon == ICONS.folderopened
win.pretty_icons = False
icon = win.get_filetree_icon(node)
assert icon == ICONS_UGLY.folderopened
assert icon != ICONS.folderopened
icon = win.get_filetree_action_icon(node)
assert icon == ICON_ADD
icon = win.get_filetree_checkbox(node)
assert icon == ''
win.pretty_icons = True
node = Directory(opened=False)
assert node.open == False
icon = win.get_filetree_icon(node)
assert icon == ICONS.folderclosed
win.pretty_icons = False
icon = win.get_filetree_icon(node)
assert icon == ICONS_UGLY.folderclosed
assert icon != ICONS.folderclosed
icon = win.get_filetree_action_icon(node)
assert icon == ICON_ADD
icon = win.get_filetree_checkbox(node)
assert icon == ''
win.pretty_icons = True
node = Template(save=True)
assert node.save == True
icon = win.get_filetree_icon(node)
assert icon == ICONS.template
win.pretty_icons = False
icon = win.get_filetree_icon(node)
assert icon == ICONS_UGLY.template
assert icon != ICONS.template
icon = win.get_filetree_action_icon(node)
assert icon == ICON_REMOVE
icon = win.get_filetree_checkbox(node)
assert icon == ICON_CHECKED
node = Template(save=False)
assert node.save == False
icon = win.get_filetree_checkbox(node)
assert icon == ICON_UNCHECKED
assert icon != ICON_CHECKED
win.pretty_icons = True
node = ConfigurableFile(save=True)
assert node.save == True
icon = win.get_filetree_icon(node)
assert icon == ICONS.configurable
win.pretty_icons = False
icon = win.get_filetree_icon(node)
assert icon == ICONS_UGLY.configurable
assert icon != ICONS.configurable
icon = win.get_filetree_action_icon(node)
assert icon == ICON_REMOVE
icon = win.get_filetree_checkbox(node)
assert icon == ICON_CHECKED
node = ConfigurableFile(save=False)
assert node.save == False
icon = win.get_filetree_checkbox(node)
assert icon == ICON_UNCHECKED
assert icon != ICON_CHECKED
def test_render_treeview(container):
from tkinter.ttk import Treeview
from templatizator.domain.domain import Project
from templatizator.presentation.window import Window
win = Container.window
tv = win.treeview
mock_win = Mock(spec=Window)
mock_win.treeview = MagicMock(spec=Treeview)
mock_win.filetree = Mock(spec=Project)
Window.render_treeview(mock_win)
mock_win.treeview.delete.assert_called_once()
mock_win.fill_treeview.assert_called_once()
mock_win.filetree = None
mock_win.reset_mock()
Window.render_treeview(mock_win)
mock_win.fill_treeview.assert_not_called()
#def test_fill_treeview(container):
#def test_select_project(container):
#def test_project_selected(container):
#def test_select_configuration(container):
#def test_configuration_selected(container):
#def test_add_template(container):
#def test_add_configurable(container):
#def test_open_file(container):
#def test_open_with(container):
#def test_remove_file(container):
#def test_row_popup_selected(container):
#def test_row_selected(container):
#def test_row_opened(container):
#def test_row_closed(container):
#def test_before_show_tooltip(container):
#def test_save_templates(container):
#def test_center(container):
```
{
"source": "jhonata-antunes/basic-compiler",
"score": 3
}
#### File: basic-compiler/modules/file_system.py
```python
class FileSystem:
def __init__(self, file_name):
self.file = open(file_name, 'rb')
def read_line(self):
if not self.file:
return 'EOF'
line = self.file.readline()
if not line:
self.file.close()
self.file = None
return 'EOF'
return line
```
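Because the file is opened in binary mode, `read_line` yields `bytes` lines and then the string `'EOF'` as a sentinel once the file is exhausted (and on every later call). A hedged usage sketch, assuming a BASIC source file named `program.bas` exists:
```python
fs = FileSystem('program.bas')

while True:
    line = fs.read_line()
    if line == 'EOF':   # sentinel string returned after the file is closed
        break
    print(line)         # a bytes object, e.g. b'10 PRINT "HELLO"\n'
```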
#### File: basic-compiler/modules/lexical_classifier.py
```python
from recognizers.lexical import Lexical
from tkn import Token
class LexicalClassifier:
def __init__(self, ascii_classifier):
self.ascii_classifier = ascii_classifier
self.lexical = Lexical()
self.current_token = None
def get_token(self):
token_value = ''
try:
while True:
if not self.current_token:
self.current_token = self.ascii_classifier.get_categorized_char()
if self.current_token.value == 'EOF':
raise ValueError
self.lexical.process_atom(self.current_token)
token_value += self.current_token.value
self.current_token = None
except ValueError:
token = Token(token_class=self.lexical.get_class(),
value=token_value.strip(' \n').rstrip(' \n'))
self.lexical.reset()
return token
```
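`get_token` keeps feeding categorized characters into the `Lexical` automaton until it rejects one (raising `ValueError`), then emits the accumulated lexeme with the class the automaton reached. The pipeline is normally wired front to back as sketched below (assuming the repository root is on `sys.path` and `program.bas` is a BASIC source file; `AsciiFilter` and `AsciiClassifier` are sibling modules of this repo):
```python
from modules.file_system import FileSystem
from modules.ascii_filter import AsciiFilter
from modules.ascii_classifier import AsciiClassifier
from modules.lexical_classifier import LexicalClassifier

fs = FileSystem(file_name='program.bas')
af = AsciiFilter(file_system=fs)
ac = AsciiClassifier(ascii_filter=af)
lc = LexicalClassifier(ascii_classifier=ac)

while True:
    token = lc.get_token()
    if token.value == 'EOF':
        break
    print(token.token_class, token.value)
```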
#### File: basic-compiler/recognizers/lexical.py
```python
from recognizers.automaton import Automaton
from tkn import Token
class Lexical(Automaton):
def __init__(self):
super().__init__([1, 2, 3, 4, 5, 6, 7, 8])
def get_class(self):
class_map = {
Token.INT: [1],
Token.ID: [2],
Token.SPECIAL: [3, 4, 5, 6, 7, 8]
}
if self.get_state() in class_map[Token.INT]:
return Token.INT
elif self.get_state() in class_map[Token.ID]:
return Token.ID
elif self.get_state() in class_map[Token.SPECIAL]:
return Token.SPECIAL
def process_atom(self, token):
        ve = False  # flags an invalid transition; triggers the ValueError below
if self.state == 0:
if token.is_digit():
self.state = 1
elif token.is_letter():
self.state = 2
elif token.is_special():
if token.value == '>':
self.state = 3
elif token.value == '<':
self.state = 5
elif token.value == ' ' or token.value == '\n':
pass
else:
self.state = 8
else:
ve = True
elif self.state == 1:
if token.is_digit():
pass
else:
ve = True
elif self.state == 2:
if token.is_letter() or token.is_digit():
pass
else:
ve = True
elif self.state == 3:
if token.value == '=':
self.state = 4
else:
ve = True
elif self.state == 4:
ve = True
elif self.state == 5:
if token.value == '=':
self.state = 6
elif token.value == '>':
self.state = 7
else:
ve = True
elif self.state == 6:
ve = True
elif self.state == 7:
ve = True
elif self.state == 8:
ve = True
else:
raise AttributeError("Current state '{}' does not exist".format(self.state))
if ve:
raise ValueError("{}: Invalid input '{}' to state '{}'"
.format(self.__class__.__name__, token.value, self.state))
```
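The accepting states map to three token classes: state 1 for integers, state 2 for identifiers, and states 3-8 for the special symbols (`>`, `>=`, `<`, `<=`, `<>` and the catch-all single specials). A rough walk-through of the transitions for the input `>=`, using a tiny stand-in for the categorized-character objects that `AsciiClassifier` normally produces (this assumes the `Automaton` base class starts in state 0, as `process_atom` implies):
```python
class FakeAtom:
    """Stand-in exposing only what process_atom consults."""
    def __init__(self, value):
        self.value = value
    def is_digit(self):
        return self.value.isdigit()
    def is_letter(self):
        return self.value.isalpha()
    def is_special(self):
        return not self.value.isalnum()

lex = Lexical()
lex.process_atom(FakeAtom('>'))   # state 0 -> 3
lex.process_atom(FakeAtom('='))   # state 3 -> 4
print(lex.get_class())            # Token.SPECIAL
```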
#### File: test/test_lexical_classifier/test_lexical_classifier.py
```python
import os
import sys
sys.path.insert(0, '{}/../../'.format(os.path.dirname(os.path.abspath(__file__))))
from modules.ascii_classifier import AsciiClassifier
from modules.ascii_filter import AsciiFilter
from modules.file_system import FileSystem
from modules.lexical_classifier import LexicalClassifier
def test_lexical_classifier():
path = os.path.dirname(os.path.abspath(__file__)) + '/'
fn_in = path + 'test_lexical_classifier_in.txt'
fn_out = path + 'test_lexical_classifier_out.txt'
fs = FileSystem(file_name=fn_in)
af = AsciiFilter(file_system=fs)
ac = AsciiClassifier(ascii_filter=af)
lc = LexicalClassifier(ascii_classifier=ac)
fs = None
af = None
ac = None
with open(fn_out, 'r') as f:
while True:
token = lc.get_token()
if token.value == 'EOF':
break
token_class, value = f.readline().rstrip('\n').split(',')
assert token_class == token.token_class
assert value == token.value
assert lc.get_token().value == 'EOF'
assert lc.get_token().value == 'EOF'
assert lc.get_token().value == 'EOF'
assert lc.get_token().value == 'EOF'
```
{
"source": "jhonata-antunes/python-live-video-straming",
"score": 3
}
#### File: jhonata-antunes/python-live-video-straming/server.py
```python
import argparse
import socket
import time
import cv2
import numpy as np
def arg_parse():
parser = argparse.ArgumentParser(description='Client')
parser.add_argument('--save', default=False, help='Save video', action='store_true')
parser.add_argument("--ip", help="Client IP address", default="localhost")
parser.add_argument("--port", help="UDP port number", type=int, default=60444)
return parser.parse_args()
def get_video_writer(frame):
w, h = frame.shape[1], frame.shape[0]
is_color = True
try:
frame.shape[2]
except IndexError:
is_color = False
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
vr = cv2.VideoWriter('video.avi', fourcc, 10, (w, h), is_color)
return vr
def main(args):
data = b''
buffer_size = 65536
window = 'video streaming'
out = None
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((args.ip, args.port))
if not args.save:
cv2.namedWindow(window, cv2.WINDOW_NORMAL)
cv2.resizeWindow(window, 600, 600)
try:
start = time.time()
while True:
data += sock.recv(buffer_size)
a = data.find(b'\xff\xd8')
b = data.find(b'\xff\xd9')
if a != -1 and b != -1:
jpg = data[a:b + 2]
vt = data[b + 2: b + 2 + 8]
data = data[b + 2 + 8:]
# decode frame and video time
frame = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
vt = np.fromstring(vt, dtype=np.float64)[0]
if args.save:
if out is None:
out = get_video_writer(frame)
out.write(frame)
else:
cv2.imshow(window, frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
end = time.time()
print('FPS: {0:0.2f}'.format(1 / (end - start)))
start = time.time()
except KeyboardInterrupt:
cv2.destroyAllWindows()
sock.close()
if args.save:
out.release()
cv2.destroyAllWindows()
sock.close()
if args.save:
out.release()
if __name__ == '__main__':
arguments = arg_parse()
main(arguments)
```
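The receive loop frames the stream by scanning the UDP buffer for the JPEG start (`\xff\xd8`) and end (`\xff\xd9`) markers; the 8 bytes right after the end marker carry the sender's timestamp as a float64. A standalone sketch of that framing logic on a fabricated buffer (using `np.frombuffer`, the non-deprecated equivalent of the `np.fromstring` calls above):
```python
import numpy as np

def extract_frame(data):
    """Return (jpeg_bytes, timestamp, remaining_buffer), or None if no full frame yet."""
    a = data.find(b'\xff\xd8')   # JPEG start-of-image marker
    b = data.find(b'\xff\xd9')   # JPEG end-of-image marker
    if a == -1 or b == -1:
        return None
    jpg = data[a:b + 2]
    timestamp = np.frombuffer(data[b + 2:b + 2 + 8], dtype=np.float64)[0]
    return jpg, timestamp, data[b + 2 + 8:]

buffer = b'\xff\xd8...jpeg payload...\xff\xd9' + np.float64(12.5).tobytes()
print(extract_frame(buffer)[1])  # 12.5
```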
{
"source": "JhonataAugust0/Analise-nucleotideos",
"score": 4
}
#### File: JhonataAugust0/Analise-nucleotideos/__init__.py
```python
cont_bac = dict()
cont_human = dict()
lista_bac = list()
lista_human = list()
entrada_bac = open("bacteria.fasta").read()
saida_bac = open("bacteria.html","w")
entrada_human = open("human.fasta").read()
saida_human = open("human.html","w")
entrada_bac = entrada_bac.replace("\n","")
entrada_human = entrada_human.replace("\n","")
def formacao_nucleotideos(codificacao):
"""
Essa função realiza as operações
de codificação e atribuição dos
pares de nucleotídeos existentes
no DNA humano e bacterial para
dicionários, que serão de suma
importância para a geração das
páginas HTML.
"""
for i in codificacao:
for j in codificacao:
cont_bac[i+j] = 0
cont_human[i+j] = 0
    # Loops that count the number of nucleotide pairs in each DNA sequence.
for a in range(len(entrada_bac)-1):
cont_bac[entrada_bac[a]+entrada_bac[a+1]] += 1
    for b in range(len(entrada_human) - 1):
        cont_human[entrada_human[b] + entrada_human[b + 1]] += 1
    # The loops above assign the pair counts to the dictionaries
return cont_bac, cont_human
def formacao_html():
"""
Essa função realiza as operações que
constroem as páginas em html que permitem
a visualização da representação dos nucle-
otídeos do DNA humano e bacterial.
"""
i = 1
for a in cont_bac:
transparencia_bac = cont_bac[a]/max(cont_bac.values())
        saida_bac.write("<div style='width:100px; border:1px solid #111; color:#fff; height:100px; float:left; background-color:rgba(0, 0, 0, " + str(transparencia_bac) + ")'>" + a + "</div>")
if i % 4 == 0:
saida_bac.write("<div style='clear:both'></div>")
i+=1
for b in cont_human:
transparencia_human = cont_human[b]/max(cont_human.values())
        saida_human.write("<div style='width:100px; border:1px solid #111; color:#fff; height:100px; float:left; background-color:rgba(0, 0, 0, " + str(transparencia_human) + ")'>" + b + "</div>")
if i % 4 == 0:
saida_human.write("<div style='clear:both'></div>")
i+=1
lista_bac.append(cont_bac.values())
lista_human.append(cont_human.values())
return lista_bac, lista_human
saida_bac.close()
saida_human.close()
```
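The core of the script is a dinucleotide (2-mer) count over each FASTA sequence; the counts are then divided by the maximum to produce the opacity of each HTML cell. A self-contained version of the counting step:
```python
from itertools import product

def count_dinucleotides(sequence, alphabet='ATCG'):
    """Count every overlapping pair of bases, as the script does per genome."""
    counts = {a + b: 0 for a, b in product(alphabet, repeat=2)}
    for i in range(len(sequence) - 1):
        counts[sequence[i] + sequence[i + 1]] += 1
    return counts

counts = count_dinucleotides('ATCGATTA')
print(counts['AT'])           # 2
print(max(counts.values()))   # 2 - the denominator used for the opacity
```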
{
"source": "JhonataAugust0/Python",
"score": 3
}
#### File: Aulas/Aula04/Buscas_e_navegacao.py
```python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from pprint import pprint
from urllib.parse import urlparse
class Buscas_e_navagacao:
def __init__(self):
self.driver = webdriver.Chrome(executable_path="/run/media/jhonata/_home/Programação/Python/Selenium/chromedriver")
def entrar_no_site(self, url):
driver = self.driver
driver.get(url)
time.sleep(2)
def buscar_lnks_da_pagina(self):
resultado01 = dict()
driver = self.driver
aside = driver.find_element_by_tag_name('aside')
aside_ancoras = aside.find_elements_by_tag_name('a')
for ancora in aside_ancoras:
resultado01[ancora.text] = ancora.get_attribute('href')
pprint(resultado01)
teste01 = Buscas_e_navagacao()
teste01.entrar_no_site('https://selenium.dunossauro.live/aula_04.html')
teste01.buscar_lnks_da_pagina()
```
#### File: exercicios/tabela verdade/tabela_logica.py
```python
def e(a, b):
if a and b:
return True
else:
return False
def ou(a, b):
if (not a) and (not b):
return False
else:
return True
A = [True, True, False, False]
B = [True, False, True, False]
for a, b in zip(A, B):
print("{} V {} = {}".format(a, b, ou(a, b)))
```
{
"source": "jhonatacaiob/Bootcamp-Data-Science",
"score": 4
}
#### File: Bootcamp-Data-Science/Secao-4/exemplo 1.py
```python
import matplotlib.pyplot as plt
import numpy as np
def f(x):
return x**2 + x + 1
def df(x):
return 2*x + 1
x = np.linspace(start = -3, stop = 3, num = 500)
x_novo = 3
x_anterior = 0
taxa_de_aprendizado = 0.1
precisao = 0.00001
x_novos = []
for i in range(50000):
x_anterior = x_novo
gradiente = df(x_anterior)
x_novo = x_anterior - gradiente * taxa_de_aprendizado
if(abs(x_anterior - x_novo) < precisao):
print("Numero de tentativas:", i)
break
x_novos.append(x_novo)
print("Valor minimo:", x_novo)
print("Inclinaçao, ou o valor de df(x) neste ponto:", df(x_novo))
print("Custo neste ponto:", f(x_novo))
x_novos = np.array(x_novos)
plt.figure(figsize=[20,5])
plt.subplot(1, 3, 1)
plt.title("Função de custo")
plt.xlim(-3, 3)
plt.ylim(0, 8)
plt.xlabel("x")
plt.ylabel("f(x)")
plt.plot(x, f(x), color = "blue", linewidth = 5, alpha = 0.8)
plt.scatter(x_novos, f(x_novos), color = "red", s = 100, alpha = 0.6)
plt.subplot(1, 3, 2)
plt.title("Derivada da funçao de custo")
plt.xlim(-2, 3)
plt.ylim(-3, 6)
plt.grid("True")
plt.xlabel("x")
plt.ylabel("df(x)")
plt.plot(x, df(x), color = "skyblue", linewidth = 5, alpha = 0.8)
plt.scatter(x_novos, df(x_novos), color = "red", s = 100, alpha = 0.6)
plt.subplot(1, 3, 3)
plt.title("Gradiente (Zoom)")
plt.xlim(-0.55, 0.2)
plt.ylim(-0.3, 0.8)
plt.grid("True")
plt.xlabel("x")
plt.ylabel("df(x)")
plt.plot(x, df(x), color = "skyblue", linewidth = 5, alpha = 0.8)
plt.scatter(x_novos, df(x_novos), color = "red", s = 100, alpha = 0.6)
#plt.show()
#plt.savefig("Aula 05.jpg")
```
#### File: Bootcamp-Data-Science/Secao-4/exemplo 2.py
```python
import matplotlib.pyplot as plt
import numpy as np
def g(x):
return x**4 - 4*(x**2) + 5
def dg(x):
return 4*(x**3) - 8*x
def gradient_descent(derivative_func, initial_guess, taxa_de_aprendizado = 0.01, precisao = 0.000001):
x_novo = initial_guess
lista_x = []
lista_inc = []
for i in range(500):
x_anterior = x_novo
gradiente = derivative_func(x_anterior)
x_novo = x_anterior - gradiente * taxa_de_aprendizado
lista_x.append(x_novo)
lista_inc.append(derivative_func(x_novo))
if(abs(x_anterior - x_novo) < precisao):
break
return x_novo, lista_x, lista_inc
x = np.linspace(start = -2, stop = 2, num = 1000)
minimo, lista_x, lista_deriv = gradient_descent(derivative_func = dg,initial_guess=3)
print("Valor minimo local:", minimo)
print("Passos dados:", len(lista_x))
plt.subplot(1, 2, 1)
plt.title("Função de custo")
plt.xlim(-2, 2)
plt.ylim(0.5, 5.5)
plt.xlabel("x")
plt.ylabel("g(x)")
plt.plot(x, g(x), color = "blue", linewidth = 5, alpha = 0.8)
plt.scatter(lista_x, g(np.array(lista_x)), color = "red", s = 100, alpha = 0.6)
plt.subplot(1, 2, 2)
plt.title("Derivada da funçao de custo")
plt.xlim(-2, 2)
plt.ylim(-6, 8)
plt.grid("True")
plt.xlabel("x")
plt.ylabel("dg(x)")
plt.plot(x, dg(x), color = "skyblue", linewidth = 5, alpha = 0.8)
plt.scatter(lista_x, lista_deriv, color = "red", s = 100, alpha = 0.6)
plt.savefig("Aula 06.jpg")
```
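Because `gradient_descent` takes the derivative and the starting point as parameters, the same helper reproduces the first example's result, the minimum of f(x) = x**2 + x + 1 at x = -0.5 where f'(x) = 2x + 1 vanishes (a sketch, assuming the function defined above is in scope):
```python
minimo, lista_x, lista_inc = gradient_descent(derivative_func=lambda x: 2 * x + 1,
                                              initial_guess=3,
                                              taxa_de_aprendizado=0.1)
print(round(minimo, 4))   # -0.5
print(len(lista_x))       # number of steps taken before hitting the precision threshold
```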
{
"source": "jhonatan612/Projeto-exemplo",
"score": 3
}
#### File: jhonatan612/Projeto-exemplo/principal.py
```python
def somar(x, y):
    return x + y
def subtrair(x, y):
    return x - y
# Comment
```
{
"source": "JHONATAN9A/Algritmo_num_narcisistas",
"score": 4
}
#### File: JHONATAN9A/Algritmo_num_narcisistas/ArmstrongN.py
```python
def proceso(num, suma=0):
numero = []
for i in str(num):
exp = int(i) ** len(str(num))
numero.append(exp)
if len(numero) == len(str(num)):
total = sum(numero)
return num, total
numero.clear()
entrada = input()
datos = []
for i in range(int(entrada)):
entrada2 = input()
datos.append(entrada2)
for n in datos:
resul1, resul2 = proceso(int(n))
if resul1 == resul2:
print("Armstrong")
elif resul1 != resul2:
print("Not Armstrong")
```
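`proceso` implements the classic narcissistic (Armstrong) number test: a number is narcissistic when it equals the sum of its digits, each raised to the number of digits. A compact equivalent for reference:
```python
def is_armstrong(n):
    digits = str(n)
    return n == sum(int(d) ** len(digits) for d in digits)

print(is_armstrong(153))   # True: 1**3 + 5**3 + 3**3 == 153
print(is_armstrong(154))   # False
```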
{
"source": "JHONATAN9A/Pagina_Web_Django",
"score": 2
}
#### File: website/siteapp/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def covid19(request):
dic ={'etiqueta':"Esto se introdujo deste django"}
return render(request,"Covid19.html",context=dic)
```
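For the view to be reachable it still needs a URL route; a typical hookup would look like the following (a hypothetical `urls.py`, not part of the excerpt above):
```python
from django.urls import path
from siteapp.views import covid19

urlpatterns = [
    path('covid19/', covid19, name='covid19'),
]
```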
{
"source": "jhonatancasale/appointment-system",
"score": 2
}
#### File: scheduling_system/appointment/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.db.models import TextField
class Appointment(models.Model):
date = models.DateField()
start_at = models.TimeField()
end_at = models.TimeField()
patient = models.ForeignKey(User, on_delete=models.PROTECT)
procedure: TextField = models.TextField()
def __str__(self):
return '{date!r}, [{start_at!r} - {end_at!r}] - {patient!r}'.format(
date=str(self.date),
start_at=self.start_at.strftime("%H:%M"),
end_at=self.end_at.strftime("%H:%M"),
patient=self.patient.username
)
```
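A hedged usage sketch from a Django shell (assuming migrations have been applied; the import path below follows the file header and may differ in the actual project layout):
```python
from datetime import date, time
from django.contrib.auth.models import User
from appointment.models import Appointment

patient = User.objects.create_user(username='maria')
appt = Appointment.objects.create(
    date=date(2024, 5, 20),
    start_at=time(9, 0),
    end_at=time(9, 30),
    patient=patient,
    procedure='Routine check-up',
)
print(appt)  # '2024-05-20', ['09:00' - '09:30'] - 'maria'
```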
{
"source": "jhonatancasale/learning-python",
"score": 4
}
#### File: learning-python/snippets/defaultdict.function.py
```python
from collections import defaultdict
def default() -> str:
return "Maybe I missed something?"
def main() -> None:
print()
default_dict = defaultdict(default)
default_dict['a'] = 1
default_dict['b'] = 2
print(default_dict['a'])
print(default_dict['b'])
print(default_dict['c'])
if __name__ == '__main__':
main()
```
#### File: learning-python/snippets/defaultdict.lambda.py
```python
from collections import defaultdict
def main() -> None:
print()
default_dict = defaultdict(lambda: "Maybe I missed something?")
default_dict['a'] = 1
default_dict['b'] = 2
print(default_dict['a'])
print(default_dict['b'])
print(default_dict['c'])
if __name__ == '__main__':
main()
```
#### File: learning-python/snippets/global.py
```python
print()
name = 'abc'.upper()
def get_name():
name = 'def'.upper()
print(f"inside: {name}")
get_name()
print(f"Outside: {name}")
print()
def get_name():
global name
name = 'def'.upper()
print(f"inside: {name}")
get_name()
print(f"Outside: {name}")
def get_name():
global name
#but
#name: str = 'gHi'.upper()
# SyntaxError: annotated name 'name' can't be global
#and
#global name = 'gHi'.upper()
# ^
#SyntaxError: invalid syntax
print(f"inside: {name}")
get_name()
print(f"Outside: {name}")
```
#### File: learning-python/snippets/set.difference.py
```python
def main() -> None:
s = set("Test")
print(s.difference("Another"))
print(s.difference(set(['a', 'b', 'c', 'd'])))
print(s.difference(['a', 'b', 'c', 'd']))
print(s.difference(enumerate(['a', 'b', 'c', 'd'])))
print(s.difference({"Another":1}))
print(s - set("Another"))
if __name__ == '__main__':
main()
```
#### File: learning-python/snippets/set.union.py
```python
def main() -> None:
s = set("Test")
print(s.union("Another"))
#{'n', 'r', 'o', 'h', 's', 'T', 'A', 'e', 't'}
print(s.union(set(['T', 'e', 's', 't'])))
#{'s', 'T', 'e', 't'}
print(s.union(['T', 'E', 'S', 'B']))
#{'B', 's', 'S', 'T', 'E', 'e', 't'}
# immutable!
print(s.union("Another"))
#{'n', 'r', 'o', 'h', 's', 'T', 'A', 'e', 't'}
print(s.union(enumerate(['T', 'e', 's', 't'])))
#{(1, 'e'), (2, 's'), (3, 't'), (0, 'T'), 's', 'T', 'e', 't'}
print(s.union({"Test":1}))
#{'s', 'T', 'Test', 'e', 't'}
print(s | set("Test"))
#{'s', 'T', 'e', 't'}
if __name__ == '__main__':
print()
main()
```
{
"source": "jhonatancasale/ML-T3",
"score": 3
}
#### File: dev/get.data.from.web/get.data.from.web.py
```python
import logging
import sys
import requests
import os.path
logging.basicConfig(filename='history.log', level=logging.DEBUG,
format='%(asctime)s:%(levelname)s:%(message)s'
)
BASE_YEAR = 2016
def main():
'''Retrieve some data from http://www.fuvest.br'''
range_years = int(sys.argv[1]) if len(sys.argv) > 1 else 10
years = list(range(BASE_YEAR, BASE_YEAR - range_years, -1))
logging.debug('Trying to get data of the last %d years', range_years)
for year in years:
if year >= 2014: # For some magical reason the url changes in this year
common_url = 'http://www.fuvest.br/vest{0}/download/FUVEST_{0}_qase_{1}_car_fuvest_{0}.pdf'
else:
common_url = 'http://www.fuvest.br/vest{0}/download/qase_{1}_car.pdf'
for phase in ['inscr', 'conv', '1matr']:
url = common_url.format(year, phase)
filename = 'FUVEST_{0}_qase_{1}_car_fuvest_{0}.pdf'.format(year, phase)
logging.debug('Request year: %d to server', year)
if os.path.isfile(filename):
logging.debug('Skiping this, file already exists')
continue
try:
data = requests.get(url, stream=True)
except FileNotFoundError as e:
logging.debug('Fails because of %s', e)
else:
logging.debug('Done!')
logging.debug('Saving data on file %s', filename)
try:
f = open(filename, 'wb')
f.write(data.content)
f.close()
except IOError as e:
logging.debug('Fails because of %s', e)
else:
logging.debug('Done!')
logging.debug('Finished')
if __name__ == '__main__':
main()
```
#### File: dev/questionnaire/main.py
```python
import pandas as pd
import numpy as np
from parse import *
from question import *
import sys
def main():
data = pd.read_csv("../../../dataset/inscr_db.csv")
print 'You can choose one of these options:'
# best so we can use 2 more correlation between variables plus new careears have been created.
print '\t1 - use 3 last years of data.'
#more data!
print '\t2 - using all 10 last years of data.'
print '\t3 - specify year.'
year = raw_input()
print 'You can choose one of these options:'
print '\t1 - answer questionary'
print '\t2 - pass filename'
opt = raw_input()
    if opt == '1':  # raw_input returns a string
        query = getInstance()
    else:
        filename = raw_input()  # path to a CSV with the answers
        query = pd.read_csv(filename)
main()
```
{
"source": "JhonatanGuilherme/GraphTheory",
"score": 2
}
#### File: GraphTheory/Roteiro 6/grafo_test.py
```python
import unittest
from meu_grafo_matriz_adjacencia_nao_dir import *
from bibgrafo.grafo_exceptions import *
class TestGrafo(unittest.TestCase):
def setUp(self):
# Grafo da Paraíba
self.g_p = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
self.g_p.adicionaAresta('a1', 'J', 'C')
self.g_p.adicionaAresta('a2', 'C', 'E')
self.g_p.adicionaAresta('a3', 'C', 'E')
self.g_p.adicionaAresta('a4', 'P', 'C')
self.g_p.adicionaAresta('a5', 'P', 'C')
self.g_p.adicionaAresta('a6', 'T', 'C')
self.g_p.adicionaAresta('a7', 'M', 'C')
self.g_p.adicionaAresta('a8', 'M', 'T')
self.g_p.adicionaAresta('a9', 'T', 'Z')
# Grafo da Paraíba sem arestas paralelas
self.g_p_sem_paralelas = MeuGrafo(['J', 'C', 'E', 'P', 'M', 'T', 'Z'])
self.g_p_sem_paralelas.adicionaAresta('a1', 'J', 'C')
self.g_p_sem_paralelas.adicionaAresta('a2', 'C', 'E')
self.g_p_sem_paralelas.adicionaAresta('a3', 'P', 'C')
self.g_p_sem_paralelas.adicionaAresta('a4', 'T', 'C')
self.g_p_sem_paralelas.adicionaAresta('a5', 'M', 'C')
self.g_p_sem_paralelas.adicionaAresta('a6', 'M', 'T')
self.g_p_sem_paralelas.adicionaAresta('a7', 'T', 'Z')
# Grafos completos
self.g_c = MeuGrafo(['J', 'C', 'E', 'P'])
self.g_c.adicionaAresta('a1','J','C')
self.g_c.adicionaAresta('a2', 'J', 'E')
self.g_c.adicionaAresta('a3', 'J', 'P')
self.g_c.adicionaAresta('a4', 'E', 'C')
self.g_c.adicionaAresta('a5', 'P', 'C')
self.g_c.adicionaAresta('a6', 'P', 'E')
self.g_c2 = MeuGrafo(['Nina', 'Maria'])
self.g_c2.adicionaAresta('amiga', 'Nina', 'Maria')
self.g_c3 = MeuGrafo(['J'])
# Grafos com laco
self.g_l1 = MeuGrafo(['A', 'B', 'C', 'D'])
self.g_l1.adicionaAresta('a1', 'A', 'A')
self.g_l1.adicionaAresta('a2', 'A', 'B')
self.g_l1.adicionaAresta('a3', 'A', 'A')
self.g_l2 = MeuGrafo(['A', 'B', 'C', 'D'])
self.g_l2.adicionaAresta('a1', 'A', 'B')
self.g_l2.adicionaAresta('a2', 'B', 'B')
self.g_l2.adicionaAresta('a3', 'B', 'A')
self.g_l3 = MeuGrafo(['A', 'B', 'C', 'D'])
self.g_l3.adicionaAresta('a1', 'C', 'A')
self.g_l3.adicionaAresta('a2', 'C', 'C')
self.g_l3.adicionaAresta('a3', 'D', 'D')
self.g_l3.adicionaAresta('a4', 'D', 'D')
self.g_l4 = MeuGrafo(['D'])
self.g_l4.adicionaAresta('a1', 'D', 'D')
self.g_l5 = MeuGrafo(['C', 'D'])
self.g_l5.adicionaAresta('a1', 'D', 'C')
self.g_l5.adicionaAresta('a2', 'C', 'C')
# Grafos desconexos
self.g_d = MeuGrafo(['A', 'B', 'C', 'D'])
self.g_d.adicionaAresta('asd', 'A', 'B')
# Grafos não direcionados
self.g_nd = MeuGrafo(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
self.g_nd.adicionaAresta('a1', 'A', 'B')
self.g_nd.adicionaAresta('a2', 'A', 'G')
self.g_nd.adicionaAresta('a3', 'A', 'J')
self.g_nd.adicionaAresta('a4', 'G', 'K')
self.g_nd.adicionaAresta('a5', 'J', 'K')
self.g_nd.adicionaAresta('a6', 'G', 'J')
self.g_nd.adicionaAresta('a7', 'I', 'J')
self.g_nd.adicionaAresta('a8', 'G', 'I')
self.g_nd.adicionaAresta('a9', 'G', 'H')
self.g_nd.adicionaAresta('a10', 'F', 'H')
self.g_nd.adicionaAresta('a11', 'B', 'F')
self.g_nd.adicionaAresta('a12', 'B', 'G')
self.g_nd.adicionaAresta('a13', 'B', 'C')
self.g_nd.adicionaAresta('a14', 'C', 'D')
self.g_nd.adicionaAresta('a15', 'D', 'E')
self.g_nd.adicionaAresta('a16', 'B', 'D')
self.g_nd.adicionaAresta('a17', 'B', 'E')
def test_adiciona_aresta(self):
self.assertTrue(self.g_p.adicionaAresta('a10', 'J', 'C'))
with self.assertRaises(ArestaInvalidaException):
self.assertTrue(self.g_p.adicionaAresta('b1', '', 'C'))
with self.assertRaises(ArestaInvalidaException):
self.assertTrue(self.g_p.adicionaAresta('b1', 'A', 'C'))
with self.assertRaises(ArestaInvalidaException):
self.g_p.adicionaAresta('')
with self.assertRaises(ArestaInvalidaException):
self.g_p.adicionaAresta('aa-bb')
with self.assertRaises(ArestaInvalidaException):
self.g_p.adicionaAresta('x', 'J', 'V')
with self.assertRaises(ArestaInvalidaException):
self.g_p.adicionaAresta('a1', 'J', 'C')
def test_vertices_nao_adjacentes(self):
self.assertEqual(self.g_p.vertices_nao_adjacentes(), ['J-E', 'J-P', 'J-M', 'J-T', 'J-Z',
'C-Z', 'E-P', 'E-M', 'E-T', 'E-Z', 'P-M',
'P-T', 'P-Z', 'M-Z'])
self.assertEqual(self.g_p_sem_paralelas.vertices_nao_adjacentes(), ['J-E', 'J-P', 'J-M', 'J-T',
'J-Z', 'C-Z', 'E-P', 'E-M',
'E-T', 'E-Z', 'P-M', 'P-T',
'P-Z', 'M-Z'])
self.assertEqual(self.g_c.vertices_nao_adjacentes(), [])
self.assertEqual(self.g_l1.vertices_nao_adjacentes(), ['A-C', 'A-D', 'B-C', 'B-D', 'C-D'])
self.assertEqual(self.g_l2.vertices_nao_adjacentes(), ['A-C', 'A-D', 'B-C', 'B-D', 'C-D'])
self.assertEqual(self.g_l3.vertices_nao_adjacentes(), ['A-B', 'A-D', 'B-C', 'B-D', 'C-D'])
self.assertEqual(self.g_nd.vertices_nao_adjacentes(), ['A-C', 'A-D', 'A-E', 'A-F', 'A-H', 'A-I',
'A-K', 'B-H', 'B-I', 'B-J', 'B-K', 'C-E',
'C-F', 'C-G', 'C-H', 'C-I', 'C-J', 'C-K',
'D-F', 'D-G', 'D-H', 'D-I', 'D-J', 'D-K',
'E-F', 'E-G', 'E-H', 'E-I', 'E-J', 'E-K',
'F-G', 'F-I', 'F-J', 'F-K', 'H-I', 'H-J',
'H-K', 'I-K'])
def test_ha_laco(self):
self.assertFalse(self.g_p.ha_laco())
self.assertFalse(self.g_p_sem_paralelas.ha_laco())
self.assertFalse(self.g_c2.ha_laco())
self.assertTrue(self.g_l1.ha_laco())
self.assertTrue(self.g_l2.ha_laco())
self.assertTrue(self.g_l3.ha_laco())
self.assertTrue(self.g_l4.ha_laco())
self.assertTrue(self.g_l5.ha_laco())
def test_grau(self):
# Paraíba
self.assertEqual(self.g_p.grau('J'), 1)
self.assertEqual(self.g_p.grau('C'), 7)
self.assertEqual(self.g_p.grau('E'), 2)
self.assertEqual(self.g_p.grau('P'), 2)
self.assertEqual(self.g_p.grau('M'), 2)
self.assertEqual(self.g_p.grau('T'), 3)
self.assertEqual(self.g_p.grau('Z'), 1)
with self.assertRaises(VerticeInvalidoException):
self.assertEqual(self.g_p.grau('G'), 5)
self.assertEqual(self.g_d.grau('A'), 1)
self.assertEqual(self.g_d.grau('C'), 0)
self.assertNotEqual(self.g_d.grau('D'), 2)
# Completos
self.assertEqual(self.g_c.grau('J'), 3)
self.assertEqual(self.g_c.grau('C'), 3)
self.assertEqual(self.g_c.grau('E'), 3)
self.assertEqual(self.g_c.grau('P'), 3)
# Com laço. Lembrando que cada laço conta uma única vez por vértice para cálculo do grau
self.assertEqual(self.g_l1.grau('A'), 5)
self.assertEqual(self.g_l2.grau('B'), 4)
self.assertEqual(self.g_l4.grau('D'), 2)
def test_ha_paralelas(self):
self.assertTrue(self.g_p.ha_paralelas())
self.assertFalse(self.g_p_sem_paralelas.ha_paralelas())
self.assertFalse(self.g_c.ha_paralelas())
self.assertFalse(self.g_c2.ha_paralelas())
self.assertFalse(self.g_c3.ha_paralelas())
self.assertTrue(self.g_l1.ha_paralelas())
def test_arestas_sobre_vertice(self):
self.assertEqual(set(self.g_p.arestas_sobre_vertice('J')), set(['a1']))
self.assertEqual(set(self.g_p.arestas_sobre_vertice('C')), set(['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7']))
self.assertEqual(set(self.g_p.arestas_sobre_vertice('M')), set(['a7', 'a8']))
self.assertEqual(set(self.g_l2.arestas_sobre_vertice('B')), set(['a1', 'a2', 'a3']))
self.assertEqual(set(self.g_d.arestas_sobre_vertice('C')), set())
self.assertEqual(set(self.g_d.arestas_sobre_vertice('A')), set(['asd']))
with self.assertRaises(VerticeInvalidoException):
self.g_p.arestas_sobre_vertice('A')
def test_eh_completo(self):
self.assertFalse(self.g_p.eh_completo())
self.assertFalse((self.g_p_sem_paralelas.eh_completo()))
self.assertTrue((self.g_c.eh_completo()))
self.assertTrue((self.g_c2.eh_completo()))
self.assertTrue((self.g_c3.eh_completo()))
self.assertFalse((self.g_l1.eh_completo()))
self.assertFalse((self.g_l2.eh_completo()))
self.assertFalse((self.g_l3.eh_completo()))
self.assertFalse((self.g_l4.eh_completo()))
self.assertFalse((self.g_l5.eh_completo()))
if __name__ == '__main__':
unittest.main()
```
{
"source": "jhonatan-lopes/alive-progress",
"score": 3
}
#### File: utils/terminal/jupyter.py
```python
from types import SimpleNamespace
from . import tty
def get(original):
def cols():
# it seems both `jupyter notebook` and `jupyter-lab` do not return cols, only 80 default.
return 120
def clear_line():
write(_clear_line)
flush()
def clear_end_line(available=None):
for _ in range(available or 0):
write(' ')
flush()
clear_end_screen = clear_end_line
# it seems spaces are appropriately handled to not wrap lines.
_clear_line = f'\r{" " * cols()}\r'
from .void import factory_cursor_up, hide_cursor, show_cursor # noqa
flush, write, carriage_return = original.flush, original.write, original.carriage_return
return SimpleNamespace(**locals())
BASE = get(tty.BASE) # support for jupyter notebooks.
```
#### File: utils/terminal/void.py
```python
def write(_text):
return 0
def flush():
pass
def _ansi_escape_sequence(_=''):
def inner(_available=None):
pass
return inner
clear_line = _ansi_escape_sequence()
clear_end_line = _ansi_escape_sequence()
clear_end_screen = _ansi_escape_sequence()
hide_cursor = _ansi_escape_sequence()
show_cursor = _ansi_escape_sequence()
factory_cursor_up = lambda _: _ansi_escape_sequence()
def cols():
return 0 # more details in `alive_progress.tools.sampling#overhead`.
carriage_return = ''
```
#### File: tests/animations/test_spinners.py
```python
import pytest
from alive_progress.animations.spinners import alongside_spinner_factory, \
bouncing_spinner_factory, delayed_spinner_factory, frame_spinner_factory, \
scrolling_spinner_factory, sequential_spinner_factory
from alive_progress.utils.cells import join_cells
@pytest.mark.parametrize('frames, expected', [
('a\nb', (('a', ' ', 'b'),)),
('abc', (('a', 'b', 'c'),)),
(('a\nb', '\nc '), (('a b', ' c '),)),
(('a ', ' b ', ' c'), (('a ', ' b ', ' c'),)),
(('a', '(a)', ' (*) '), (('aaaaa', '(a)(a', ' (*) '),)),
(('ok', '😺😺'), (('okok', '😺😺'),)),
])
def test_frame_spinner(frames, expected):
spinner_factory = frame_spinner_factory(frames)
spinner = spinner_factory() # natural spinner size.
assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('length, block, background, right, hide, expected', [
(None, None, ' ', True, True, ((' ', 'c ', 'bc ', 'abc', ' ab', ' a'),)),
(None, None, ' ', False, True, ((' ', ' a', ' ab', 'abc', 'bc ', 'c '),)),
(None, None, ' ', True, False, (('abc', 'cab', 'bca'),)),
(None, None, ' ', False, False, (('abc', 'bca', 'cab'),)),
(2, None, '~', True, True, (('~~', 'c~', 'bc', 'ab', '~a'),)),
(2, None, '~', True, False, (('bc', 'ab', 'ca'),)),
(2, None, '~', False, True, (('~~', '~a', 'ab', 'bc', 'c~'),)),
(2, None, '~', False, False, (('ab', 'bc', 'ca'),)),
(3, None, '~', True, True, (('~~~', 'c~~', 'bc~', 'abc', '~ab', '~~a'),)),
(3, None, '~', True, False, (('abc', 'cab', 'bca'),)),
(3, None, '~', False, True, (('~~~', '~~a', '~ab', 'abc', 'bc~', 'c~~'),)),
(3, None, '~', False, False, (('abc', 'bca', 'cab'),)),
(4, None, ' ', True, True, ((' ', 'c ', 'bc ', 'abc ', ' abc', ' ab', ' a'),)),
(4, None, ' ', True, False, (('abc ', ' abc', 'c ab', 'bc a'),)),
(4, None, ' ', False, True, ((' ', ' a', ' ab', ' abc', 'abc ', 'bc ', 'c '),)),
(4, None, ' ', False, False, ((' abc', 'abc ', 'bc a', 'c ab'),)),
(4, 1, '_', True, True, (('____', 'a___', '_a__', '__a_', '___a'),
('____', 'b___', '_b__', '__b_', '___b'),
('____', 'c___', '_c__', '__c_', '___c'))),
(4, 2, '_', True, False, (('aa__', '_aa_', '__aa', 'b__a'),
('bb__', '_bb_', '__bb', 'c__b'),
('cc__', '_cc_', '__cc', 'a__c'))),
])
def test_scrolling_spinner(length, block, background, right, hide, expected):
spinner_factory = scrolling_spinner_factory('abc', length, block, background,
right=right, hide=hide)
spinner = spinner_factory() # natural spinner size.
assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('length, block, background, hide, expected', [
(None, None, None, True, ((' ', 'c ', 'bc ', 'abc', ' ab', ' a'),
(' ', ' d', ' de', 'def', 'ef ', 'f '),)),
(None, None, None, False, (('abc',), ('def',),)),
(2, None, '~', True, (('~~', 'c~', 'bc', 'ab', '~a'), ('~~', '~d', 'de', 'ef', 'f~'),)),
(2, None, '~', False, (('bc', 'ab'), ('de', 'ef'),)),
(3, None, '+', True, (('+++', 'c++', 'bc+', 'abc', '+ab', '++a'),
('+++', '++d', '+de', 'def', 'ef+', 'f++'),)),
(3, None, '+', False, (('abc',), ('def',),)),
(4, None, ' ', True, ((' ', 'c ', 'bc ', 'abc ', ' abc', ' ab', ' a'),
(' ', ' d', ' de', ' def', 'def ', 'ef ', 'f '),)),
(4, None, ' ', False, (('abc ', ' abc'), (' def', 'def '),)),
(3, 1, '_', True, (('___', 'a__', '_a_', '__a'),
('___', '__d', '_d_', 'd__'),
('___', 'b__', '_b_', '__b'),
('___', '__e', '_e_', 'e__'),
('___', 'c__', '_c_', '__c'),
('___', '__f', '_f_', 'f__'))),
(5, 2, '_', False, (('aa___', '_aa__', '__aa_', '___aa'),
('___dd', '__dd_', '_dd__', 'dd___'),
('bb___', '_bb__', '__bb_', '___bb'),
('___ee', '__ee_', '_ee__', 'ee___'),
('cc___', '_cc__', '__cc_', '___cc'),
('___ff', '__ff_', '_ff__', 'ff___'))),
])
def test_bouncing_spinner(length, block, background, hide, expected):
spinner_factory = bouncing_spinner_factory(('abc', 'def'), length, block, background,
right=True, hide=hide)
spinner = spinner_factory() # natural spinner size.
assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('1a', '2b', '3c'),)),
(('12', 'abc'), (('1a', '2b', '1c', '2a', '1b', '2c'),)),
((('12', '34', '56'), 'ab'), (('12a', '34b', '56a', '12b', '34a', '56b'),)),
])
def test_alongside_spinner(inputs, expected, spinner_test):
spinner_factory = alongside_spinner_factory(*(spinner_test(x) for x in inputs))
spinner = spinner_factory() # natural spinner size.
assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('1a', '2b', '3c'),)),
(('12', 'abc'), (('1a', '2b'), ('1c', '2a'), ('1b', '2c'))),
((('12', '34', '56'), 'ab'), (('12a', '34b', '56a'), ('12b', '34a', '56b'))),
])
def test_alongside_spinner_with_pivot(inputs, expected, spinner_test):
spinner_factory = alongside_spinner_factory(*(spinner_test(x) for x in inputs), pivot=0)
spinner = spinner_factory() # natural spinner size.
assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('11a', '22b', '33c'),)),
(('12', 'abc'), (('11a', '22b', '11c', '22a', '11b', '22c'),)),
((('12', '34', '56'), 'ab'), (('12a', '34b', '56a', '12b', '34a', '56b'),)),
])
def test_alongside_spinner_custom(inputs, expected, spinner_test):
spinner_factory = alongside_spinner_factory(*(spinner_test(x) for x in inputs))
spinner = spinner_factory(3) # custom spinner size.
assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('1',), ('a',), ('2',), ('b',), ('3',), ('c',))),
(('12', 'abc'), (('1',), ('a',), ('2',), ('b',), ('1',), ('c',),
('2',), ('a',), ('1',), ('b',), ('2',), ('c',))),
((('12', '34', '56'), 'ab'), (('1', '2'), ('a',), ('3', '4'), ('b',), ('5', '6'), ('a',),
('1', '2'), ('b',), ('3', '4'), ('a',), ('5', '6'), ('b',))),
])
def test_sequential_spinner(inputs, expected, spinner_test):
spinner_factory = sequential_spinner_factory(*(spinner_test(*x) for x in inputs))
spinner = spinner_factory() # natural spinner size.
assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('inputs, expected', [
(('123', 'abc'), (('1',), ('2',), ('3',), ('a',), ('b',), ('c',))),
(('12', 'abc'), (('1',), ('2',), ('a',), ('b',), ('c',))),
((('12', '34', '56'), 'ab'), (('1', '2'), ('3', '4'), ('5', '6'), ('a',), ('b',))),
])
def test_sequential_spinner_no_intermix(inputs, expected, spinner_test):
spinner_factory = sequential_spinner_factory(*(spinner_test(*x) for x in inputs),
intermix=False)
spinner = spinner_factory() # natural spinner size.
assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
@pytest.mark.parametrize('copies, offset, expected', [
(3, 1, (('123', '234', '345', '451', '512'),)),
(4, 2, (('1352', '2413', '3524', '4135', '5241'),)),
])
def test_delayed_spinner(copies, offset, expected, spinner_test):
spinner_factory = delayed_spinner_factory(spinner_test('12345'), copies, offset)
spinner = spinner_factory() # natural spinner size.
assert tuple(tuple(join_cells(f) for f in spinner()) for _ in expected) == expected
``` |
{
"source": "jhonatanlteodoro/ecommerce-django",
"score": 2
} |
#### File: accounts/tests/test_views.py
```python
from django.test import Client
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from model_mommy import mommy
from django.conf import settings
User = get_user_model()
class RegisterViewTestCase(TestCase):
def setUp(self):
self.client = Client()
self.register_url = reverse('accounts:register')
def test_register_ok(self):
data = {
'username': 'jhowuserteste', 'email': '<EMAIL>',
'password1': '<PASSWORD>','password2': '<PASSWORD>',
}
response = self.client.post(self.register_url, data)
login_url = reverse('login')
self.assertRedirects(response, login_url)
self.assertEquals(User.objects.count(), 1)
def test_register_fail(self):
data = {
'username': 'jhowuserteste', 'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
}
response = self.client.post(self.register_url, data)
self.assertFormError(response, 'form', 'email', 'Este campo é obrigatório.')
class UpdateUserTestCase(TestCase):
def setUp(self):
self.client = Client()
self.url = reverse('accounts:update_user')
self.user = mommy.prepare(settings.AUTH_USER_MODEL)
self.user.set_password('<PASSWORD>')
self.user.save()
def tearDown(self):
self.user.delete()
def test_update_user_ok(self):
data = {'name': 'humbree', 'email':'<EMAIL>'}
response = self.client.get(self.url)
self.assertEquals(response.status_code, 302)
self.client.login(username=self.user.username, password='<PASSWORD>')
response = self.client.post(self.url, data)
        accounts_index_url = reverse('accounts:index')
        self.assertRedirects(response, accounts_index_url)
#user = User.objects.get(username=self.user.username)
self.user.refresh_from_db()
self.assertEquals(self.user.email, '<EMAIL>')
self.assertEquals(self.user.name, 'humbree')
def test_update_user_error(self):
data = {}
self.client.login(username=self.user.username, password='<PASSWORD>')
response = self.client.post(self.url, data)
self.assertFormError(response, 'form', 'email', 'Este campo é obrigatório.')
class UpdatePasswordTestCase(TestCase):
def setUp(self):
self.client = Client()
self.url = reverse('accounts:update_password')
self.user = mommy.prepare(settings.AUTH_USER_MODEL)
self.user.set_password('<PASSWORD>')
self.user.save()
def tearDown(self):
self.user.delete()
def test_update_password_ok(self):
data = {
'old_password': '<PASSWORD>', 'new_password1':'<PASSWORD>',
'new_password2':'<PASSWORD>',
}
self.client.login(username=self.user.username, password='<PASSWORD>')
response = self.client.post(self.url, data)
self.user.refresh_from_db()
#user = User.objects.get(username=self.user.username)
self.assertTrue(self.user.check_password('<PASSWORD>'))
```
#### File: ecommerce-django/accounts/views.py
```python
from django.shortcuts import render
from django.urls import reverse_lazy
from .models import User
from .forms import UserAdminCreationForm
from django.views.generic import CreateView
from django.views.generic import TemplateView
from django.views.generic import UpdateView
from django.views.generic import FormView
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.mixins import LoginRequiredMixin
class IndexView(LoginRequiredMixin, TemplateView):
template_name = 'accounts/index.html'
class RegisterView(CreateView):
model = User
template_name = 'accounts/register.html'
form_class = UserAdminCreationForm
success_url = reverse_lazy('login')
class UpdateUserView(LoginRequiredMixin, UpdateView):
model = User
template_name = 'accounts/update_user.html'
fields = ['name', 'email']
success_url = reverse_lazy('accounts:index')
def get_object(self):
return self.request.user
class UpdatePasswordView(LoginRequiredMixin, FormView):
template_name = 'accounts/update_password.html'
success_url = reverse_lazy('accounts:index')
form_class = PasswordChangeForm
def get_form_kwargs(self):
kwargs = super(UpdatePasswordView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
return super(UpdatePasswordView, self).form_valid(form)
``` |
{
"source": "jhonatanmaia/python",
"score": 4
} |
#### File: curso-em-video/exercises/099.py
```python
def maior(* num):
print('-='*30)
    print('Analisando os valores passados...')
maior_numero=max(num[0])
tamanho=len(num[0])
for i in num[0]:
print(f'{i} ',end='')
print(f'Foram informados {tamanho} valores ao todo.')
print(f'O maior valor informado foi {maior_numero}')
print('-='*30)
lista=[]
while True:
lista.append(int(input('Digite um valor: ')))
sair=str(input('Deseja sair? [S/N] ')).upper()
while sair != 'S' and sair != 'N':
sair=str(input('Erro, digite novamente [S/N] ')).upper()
if sair == 'S':
break
maior(lista)
```
#### File: curso-em-video/exercises/101 - Data.py
```python
def voto(ano_nascimento):
from datetime import date
ano_atual=date.today().year
r=ano_atual-ano_nascimento
if 16 <= r < 18 or r>65:
return 'OPCIONAL'
elif r<18:
return 'NÃO VOTA'
else:
return 'OBRIGATÓRIO'
ano=int(input('Digite o seu ano de nascimento: '))
print(f'O seu voto é {voto(ano)}')
```
#### File: curso-em-video/exercises/104.py
```python
def leiaInt(n):
    # keep asking until the user types a valid integer
    j = input(f'{n}')
    while True:
        try:
            return int(j)
        except ValueError:
            j = input('Erro, digite novamente: ')
n=leiaInt('Digite um número: ')
print(f'Você acabou de digitar o número {n}')
```
#### File: curso-em-video/exercises/105.py
```python
def notas(*n, sit=False):
    """
    -> Function that analyzes grades and the situation of several students.
    :param n: one or more student grades (accepts several)
    :param sit: optional value indicating whether or not to include the situation.
    :return: dictionary with several pieces of information about the class situation.
    """
    total = len(n)
    maior = max(n)
    media = sum(n) / len(n)
    if not sit:
        return {'total': total, 'maior': maior, 'media': media}
    if media > 7:
        return {'total': total, 'maior': maior,
                'media': media, 'situacao': 'BOA'}
    elif 5 <= media <= 7:
        return {'total': total, 'maior': maior,
                'media': media, 'situacao': 'RAZOÁVEL'}
    else:
        return {'total': total, 'maior': maior,
                'media': media, 'situacao': 'RUIM'}
resp = notas(10,4,5.6,3,6.9,sit=True)
print(resp)
```
#### File: udemy/python-basic-to-advanced/Class - 01 - pep8.py
```python
import this
print('something')
```
#### File: udemy/python-basic-to-advanced/Class - 16 - Debugging and Handling Errors.py
```python
import pdb
pdb.set_trace()
# or
import pdb; pdb.set_trace()
def division(a,b):
try:
return int(a)/int(b)
except (ValueError,ZeroDivisionError) as err:
print(err)
num1=input('First number: ')
num2=input('Second number: ')
print(division(num1,num2))
``` |
{
"source": "JhonatanPatrocinio/GLAB",
"score": 2
} |
#### File: base/forms/request.py
```python
from django import forms
from django.contrib.auth import get_user_model
from base.models import Requester
User = get_user_model()
class RequesterForm(forms.ModelForm):
user = forms.ModelChoiceField(queryset=User.objects.all(), required=False)
class Meta:
model = Requester
fields = ('user', 'department', 'registry')
widgets = {
'registry': forms.TextInput(attrs={'class': 'form-control'}),
'department': forms.Select(attrs={'class': 'form-control'}),
'user': forms.HiddenInput()
}
def save(self, commit=True, user=None):
self.instance.user = user
return super().save(commit)
```
#### File: base/models/reservation.py
```python
from django.utils import timezone
from django.core.validators import MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import ValidationError
from django.contrib.auth import get_user_model
from django.urls import reverse
User = get_user_model()
class Reservation(models.Model):
RESPONSE_WAITING = 1
RESPONSE_DENIED = 2
RESPONSE_ACCEPTED = 3
RESPONSE_CANCELED = 4
STATUS_RESERVE_CHOICES = [
(RESPONSE_WAITING, _('Aguardando Resposta')),
(RESPONSE_DENIED, _('Utilização Negada')),
(RESPONSE_ACCEPTED, _('Utilização Aceita')),
(RESPONSE_CANCELED, _('Cancelado pelo usuário'))
]
place = models.ForeignKey('base.Place', on_delete=models.PROTECT, verbose_name=_('Nome do Espaço'))
user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, verbose_name=_('Solicitante'))
phone = models.CharField(_('Telefone'), max_length=16, help_text=_('Informe um telefone para contato'))
date = models.DateField(_('Data'))
initial_time = models.TimeField(_('Hora Inicio'))
end_time = models.TimeField(_('Hora Término'))
status = models.IntegerField(_('Status'), choices=STATUS_RESERVE_CHOICES, default=1)
reason = models.TextField(_('Motivo'), max_length=400)
obs = models.TextField(_('Observações'), max_length=355, blank=True)
# About OBJ
created_at = models.DateTimeField(auto_now_add=True, editable=False)
update_at = models.DateTimeField(null=True, blank=True)
class Meta:
ordering = ['date']
verbose_name = _('Reserva')
verbose_name_plural = _('Reservas')
def __str__(self):
return f'{self.user} - {self.place.name}'
def save(self, *args, **kwargs):
if self.created_at:
self.update_at = timezone.now()
return super().save(*args, **kwargs)
def clean(self):
if self.id and Reservation.objects.filter(
date=self.date, initial_time__range=([self.initial_time, self.end_time]),
status=self.RESPONSE_ACCEPTED).exclude(id=self.id).exists():
raise ValidationError(_('Já existe uma reserva confirmada neste horário'))
return super().clean()
@property
def get_start_date(self):
return timezone.datetime(
self.date.year, self.date.month, self.date.day, self.initial_time.hour, self.initial_time.minute
)
@property
def get_end_date(self):
return timezone.datetime(
self.date.year, self.date.month, self.date.day, self.end_time.hour, self.end_time.minute
)
@property
def get_date_display(self):
return f'{self.date.strftime("%d/%m/%Y")} ' \
f'{self.initial_time.strftime("%H:%M")} - {self.end_time.strftime("%H:%M")}'
@property
def get_text_status_color(self):
if self.status == Reservation.RESPONSE_WAITING:
return 'text-warning'
elif self.status == Reservation.RESPONSE_ACCEPTED:
return 'text-success'
elif self.status == Reservation.RESPONSE_DENIED:
return 'text-danger'
elif self.status == Reservation.RESPONSE_CANCELED:
return 'text-danger'
else:
return ''
@property
def get_absolute_url(self):
return reverse("view_reservation", kwargs={"pk": self.pk})
```
#### File: base/templatetags/get_verbose_name.py
```python
from django import template
register = template.Library()
@register.simple_tag
def get_verbose_field_name(instance, field_name):
"""
Returns verbose_name for a field.
"""
return instance._meta.get_field(field_name).verbose_name.title()
``` |
{
"source": "JhonatanRSantos/CBERS-to-GEE",
"score": 2
} |
#### File: JhonatanRSantos/CBERS-to-GEE/CBERSTOGEE.py
```python
from tkinter import *
import json
import os
import time
import zipfile
from tkinter import Toplevel
import requests
from bs4 import BeautifulSoup
import wget
import shutil
from PIL import Image, ImageTk
import threading
import multiprocessing
task = None
# To generate the executable, type in the terminal:
# pyinstaller <script name>.py
# pyinstaller.exe --icon=favicon.ico CBERSTOGEE.py // to get an icon
fileConfigName = "config.json"  # CHANGE TO config.json AFTER THE TESTS
configJson = json.loads(open(fileConfigName).read())
if configJson["installDependencies"]:
print("Instalando dependencias.")
result = os.system("pip install -r dependencies.txt")
if result == 1:
print("Erro ao instalar dependencias do Python.")
exit()
result = os.system("earthengine ls")
if result == 1:
os.system("cls")
print("Para continuar voce deve fazer sua autenticacao\n")
os.system("earthengine authenticate")
os.system("cls")
currentTime = None
result = 1
while result == 1:
currentTime = str(time.time()).replace(".", "")
result = os.system("gsutil mb gs://bk{}".format(currentTime))
result = os.system(
"gsutil iam ch allUsers:objectViewer gs://bk{}".format(currentTime))
os.system("cls")
with open(fileConfigName, "w") as configFile:
configJson["bucketName"] = "bk" + currentTime
configJson["installDependencies"] = False
json.dump(configJson, configFile)
configJson = json.loads(open(fileConfigName).read())
nomeDoUsuarioINPE = configJson["nomeDoUsuarioINPE"].strip()
senhaINPE = configJson["senhaINPE"].strip()
geeUserName = configJson["geeUserName"].strip()
bucketName = configJson["bucketName"].strip()
startDate = configJson["startDate"].strip()
endDate = configJson["endDate"].strip()
startOrbit = configJson["startOrbit"].strip()
endOrbit = configJson["endOrbit"].strip()
startPoint = configJson["startPoint"].strip()
endPoint = configJson["endPoint"].strip()
# --------------------- Program execution after the button is clicked ---------------------
def Carregando():
textoProgresso = "Carregando...\n"
print(textoProgresso)
text_box_Progress.insert(INSERT, textoProgresso)
# ,senhaINPE,geeUserName,bucketName,startDate,endDate,startOrbit,endOrbit,startPoint,endPoint):
def executeCrawler():
Carregando()
# print('teste', entry_1.get())
nomeDoUsuarioINPE = entry_1.get()
senhaINPE = entry_2.get()
geeUserName = entry_3.get()
startDate = entry_4.get()
endDate = entry_5.get()
startOrbit = entry_6.get()
endOrbit = entry_7.get()
startPoint = entry_8.get()
endPoint = entry_9.get()
with open(fileConfigName, "w") as configFile:
configJson["nomeDoUsuarioINPE"] = nomeDoUsuarioINPE
configJson["senhaINPE"] = senhaINPE
configJson["geeUserName"] = geeUserName
configJson["startDate"] = startDate
configJson["endDate"] = endDate
configJson["startOrbit"] = startOrbit
configJson["endOrbit"] = endOrbit
configJson["startPoint"] = startPoint
configJson["endPoint"] = endPoint
json.dump(configJson, configFile)
    # --- Crawler start
    # User authentication POST
nomeDoUsuarioINPE = configJson["nomeDoUsuarioINPE"]
senhaINPE = configJson["<PASSWORD>haINPE"]
s = requests.session()
p = s.post('http://www.dgi.inpe.br/catalogo/login.php',
               {'enviar': 'Realizar+acesso', 'name': nomeDoUsuarioINPE, 'pwd': senhaINPE, 'submitted': '1'})
phpSessID = p.cookies['PHPSESSID']
    # Define the image-search variables.
startPage = '1'
startDate = configJson["startDate"]
endDate = configJson["endDate"]
startOrbit = configJson["startOrbit"]
endOrbit = configJson["endOrbit"]
startPoint = configJson["startPoint"]
endPoint = configJson["endPoint"]
imagesPath = configJson["imagesPath"]
    pageSize = '20'  # Maximum number of items found per web page
zipFolder = 'tempZip'
# ---------
    # First link, used to obtain the initial information for the request
link1 = f'http://www.dgi.inpe.br/catalogo/buscarimagens.php?p=1&pg={startPage}&TRIGGER=BTNOPTODOS&CQA=CA&SATELITE=CB4&SENSOR=MUX&DATAINI={startDate}&DATAFIM={endDate}&Q1=&Q2=&Q3=&Q4=&ORBITAINI={startOrbit}&ORBITAFIM={endOrbit}&PONTOINI={startPoint}&PONTOFIM={endPoint}&TAMPAGINA={pageSize}'
L1 = s.get(link1)
html = L1.content
html = str(html)
# Codigo html da primeira pagina requisitada
site = BeautifulSoup(html, 'lxml')
# Quantidade de paginas.
totalPages = site.find(id="pgatualtopo").get('max')
# tag que possui os indices das imagens
ref = site.find_all(class_="icon-shopping-cart")
# Fazer loop para obter todos os indices das imagens e adicionar no carrinho.
for i in ref:
tagBtn = i.previous_element
IndiceImg = str(tagBtn).split("chamaAdicionarAoCarrinho")[
1].split(",")[1].replace("\\'", "").replace("\\'", "")
IndiceImg = IndiceImg.strip()
link2 = f'http://www.dgi.inpe.br/catalogo/addtocart.php?ACTION=CART&INDICE={IndiceImg}'
L2 = s.get(link2)
if (int(totalPages) > 1):
for pagina in range(2, int(totalPages) + 1):
link1 = f'http://www.dgi.inpe.br/catalogo/buscarimagens.php?p=1&pg={str(pagina)}&TRIGGER=BTNOPTODOS&CQA=CA&SATELITE=CB4&SENSOR=MUX&DATAINI={startDate}&DATAFIM={endDate}&Q1=&Q2=&Q3=&Q4=&ORBITAINI={startOrbit}&ORBITAFIM={endOrbit}&PONTOINI={startPoint}&PONTOFIM={endPoint}&TAMPAGINA={pageSize}'
L1 = s.get(link1)
html = L1.content
html = str(html)
site = BeautifulSoup(html, 'lxml')
ref = site.find_all(class_="icon-shopping-cart")
for i in ref:
tagBtn = i.previous_element
IndiceImg = str(tagBtn).split("chamaAdicionarAoCarrinho")[1].split(",")[1].replace("\\'", "").replace(
"\\'", "")
IndiceImg = IndiceImg.strip()
link2 = f'http://www.dgi.inpe.br/catalogo/addtocart.php?ACTION=CART&INDICE={IndiceImg}'
L2 = s.get(link2)
    # Get the number of items/images added to the cart
link3 = 'http://www.dgi.inpe.br/catalogo/numeroitenscarrinho.php'
L3 = s.get(link3)
strNumItensNoCarrinho = str(L3.content)
quantDeItensNoCarrinho = strNumItensNoCarrinho.split("\\")[
0].split("'")[1]
    # Close the image order
link4 = 'http://www.dgi.inpe.br/catalogo/cart.php'
L4 = s.get(link4)
link5 = 'http://www.dgi.inpe.br/catalogo/cartAddress.php'
L5 = s.post(link5, {'action': 'Prosseguir',
'sesskey': phpSessID, 'userid': nomeDoUsuarioINPE})
link6 = f'http://www.dgi.inpe.br/catalogo/cartAddress.php?userid={nomeDoUsuarioINPE}&nItens={quantDeItensNoCarrinho}&sesskey={phpSessID}&mediaCD=&total=0&action=Prosseguir'
L6 = s.get(link6)
link7 = 'http://www.dgi.inpe.br/catalogo/cartAddress.php'
p = s.post(link7,
{'action': 'Fechar+Pedido', 'mediaCD': None, 'nItens': quantDeItensNoCarrinho, 'sesskey': phpSessID,
'total': '0', 'userid': nomeDoUsuarioINPE})
link8 = f'http://www.dgi.inpe.br/catalogo/cartAddress.php?action=Fechar+Pedido&mediaCD=&nItens={quantDeItensNoCarrinho}&sesskey={phpSessID}&total=0&userid={nomeDoUsuarioINPE}'
L8 = s.get(link8)
    # Get the order number assigned to the logged-in user id
pagWebPedido = L8.content
pagWebPedido = BeautifulSoup(str(pagWebPedido), 'lxml')
numeroDoPedido = pagWebPedido.find(
class_='icon-thumbs-up').previous_element.text
numeroDoPedido = str(numeroDoPedido).split(
" foi aceito ")[0].split("número ")[1]
print(
f'Número de imagens que serão baixadas: {quantDeItensNoCarrinho} \n')
    # Wait a while for the server to process the images
time.sleep(60)
    # Prepare the image links
link9 = f'http://imagens.dgi.inpe.br/cdsr/{nomeDoUsuarioINPE}{numeroDoPedido}'
L9 = s.get(link9)
pagWebLinks = BeautifulSoup(str(L9.content), 'lxml')
linksImagens = []
for link in pagWebLinks.find_all('a'):
strLink = str(link.get('href'))
if strLink.endswith('.zip'):
linksImagens.append(
f'http://imagens.dgi.inpe.br/cdsr/{nomeDoUsuarioINPE}{numeroDoPedido}/{strLink}')
    # Download the images and unzip the .zip file
os.mkdir(imagesPath + zipFolder)
time.sleep(1)
def BaixarExtrairImagens(location):
try:
filename = wget.download(location, imagesPath + zipFolder)
zip_ref = zipfile.ZipFile(filename, 'r')
zip_ref.extractall(imagesPath)
zip_ref.close()
except Exception as error:
print("Não foi possível baixar os arquivos. Verifique sua conexão.")
exit()
auxPercenBaixado = 100 / (int(quantDeItensNoCarrinho) * 4)
for linkImg in linksImagens:
BaixarExtrairImagens(str(linkImg))
print(f" Baixando arquivos: {auxPercenBaixado}% \n")
auxPercenBaixado = auxPercenBaixado + \
(100 / (int(quantDeItensNoCarrinho) * 4))
    # Message announcing that the image download has finished
print(
f'Download terminado. Voce baixou: {quantDeItensNoCarrinho} imagens para o seu computador.')
print(f'Cada imagem possui 4 arquivos .tiff e 4 arquivos .xml.')
print(
f'Cada arquivo .tiff corresponde a uma banda da imagem, e cada arquivo .xml corresponde aos metadados de cada banda.')
    # Delete the zipped folder
time.sleep(1)
# result = os.system("gsutil rm gs://{}/*".format(configJson["bucketName"]))
print("Enviando arquivos para o GEE.\n")
    # ADD HERE THE NAME OF ALL THE TASKS CREATED BY THE SYSTEM!!!!! GILBERTO
taskIDs = []
for root, dirs, files in os.walk("images"):
print('iniciando')
for filename in files:
print('preparando envio de dados...')
currentFile = str(filename)
if currentFile.endswith(".tif"):
result = os.system(
"gsutil cp {} gs://{}".format("images\\" + currentFile, configJson["bucketName"]))
if result == 0:
try:
result = os.popen(
"earthengine upload image --asset_id=users/{}/{} --pyramiding_policy=sample gs://{}/{}".format(
configJson["geeUserName"], currentFile.replace(
".tif", ""), configJson["bucketName"],
currentFile)
).readlines()
taskIDs.append(result[0].split('ID: ')[
1].replace("\n'", ''))
print('Id adicionado com sucesso.')
except:
result = 1
if result == 1:
print("Error ao baixar a imagem {} para o GEE.".format(
currentFile))
else:
print("Erro ao enviar o arquivo {}".format(currentFile))
print()
shutil.rmtree(imagesPath + zipFolder)
shutil.rmtree(imagesPath)
print('Ids ', taskIDs)
textoProgresso = "Imagens transferidas para o GEE com sucesso!\n"
print(textoProgresso)
text_box_Progress.insert(INSERT, textoProgresso)
    # CODE THAT DELETES THE IMAGES FROM THE BUCKET --- TEST IT!!!!!
import re
tasksCompleds = 0
taskList = os.popen('earthengine task list').readlines()
taskList = [re.sub('[^A-Za-z0-9\_\-]+', ' ', task) for task in taskList]
doTasks = (len(taskIDs) != 0)
total = len(taskIDs)
while doTasks:
for tskId in taskIDs:
out = os.popen(
'earthengine task info {}'.format(tskId)).readlines()
out = out[1].split(': ')[1]
if out == 'COMPLETED\n':
tasksCompleds += 1
elif out == 'FAILED\n':
print('A task com o ID {} falhou.'.format(tskId))
tasksCompleds += 1
if tasksCompleds == total:
os.system('gsutil rm -r gs://{}/*.tif'.format(bucketName))
break
else:
tasksCompleds = 0
    # --- Crawler end
def start():
threading.Thread(target=executeCrawler).start()
# ---------------------Interface---------------------
# -- Initial notice window
def Ajuda(txt=''):
windowWelcome = Toplevel()
windowWelcome.iconbitmap('favicon.ico')
windowWelcome.title('Bem vindo ao CEBRS4 to GEE!')
image = Image.open("banner3_ajudaC4GEE.png")
photo = ImageTk.PhotoImage(image)
label = Label(windowWelcome, image=photo)
label.image = photo # keep a reference!
label.pack()
'''
img_ajuda = PhotoImage(file='banner3_ajudaC4GEE.png')
label_ajudaimg = Label(windowWelcome, image=img_ajuda)
label_ajudaimg.grid(row=2,column=0)'''
text_box_Welcome = Text(windowWelcome, width=80,
height=20, wrap=WORD, background="white")
text_box_Welcome.pack() # .grid(row=4,column=0)
textoInicial = "Bem Vindo ao CBERS4 to GEE! Este programa realiza a importação de imagens do satélite CBERS4 banda MUX para sua conta no Google Earth Engine.\n\n" \
"Você precisa preencher requisitos mínimos para utilizar este programa:\n" \
"1 - Ter um conta no Google.\n" \
"2 - Ter uma conta no Catálogo do INPE (Instituto Nacional de Pesquisas Espaciais). Você pode se cadastrar no catálogo do INPE atravês do site http://www.dgi.inpe.br/CDSR/ clicando em <Cadastro> e registrando-se pelo formulário de cadastro. \n" \
"3 - Ter uma conta no Google Earth Engine (GEE). Você pode ter uma conta no Google Earth Engine registrando-se através do site https://earthengine.google.com/ . \n" \
"4 - Ter uma conta no Google Cloud. Você pode ter uma conta no Google Cloud registrando-se através do site https://cloud.google.com/. Será solicitado o número do seu cartão de crédito, porém isso é só para registro da conta caso você deseje no futuro obter uma conta Google Cloud com mais recursos. \n"
textoInicial = textoInicial + txt
text_box_Welcome.insert(INSERT, textoInicial)
def closeWindowWelcome():
windowWelcome.destroy()
button_1_Welcome = Button(windowWelcome, text="Ok",
command=closeWindowWelcome)
# .grid(row=6,column=0,ipadx=100)#.pack(ipadx=100)#(row=2, column=0)
button_1_Welcome.pack(ipadx=100)
windowWelcome.mainloop()
# --------------------------
# -- Main program window
def sheet():
# if threadWorker != None and threadWorker.is_alive():
# pass
# os.system("taskkill /f /im python.exe")
os.system('taskkill /f /pid {pid}'.format(pid=os.getpid()))
# os.system('pkill -TERM -P {pid}'.format(pid=os.getpid()))
main_window = Tk()
main_window.protocol("WM_DELETE_WINDOW", sheet)
main_window.title('CBERS4 To GEE')
img1 = PhotoImage(file="banner1_topC4GEE.png")
label_aux0 = Label(main_window, image=img1) # text="\n\n")
label_aux0.grid(row=1, column=0)
frameLogin = Frame(main_window, height=100, width=100,
borderwidth=4, relief=GROOVE)
frameLogin.grid(row=2, column=0)
img2 = PhotoImage(file="banner2_medC4GEE.png")
label_aux0 = Label(main_window, image=img2) # ,text="\n\n")
label_aux0.grid(row=3, column=0)
framePesquisa = Frame(main_window, height=100, width=100,
borderwidth=4, relief=GROOVE)
framePesquisa.grid(row=4, column=0)
label_1 = Label(frameLogin, text="Nome de usuário do INPE:")
label_1.grid(row=2, column=0, sticky=W)
entry_1 = Entry(frameLogin)
entry_1.grid(row=3, column=0, sticky=W)
entry_1.insert(0, nomeDoUsuarioINPE)
label_2 = Label(frameLogin, text="Senha de usuário do INPE:")
label_2.grid(row=4, column=0, sticky=W)
entry_2 = Entry(frameLogin) # ,show="*")
entry_2.grid(row=5, column=0, sticky=W)
entry_2.insert(0, senhaINPE)
label_space1 = Label(frameLogin, text=" ")
label_space1.grid(row=2, column=1)
label_space1 = Label(frameLogin, text=" ")
label_space1.grid(row=3, column=1)
label_3 = Label(frameLogin, text="Nome de usuário do GEE :")
label_3.grid(row=2, column=2, sticky=W)
entry_3 = Entry(frameLogin)
entry_3.grid(row=3, column=2, sticky=W)
entry_3.insert(0, geeUserName)
label_4 = Label(framePesquisa, text="Data início. Ex: 01/09/2018 :")
label_4.grid(row=8, column=0, sticky=W)
entry_4 = Entry(framePesquisa)
entry_4.grid(row=9, column=0, sticky=W)
entry_4.insert(0, startDate)
label_space1 = Label(framePesquisa, text=" ")
label_space1.grid(row=8, column=1)
label_space1 = Label(framePesquisa, text=" ")
label_space1.grid(row=9, column=1)
label_5 = Label(framePesquisa, text="Data fim. Ex: 01/10/2018 :")
label_5.grid(row=8, column=2, sticky=W)
entry_5 = Entry(framePesquisa)
entry_5.grid(row=9, column=2, sticky=W)
entry_5.insert(0, endDate)
label_6 = Label(framePesquisa, text="Órbita inicial. Ex: 162 :")
label_6.grid(row=10, column=0, sticky=W)
entry_6 = Entry(framePesquisa)
entry_6.grid(row=11, column=0, sticky=W)
entry_6.insert(0, startOrbit)
label_7 = Label(framePesquisa, text="Órbita final. Ex: 162 :")
label_7.grid(row=10, column=2, sticky=W)
entry_7 = Entry(framePesquisa)
entry_7.grid(row=11, column=2, sticky=W)
entry_7.insert(0, endOrbit)
label_8 = Label(framePesquisa, text="Ponto inicial. Ex: 102 :")
label_8.grid(row=12, column=0, sticky=W)
entry_8 = Entry(framePesquisa)
entry_8.grid(row=13, column=0, sticky=W)
entry_8.insert(0, startPoint)
label_9 = Label(framePesquisa, text="Ponto final. Ex: 102 :")
label_9.grid(row=12, column=2, sticky=W)
entry_9 = Entry(framePesquisa)
entry_9.grid(row=13, column=2, sticky=W)
entry_9.insert(0, endPoint)
label_aux1 = Label(main_window, text=" ")
label_aux1.grid(row=20, column=0)
label_aux2 = Label(main_window, text=" ")
label_aux2.grid(row=22, column=0)
textoAjuda = "\nPara realizar a importação de imagens CBERS para o GEE, é necessário: \n" \
"1 - Preencher os dados: \n" \
" 1.1 - Nome de usuário do Catálogo do INPE, \n" \
" 1.2 - Senha do Catálogo do INPE, \n" \
" 1.3 - Nome de usuário do GEE, \n" \
" 1.4 - Um intervalo entre uma data inicial e uma data final do imagiamento do satélite,\n" \
" 1.5 - Um intervalo com uma orbita inicial e uma orbita final, \n" \
" 1.6 - Um intervalo do ponto inicial e do ponto final.\n" \
"2 - Após o preenchimento dos dado clicar no botão <Baixar e importar imagens para o GEE> e aguardar o término da execução.\n" \
"3 - Após o término da execução, as imagens que você pesquisou estarão na aba <Assets> da sua conta no Google Earth Engine.\n\n" \
"--Equipe de desenvolvimento--\n" \
"<NAME>: cesar.diniz<EMAIL>.br\n" \
"<NAME>: <EMAIL>\n" \
"<NAME>: <EMAIL>\n" \
"<NAME>: <EMAIL>\n" \
"<NAME>: <EMAIL>\n" \
"Visite o nosso site: www.solved.eco.br"
button_2 = Button(main_window, text="Ajuda!",
command=lambda: Ajuda(textoAjuda))
button_2.grid(row=23, column=0, sticky=N)
label_aux3 = Label(main_window, text=" ")
label_aux3.grid(row=24, column=0)
text_box_Progress = Text(main_window, width=40, height=3,
wrap=WORD, background="white")
text_box_Progress.grid(row=25, column=0, columnspan=3)
textoProgresso = "..."
text_box_Progress.insert(INSERT, textoProgresso)
label_aux4 = Label(main_window, text=" ")
label_aux4.grid(row=26, column=0)
button_1 = Button(
main_window, text="Baixar e importar imagens para o GEE", command=start)
button_1.grid(row=21, column=0)
main_window.iconbitmap('favicon.ico')
# --- Show the help window
if configJson["installDependencies"]:
Ajuda()
main_window.mainloop()
``` |
{
"source": "Jhonatanslopes/Ecommerce-Customer-Churn",
"score": 3
} |
#### File: Ecommerce-Customer-Churn/src/api.py
```python
import pickle
import pandas as pd
import os
import sklearn
import numpy as np
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import VotingClassifier
from flask import Flask, request, Response
from classChurn.churn import Churn
model = pickle.load(open('model/model.pkl', 'rb')) # load model saved with pickle
app = Flask(__name__) # initialize API
@app.route('/CustomerChurn/predict', methods=['POST'])
def customer_churn():
test_json = request.get_json()
if test_json: # there is data
if isinstance(test_json, dict): # unique example
test_raw = pd.DataFrame(test_json, index=[0])
else: # multiple exemples
test_raw = pd.DataFrame(test_json, columns=test_json[0].keys())
# instance class
churn = Churn()
# data cleaning
df_cleaning = churn.cleaning(df=test_raw)
print('cleaning OK')
# feature engineering
df_feature = churn.feature_engineering(df=df_cleaning)
print('feature engineering OK')
# data preparation
        df_preparation = churn.preparation(df=df_feature)
        print('preparation OK')
        # feature selection
        df_filtered = churn.feature_selection(df=df_preparation)
print('feature selection OK')
# prediction
df_response = churn.get_prediction(model=model, original_data=df_cleaning, test_data=df_filtered)
print('prediction OK')
return df_response
if __name__ == '__main__':
porta = os.environ.get('PORT', 5000)
app.run(host='0.0.0.0', port=porta)
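# A minimal client sketch for this endpoint (assumptions: the API is running
# locally on port 5000 and `df_test` is a pandas DataFrame holding the raw
# columns expected by Churn.cleaning):
#
# import json
# import requests
# import pandas as pd
#
# data = json.dumps(df_test.to_dict(orient='records'))
# url = 'http://127.0.0.1:5000/CustomerChurn/predict'
# header = {'content-type': 'application/json'}
# response = requests.post(url, data=data, headers=header)
# predictions = pd.DataFrame(response.json(), columns=response.json()[0].keys())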
``` |
{
"source": "Jhonatanslopes/Etl-Car-Recommendation",
"score": 3
} |
#### File: Etl-Car-Recommendation/src/report.py
```python
import win32com.client as win32
import sqlalchemy
import pandas as pd
import os
from datetime import datetime
def send_report(conn):
try:
date_query = str(datetime.now().strftime('%Y-%m-%d'))
query = '''
SELECT * FROM tb_cars
WHERE scrapy_date = "{}"
'''.format(date_query)
data = pd.read_sql_query(query, conn)
conn.dispose()
except sqlalchemy.exc.InvalidRequestError:
print('SELECT Error')
# Create file excel
try:
path = 'report/report_icarros.xlsx'
f = open(path)
f.close()
os.remove(path)
data.to_excel('report/report_icarros.xlsx', index=False)
except:
data.to_excel('report/report_icarros.xlsx', index=False)
# Send Email
date = datetime.now().strftime('%d-%m')
outlook = win32.Dispatch('outlook.application')
email = outlook.CreateItem(0)
email.To = '<EMAIL>'
email.Subject = f'Relatório Veículos - Icarros {date}'
email.HTMLBody = '''
<p>Olá,</p>
<p>Segue em anexo, relatório dos veículos coletados do site.</p>
<p> </p>
<p>Atenciosamente,</p>
<p><NAME></p>
'''
    # full path to the attached file
file = 'C:/Users/Jhonatans/projects/ETL/Etl-Car-Recommendation/report/report_icarros.xlsx'
email.Attachments.Add(file)
email.Send()
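# Usage sketch (the connection string is a hypothetical example; adjust driver,
# credentials and database name to your environment). send_report expects a
# SQLAlchemy engine, since it calls conn.dispose() after running the query:
#
# from sqlalchemy import create_engine
#
# engine = create_engine('mysql+pymysql://user:password@localhost:3306/icarros')
# send_report(engine)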
``` |
{
"source": "Jhonatanslopes/Financial-Fraud-Detection",
"score": 3
} |
#### File: Financial-Fraud-Detection/src/api.py
```python
import pickle
import pandas as pd
import os
import sklearn
import numpy as np
from flask import Flask, request, Response
from lightgbm import LGBMClassifier
from class_.FraudDetection import FraudDetection
model = pickle.load(open('model/lgbm.pkl', 'rb')) # loading model
app = Flask(__name__) # initialize API
@app.route('/fraudDetection/predict', methods=['POST'])
def fraudDetection_predict():
test_json = request.get_json()
if test_json: # there is data
if isinstance(test_json, dict): # unique example
test_raw = pd.DataFrame(test_json, index=[0])
else: # multiple example
test_raw = pd.DataFrame(test_json, columns=test_json[0].keys())
# instantiate class
detection = FraudDetection()
# data cleaning
df1 = detection.cleaning(df=test_raw)
print('cleaning OK')
# feature engineering
df2 = detection.feature_engineering(df=df1)
print('feature engineering OK')
# data preparation
df3 = detection.preparation(df=df2)
print('data preparation OK')
# feature selection
df4 = detection.feature_selection(df=df3)
print('feature selection OK')
# prediction
df_response = detection.get_prediction(
model=model, original_data=df1, test_data=df4
)
print('prediction OK')
return df_response
else:
        return Response('{}', status=200, mimetype='application/json')
if __name__ == '__main__':
porta = os.environ.get('PORT', 5000)
app.run(host='0.0.0.0', port=porta)
"""
import json
import requests
# data to json
data = json.dumps(x_test.to_dict(orient='records'))
#url = 'http://127.0.0.1:5000/fraudDetection/predict'
url = 'https://api-fraud.herokuapp.com/fraudDetection/predict' # local host
header = {'content-type': 'application/json'} # set type as json
# request with method POST
response = requests.post(url, data=data, headers=header)
print('Status code: {}'.format(response.status_code))
# json to dataframe
d1 = pd.DataFrame(response.json(), columns=response.json()[0].keys())
d1"""
``` |
{
"source": "jhonatantft/ckl",
"score": 2
} |
#### File: api/user/models.py
```python
from django.db import models
from interest.models import Interest
class User(models.Model):
firstName = models.CharField(max_length=150)
interests = models.ManyToManyField(Interest)
def __str__(self):
return self.firstName
``` |
{
"source": "jhonatantft/digital-image-processing",
"score": 3
} |
#### File: image processing algorithm/operadorSobel/sobel.py
```python
import cv2
import numpy as np
from PIL import Image
import math
def sobel():
path = "einstein.jpg" # Your image path
img = Image.open(path)
width = 544
height = 340
newimg = Image.new("RGB", (544, 340), "white")
for x in range(1, width-1): # ignore the edge pixels for simplicity (1 to width-1)
for y in range(1, height-1): # ignore edge pixels for simplicity (1 to height-1)
# initialise Gx to 0 and Gy to 0 for every pixel
Gx = 0
Gy = 0
# top left pixel
p = img.getpixel((x-1, y-1))
r = p[0]
g = p[1]
b = p[2]
# intensity ranges from 0 to 765 (255 * 3)
intensity = r + g + b
# accumulate the value into Gx, and Gy
Gx += -intensity
Gy += -intensity
# remaining left column
p = img.getpixel((x-1, y))
r = p[0]
g = p[1]
b = p[2]
Gx += -2 * (r + g + b)
p = img.getpixel((x-1, y+1))
r = p[0]
g = p[1]
b = p[2]
Gx += -(r + g + b)
Gy += (r + g + b)
# middle pixels
p = img.getpixel((x, y-1))
r = p[0]
g = p[1]
b = p[2]
Gy += -2 * (r + g + b)
p = img.getpixel((x, y+1))
r = p[0]
g = p[1]
b = p[2]
Gy += 2 * (r + g + b)
# right column
p = img.getpixel((x+1, y-1))
r = p[0]
g = p[1]
b = p[2]
Gx += (r + g + b)
Gy += -(r + g + b)
p = img.getpixel((x+1, y))
r = p[0]
g = p[1]
b = p[2]
Gx += 2 * (r + g + b)
p = img.getpixel((x+1, y+1))
r = p[0]
g = p[1]
b = p[2]
Gx += (r + g + b)
Gy += (r + g + b)
# calculate the length of the gradient (Pythagorean theorem)
length = math.sqrt((Gx * Gx) + (Gy * Gy))
# normalise the length of gradient to the range 0 to 255
length = length / 4328 * 255
length = int(length)
# draw the length in the edge image
#newpixel = img.putpixel((length,length,length))
newimg.putpixel((x, y), (length, length, length))
newimg.save('sobel.jpg')
newimg.show()
return newimg
def robert_full():
roberts_cross_v = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, -1]])
roberts_cross_h = np.array([[0, 0, 0],
[0, 0, 1],
[0, -1, 0]])
def load_image(infilename):
img = Image.open(infilename)
img.load()
# note signed integer
return np.asarray(img, dtype="int32")
def save_image(data, outfilename):
img = Image.fromarray(np.asarray(
np.clip(data, 0, 255), dtype="uint8"), "L")
img.save(outfilename)
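    # The stub above only defines the Roberts kernels and the I/O helpers; the
    # lines below are a minimal completion sketch that applies them, assuming
    # the same 'einstein.jpg' input used by sobel() (the file names here are
    # assumptions, not part of the original code).
    image = load_image("einstein.jpg").astype("float64")
    if image.ndim == 3:
        image = image.sum(axis=2)  # collapse RGB to a single intensity channel
    vertical = cv2.filter2D(image, -1, roberts_cross_v.astype("float64"))
    horizontal = cv2.filter2D(image, -1, roberts_cross_h.astype("float64"))
    # gradient magnitude, normalised to the 0..255 range before saving
    edges = np.sqrt(np.square(horizontal) + np.square(vertical))
    edges = edges / edges.max() * 255
    save_image(edges, "roberts.jpg")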
def main():
sobel()
if __name__ == '__main__':
main()
```
#### File: digital-image-processing/zoomOut/zoomOut.py
```python
import cv2
import numpy as np
import PIL
from PIL import Image
import utils
def zoom_out(image_name, step):
image = Image.open(image_name)
width = range(0, image.size[0], step)
height = range(0, image.size[1], step)
image_data = []
new_x = 0
new_y = 0
for x in width:
new_y = 0
for y in height:
            quadrants = [(x, y), (x + 1, y), (x, y + 1), (x + 1, y + 1)]
            # keep only the neighbour pixels that actually fall inside the image
            valid_quadrants = [q for q in quadrants
                               if q[0] < image.size[0] and q[1] < image.size[1]]
            int_rgb_total = 0
            for quadrant in valid_quadrants:
                pixel = image.getpixel(quadrant)
                int_rgb_total += utils.getIfromRGB(pixel)
            new_pixel_int = round(int_rgb_total / len(valid_quadrants))
new_pixel = utils.getRGBfromI(int(new_pixel_int))
image_data.append({
'coordinates': (new_x, new_y),
'pixel': new_pixel
})
new_y = new_y + 1
new_x = new_x + 1
utils.create_new_image('zoom_out.jpg', image_data)
zoom_out('../grama.jpg', 4)
``` |
{
"source": "jhonatantft/multiplanar-reconstruction",
"score": 3
} |
#### File: jhonatantft/multiplanar-reconstruction/main.py
```python
import os, re
import numpy as np
import matplotlib.pyplot as plt
def rename_files(path):
for count, filename in enumerate(os.listdir(path)):
name, number = filename.split('.')
if (bool(re.search('^[-+]?[0-9]+$', number))):
number = str('%03d' % int(number),)
new_filename = name + '.' + number
os.rename(path + filename, path + new_filename)
def buildsSectionByAxis(images, section, axis = None):
newImage = []
if not axis:
return images[section]
if axis == 'y':
for image in images:
newImage.append(image[section])
if axis == 'z':
for image in images:
newLine = []
for line in image:
newLine.append(line[section])
newImage.append(newLine)
return newImage
def retrieveEachImage(root, files, images):
resolution = [512, 512]
for file in files:
image = np.fromfile(os.path.join(root, file), dtype='int16', sep='')
images.append(image.reshape(resolution))
def getImages(path, images):
for (root, directories, files) in os.walk(path):
files.sort()
retrieveEachImage(root, files, images)
return images
def show_images(images):
for i in range(len(images)):
plt.imshow(images[i], cmap='gray')
plt.show()
def main():
# rename_files('/home/jhonatan/Desktop/multiplanar-reconstruction/Arterielle/')
frame = 350
images = getImages('./Arterielle', [])
sectionX = buildsSectionByAxis(images, frame)
sectionY = buildsSectionByAxis(images, frame, 'y')
sectionZ = buildsSectionByAxis(images, frame, 'z')
show_images([sectionX, sectionY, sectionZ])
if __name__ == '__main__':
main()
``` |
{
"source": "jhonatantirado/CheXNet-Keras",
"score": 2
} |
#### File: jhonatantirado/CheXNet-Keras/predict_pb.py
```python
from keras.preprocessing import image
import numpy as np
import os
from configparser import ConfigParser
import tensorflow as tf
import torch
import cxr_dataset as CXR
from torchvision import transforms, utils
from torch.utils.data import Dataset, DataLoader
def load_pb(path_to_pb):
with tf.gfile.GFile(path_to_pb, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name='')
return graph
def load_image(img_path, show=False):
img = image.load_img(img_path, target_size=(224, 224))
img_tensor = image.img_to_array(img)# (height, width, channels)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.
return img_tensor
if __name__ == "__main__":
LABEL="Pneumonia"
STARTER_IMAGES=True
PATH_TO_IMAGES = "starter_images/"
POSITIVE_FINDINGS_ONLY=True
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
data_transform = transforms.Compose([
transforms.Scale(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
if not POSITIVE_FINDINGS_ONLY:
finding = "any"
else:
finding = LABEL
dataset = CXR.CXRDataset(
path_to_images=PATH_TO_IMAGES,
fold='test',
transform=data_transform,
finding=finding,
starter_images=STARTER_IMAGES)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)
inputs, labels, filename = next(iter(dataloader))
dummy_input = torch.autograd.Variable(inputs.cpu())
pb_model_path='experiments/3/checkpoint2.tf.pb'
graph = tf.Graph()
with graph.as_default():
print("** load model **")
graph = load_pb(pb_model_path)
with tf.Session(graph=graph) as sess:
output_tensor = graph.get_tensor_by_name('Sigmoid:0')
input_tensor = graph.get_tensor_by_name('input:0')
output = sess.run(output_tensor, feed_dict={input_tensor: dummy_input})
print(output)
```
#### File: jhonatantirado/CheXNet-Keras/weights.py
```python
import numpy as np
def get_class_weights(total_counts, class_positive_counts, multiply):
"""
Calculate class_weight used in training
Arguments:
total_counts - int
class_positive_counts - dict of int, ex: {"Effusion": 300, "Infiltration": 500 ...}
        multiply - int, positive weighting multiplier
Returns:
class_weight - dict of dict, ex: {"Effusion": { 0: 0.01, 1: 0.99 }, ... }
"""
def get_single_class_weight(pos_counts, total_counts):
denominator = (total_counts - pos_counts) * multiply + pos_counts
return {
0: pos_counts / denominator,
1: (denominator - pos_counts) / denominator,
}
class_names = list(class_positive_counts.keys())
label_counts = np.array(list(class_positive_counts.values()))
class_weights = []
for i, class_name in enumerate(class_names):
class_weights.append(get_single_class_weight(label_counts[i], total_counts))
return class_weights
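if __name__ == "__main__":
    # Illustrative example (the numbers are made up, not taken from any real
    # dataset): with 1000 images, 300 of them positive for "Effusion" and
    # multiply=1, the denominator is (1000 - 300) * 1 + 300 = 1000, so the
    # rarer positive class receives the larger weight.
    example = get_class_weights(1000, {"Effusion": 300, "Infiltration": 500}, multiply=1)
    print(example)  # [{0: 0.3, 1: 0.7}, {0: 0.5, 1: 0.5}]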
``` |
{
"source": "jhonatasfender/googlesearch",
"score": 3
} |
#### File: googlesearch/googlesearch/googlesearch.py
```python
import math
import tempfile
import urllib
from collections import deque
from threading import Thread
from time import sleep
from urllib.request import urlopen
from urllib.request import urlretrieve
import numpy as np
import requests
from PIL.PpmImagePlugin import PpmImageFile
from bs4 import BeautifulSoup
from numpy import long
from pdf2image import convert_from_path
from pytesseract import image_to_string
class GoogleSearch:
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/ 58.0.3029.81 Safari/537.36"
SEARCH_URL = "https://google.com/search"
RESULT_SELECTOR = "#rso .g .r a:first-child:not(.fl)"
TOTAL_SELECTOR = "#result-stats"
RESULTS_PER_PAGE = 10
DEFAULT_HEADERS = {
'User-Agent': USER_AGENT,
"Accept-Language": "pt-BR,pt;q=0.9,en-US;q=0.8,en;q=0.7",
}
__total = None
@staticmethod
def build_request(url, headers=None):
payload = {}
headers = GoogleSearch.DEFAULT_HEADERS if headers is None else headers
resp = requests.request("GET", url, headers=headers, data=payload)
html = ''
if resp.raw.headers.get('Content-Type') == 'application/pdf':
tf = tempfile.NamedTemporaryFile()
urlretrieve(url, tf.name)
images = np.array(convert_from_path(tf.name), dtype=PpmImageFile.__class__)
extracted_text = np.array([image_to_string(img, lang='por') for img in images])
html = "\n".join(extracted_text)
else:
html = resp.text
resp.close()
return html
def set_total(self, soup):
if self.__total is None:
element_html_total = soup.select(GoogleSearch.TOTAL_SELECTOR)
total_text = element_html_total[0].encode('utf-8')
self.__total = long(''.join(text for text in str(total_text) if text.isdigit()))
def search(self, query, num_results=10, prefetch_pages=True, prefetch_threads=10):
search_results = []
pages = int(math.ceil(num_results / float(GoogleSearch.RESULTS_PER_PAGE)))
fetcher_threads = deque([])
for i in range(pages):
start = i * GoogleSearch.RESULTS_PER_PAGE
resp = GoogleSearch.build_request(GoogleSearch.SEARCH_URL + "?q=" + urllib.request.quote(query) + ("" if start == 0 else ("&start=" + str(start))))
soup = BeautifulSoup(resp, "lxml")
results = GoogleSearch.parse_results(soup.select(GoogleSearch.RESULT_SELECTOR))
self.set_total(soup)
if len(search_results) + len(results) > num_results:
del results[num_results - len(search_results):]
search_results += results
if prefetch_pages:
for result in results:
while True:
running = 0
for thread in fetcher_threads:
if thread.is_alive():
running += 1
if running < prefetch_threads:
break
sleep(1)
fetcher_thread = Thread(target=result.getText)
fetcher_thread.start()
fetcher_threads.append(fetcher_thread)
for thread in fetcher_threads:
thread.join()
return SearchResponse(search_results, self.__total)
@staticmethod
def parse_results(results):
return [SearchResult(result.text, result.get('href')) for result in results if result.get('href') and result.text]
class SearchResponse:
def __init__(self, results, total):
self.results = results
self.total = total
class SearchResult:
def __init__(self, title, url):
self.title = title
self.url = url
self.__text = None
self.__markup = None
def getText(self):
markup = self.getMarkup()
if self.__text is None and markup:
soup = BeautifulSoup(markup, "lxml")
for junk in soup(["script", "style"]):
junk.extract()
self.__text = soup.get_text()
return self.__text
def getMarkup(self):
if self.__markup is None:
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'}
self.__markup = GoogleSearch.build_request(self.url, headers)
return self.__markup
def __str__(self):
return str(self.__dict__)
def __unicode__(self):
        return self.__str__()
def __repr__(self):
return self.__str__()
if __name__ == "__main__":
# search = GoogleSearch()
# i = 1
# query = " ".join(sys.argv[1:])
# if len(query) == 0:
# query = "python"
# count = 10
# print("Fetching first " + str(count) + " results for \"" + query + "\"...")
# response = search.search(query, count)
# print("TOTAL: " + str(response.total) + " RESULTS")
# for result in response.results:
# print("RESULT #" + str(i) + ": " + result.url + "\n\n")
# i += 1
response = GoogleSearch.build_request(
"https://ww2.stj.jus.br/processo/dj/documento?seq_documento=20012703&data_pesquisa=02/10/2018¶metro=42",
{
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36'
}
)
print(response)
``` |
{
"source": "JhonatasMenezes/Projetos_Python",
"score": 3
} |
#### File: APIFlask/controllers/book.py
```python
from flask.globals import request
from flask_restplus import Resource, fields
from marshmallow.utils import EXCLUDE
from models.book import BookModel
from schemas.book import BookSchema
from server.instance import server
book_ns = server.book_ns
book_schema = BookSchema()
book_list_schema = BookSchema(many=True)
ITEM_NOT_FOUND = 'Book not found'
item = book_ns.model('Book', {
'title': fields.String(description='book title'),
'pages': fields.Integer(default=0)
})
class Book(Resource):
def get(self, id):
book_data = BookModel.find_by_id(id)
if book_data:
return book_schema.dump(book_data)
return {'message': ITEM_NOT_FOUND}
class BookList(Resource):
def get(self, ):
return book_list_schema.dump(BookModel.find_all()), 200
@book_ns.expect(item)
@book_ns.doc('Create an item')
def post(self, ):
book_json = request.get_json()
book_data = book_schema.load(book_json)
book_data.save_to_db()
return book_schema.dump(book_data), 201
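# A possible client-side call for these resources (the URL prefix below is an
# assumption: the actual route depends on how `server` registers `book_ns`):
#
# import requests
#
# requests.post('http://localhost:5000/book', json={'title': 'Dom Casmurro', 'pages': 256})
# requests.get('http://localhost:5000/book/1')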
```
#### File: projeto_contratacoes/ferramentas/validaDado.py
```python
from ferramentas.create_db import Vagas
from .utilidades import textoCor
"""
Module with functions to validate data such as names, CPFs and birth dates.
NOTE: All the functions are similar and rely on the same principle.
Only the first one is commented in detail; when there are peculiarities,
isolated comments are added to the corresponding function.
"""
def validaNome(mensagem='Nome: '):
"""
    Validates names by checking that every character is a letter
    and not another type of data.
    :param mensagem: message displayed in the input prompt
    :return nome: returns the name as a str
"""
    # loop that allows a new attempt after an error
    while True:
        try:
            # check the received data
            nome = str(input(mensagem)).split()  # turn the input into a list so compound names can be checked
            # if the list is empty, raise an error right away
            if nome == []:
                raise KeyboardInterrupt
            else:
                # check that every item in the list is made of letters only
                for i in nome:
                    if i.isalpha():
                        pass
                    else:
                        raise ValueError
            # turn the list back into a string for the return value
            nome = ' '.join(nome)
        # error handling
        except ValueError:
            # print the error messages in red
            textoCor('Tipo de dado inválido. Tente novamente!', 31)
        except KeyboardInterrupt:
            textoCor('Informação obrigatória. Impossível prosseguir!', 31)
        except:
            textoCor('Erro desconhecido. Tente novamente!', 31)
        else:
            # after passing all the filters, the name is returned as a string
            return nome
def validaCPF(mensagem='CPF (somente números): '):
"""
    Validates CPFs by checking that every character is numeric
    and that no other type of data is present.
    It also checks the length of the CPF: values longer or shorter
    than 11 digits are rejected.
    :param mensagem: message displayed in the input prompt
    :return cpf: returns the CPF as a str
"""
while True:
try:
cpf = str(input(mensagem))
cpf = list(cpf.strip(''))
if cpf == []:
raise KeyboardInterrupt
else:
                # check that every digit is a number
for i in cpf:
if i.isnumeric():
pass
else:
raise ValueError
if len(cpf) > 11 or len(cpf) < 11:
raise Exception
cpf = ''.join(cpf)
except ValueError:
textoCor('Tipo de dado inválido. Tente novamente!', 31)
except KeyboardInterrupt:
textoCor('Informação obrigatória. Impossível prosseguir!', 31)
except Exception:
textoCor('Tamanho inválido. Verifique o dado digitado!', 31)
except:
textoCor('Erro desconhecido. Tente novamente!')
else:
return cpf
def validaNascimento(mensagem='Data nasc. (DD/MM/AAAA): '):
"""
    Validates dates by checking that every character between the '/'
    separators is numeric and not another type of data.
    It also checks whether the day, month and year are within valid limits.
    :param mensagem: message displayed in the input prompt
    :return data: returns the date as a str
    """
    # variable that makes it easy to change the current year
anoAtual = 2021
while True:
try:
data = str(input(mensagem))
data = list(data.split('/'))
if data == []:
raise KeyboardInterrupt
else:
for i in data:
if i.isnumeric():
pass
else:
raise ValueError
                # use the indices to check each part of the date
if int(data[0]) > 31:
raise Exception('Dia')
if int(data[1]) > 12:
raise Exception('Mês')
if int(data[2]) > anoAtual:
raise Exception('Ano')
data = '/'.join(data)
except ValueError:
textoCor('Tipo de dado inválido. Tente novamente!', 31)
except KeyboardInterrupt:
textoCor('Informação obrigatória. Impossível prosseguir!', 31)
except Exception:
textoCor('Conteúdo(s) - DIA, MÊS ou ANO - Inválido(s)! Verifique os dados digitados!', 31)
except:
textoCor('Erro desconhecido. Tente novamente!', 31)
else:
return data
def validaVaga(mensagem='Vaga: ',inserir=False,vagaNome=str):
"""
Função que valida vagas de forma a verificar se a vaga
existe na base de dados, sendo impossível adicionar um
candidato relacionado a uma vaga inexistente.
Se usada no momento de inserir uma nova vaga, retorna True
para uma vaga existente e False para não existência.
:param mensagem: recebe uma mensagem que aparece no input
:return vaga: retorna vaga em formato str
"""
while True:
vaga = ''
try:
if inserir == False:
vaga = str(input(mensagem))
validar = Vagas.select()
for row in validar:
if vaga == row.vaga or int(vaga) == row.id:
existe = True
return vaga
else:
existe = False
if existe:
pass
else:
raise Exception
else:
validar = Vagas.select()
for row in validar:
if vagaNome == row.vaga:
return True
else:
return False
except ValueError:
textoCor('Tipo de dado inválido. Tente novamente!', 31)
except KeyboardInterrupt:
textoCor('Informação obrigatória. Impossível prosseguir!', 31)
except Exception:
textoCor('Vaga não encontrada!', 31)
except:
            textoCor('Erro desconhecido. Tente novamente!', 31)
else:
return vaga
``` |
{
"source": "jhonathanmpg/clase-22",
"score": 4
} |
#### File: clase-22/src/main.py
```python
def main():
"""
main() -> None
"""
myVariable = complex()
print(myVariable)
return None
# This will be a constant
complex_zero = (0.0, 0.0)
def complex(real=0.0, imag=0.0):
"""Form a complex number.
Keyword arguments:
real -- the real part (default 0.0)
imag -- the imaginary part (default 0.0)
"""
if imag == 0.0 and real == 0.0:
return complex_zero
if __name__ == "__main__":
main()
``` |
{
"source": "jhonatheberson/artificial-intelligence",
"score": 3
} |
#### File: gromacs/tools/pymol_rotate.py
```python
import argparse
from pathlib import Path
import numpy as np
import pymol
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('min', type=float,
help="Start point for the torsion")
parser.add_argument('max', type=float,
help="End point for the torsion")
parser.add_argument('nsteps', type=int,
help="Number of torsion steps")
parser.add_argument('-a', '--atom_idx', nargs=4, type=int, required=True,
help="Atom indices for torsion. Example: 6 1 7 8")
parser.add_argument('-f', '--file', required=True,
help="PDB input file")
parser.add_argument('-n', '--name', required=True,
help="PDB file basename")
parser.add_argument('-d', '--destdir',
help="Destination directory")
return parser.parse_args()
def main():
args = parse_args()
angles = np.around(np.linspace(args.min, args.max, args.nsteps), 2)
if args.destdir is None:
destdir = Path.cwd()
else:
destdir = Path(args.destdir)
destdir.mkdir(parents=True, exist_ok=True)
pymol.pymol_argv = ['pymol','-qc']
pymol.finish_launching()
cmd = pymol.cmd
cmd.load(args.file)
cmd.set("retain_order", '1')
for angle in angles:
outfile = destdir.joinpath(f'{args.name}_{angle:07.2f}.pdb')
cmd.set_dihedral(f'id {args.atom_idx[0]}',
f'id {args.atom_idx[1]}',
f'id {args.atom_idx[2]}',
f'id {args.atom_idx[3]}',
angle)
cmd.save(str(outfile))
if __name__ == '__main__':
main()
```
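A minimal sketch (not part of the repository) of the angle grid and output names the script above would generate; the 0-180 degree range and the basename `mol` are assumptions for illustration.
```python
import numpy as np

angles = np.around(np.linspace(0.0, 180.0, 5), 2)    # same grid the script builds
print(angles)                                         # [0., 45., 90., 135., 180.]
print([f'mol_{a:07.2f}.pdb' for a in angles])         # mol_0000.00.pdb ... mol_0180.00.pdb
```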
#### File: examples/run/run.py
```python
import argparse
import pickle
import os.path
import logging
from confparse import read_conf
import numpy as np
from fffit import pso
def rosenbrock(x):
total = 0
for i in range(x.size - 1):
total += 100 * ((x[i] ** 2 - x[i + 1]) ** 2) + (1 - x[i]) ** 2
return total
# END
def createPso(fitness_tests, num_particles, maxiter, initial, bounds):
P = pso.PSO(fitness_tests, maxiter=maxiter)
P.populate(num_particles, initial, bounds)
return P
def write(PSO, fileName):
with open(fileName, 'wb') as p:
pickle.dump(PSO, p)
def read(fileName):
with open(fileName, 'rb') as p:
T = pickle.load(p)
return T
def swarmUpdate(PSO, bounds):
PSO.swarmUpdate(bounds)
def updateFitness(PSO, function):
return PSO.calculateFitness(function)
def initialize(fitness_tests, num_particles, maxiter, initial, bounds,
writeFile):
P = createPso(fitness_tests, num_particles, maxiter, initial, bounds)
write(P, writeFile)
def prepareJobs():
# TODO
pass
def fitness(readFile, writeFile):
if (os.path.exists(readFile)):
T = read(readFile)
fitness = updateFitness(T, rosenbrock)
print(fitness)
write(T, writeFile)
else:
logging.debug("file does not exist with this name.")
def step(bounds, readFile, writeFile):
if (os.path.exists(readFile)):
T = read(readFile)
swarmUpdate(T, bounds)
print([p.position for p in T.swarm])
write(T, writeFile)
else:
logging.debug("file does not exist with this name.")
def parse_args():
parser = argparse.ArgumentParser()
runmode = parser.add_mutually_exclusive_group()
parser.add_argument('-c', '--conffile',
default='conf.ini',
help='Read configuration file',
required=True)
runmode.add_argument('-i', '--init',
help='Create PSO object',
action='store_true')
runmode.add_argument('-p', '--prepare',
help='Prepare and submit fitness jobs.',
action='store_true')
runmode.add_argument('-f', '--fitness',
                         help='calculates the fitness',
action='store_true')
runmode.add_argument('-s', '--steps',
help='calculates the step, updating' +
' speeds and positions',
action='store_true')
return parser.parse_args()
def main():
args = parse_args()
# PSO_setup, test_setup = read_conf(args.conffile)
PSO_setup = read_conf(args.conffile)
if args.init:
initialize(PSO_setup['fitness_tests'],
PSO_setup['num_particles'],
PSO_setup['maxiter'],
PSO_setup['initial'],
PSO_setup['bounds'],
PSO_setup['WriteFile'])
elif args.prepare:
prepareJobs()
elif args.fitness:
fitness(PSO_setup['WriteFile'], PSO_setup['ReadFile'])
elif args.steps:
step(PSO_setup['bounds'],
PSO_setup['WriteFile'],
PSO_setup['ReadFile'])
if __name__ == '__main__':
main()
```
#### File: fffit/helpers/slurm.py
```python
from collections import namedtuple
import subprocess
def submit(job_script, *, job_name="job_by_dispatcher", time="24:00:00",
partition="cluster", nodes=1, cpus_per_task=None, array=None,
afterany=None, shell='#!/bin/bash', **sbatch_opts):
"""Call sbatch with given options and job_script as a script piped to it.
Parameters
----------
job_script : str,
Script to be submitted. Should be the contents of the `job.sh` file if
the user submitted by calling `sbatch job.sh`.
job_name : str, optional (default="job_by_dispatcher")
Name of the job, as in sbatch --job-name option.
time : str, optional (default="24:00:00")
Time limit, as in sbatch `--time` option.
partition : str, optional (default="cluster")
Partition (queue) name, as expected in sbatch `--partition` option.
nodes : str or int, optional (default="1")
Number of nodes specified as in sbatch `--nodes`.
cpus_per_task : int, optional (default=None)
Number of cpus per task specified as in sbatch `--cpus-per-task`.
array : str, optional (default=None)
Array settings expressed as a sequence or range on indexes, as per
sbatch `--array` option.
afterany : str or list, optional (default=None)
List of dependencies for this job, as specified in sbatch
`--dependency=afterany`.
**sbatch_opts : dict (as list of keyword arguments)
Extra options passed to sbatch, such that key=val is added to the
arguments as `--key=val`.
Returns
-------
returncode, int
        Return code given by sbatch. 0 = success.
jobid, int or None
Job ID as interpreted from the standard output.
stdout, str
        sbatch's standard output.
stderr, str
sbatch's standard error. If returncode == 0, this should be empty.
"""
args = ['sbatch',
'--job-name=' + job_name,
'--time=' + time,
'--partition=' + partition,
'--nodes=' + str(nodes)]
if cpus_per_task is not None:
args.append('--cpus-per-task=' + str(cpus_per_task))
if array is not None:
if isinstance(array, str):
arraystr = array
elif isinstance(array, list):
arraystr = ','.join(map(str, array))
# TODO: detect range formats?
# For example: if array == list(range())
else:
raise TypeError(f"Invalid format for array: {array}")
args.append('--array=' + arraystr)
if afterany is not None:
# TODO: implement multiple dependencies.
dependencystr = 'afterany:'
if isinstance(afterany, str):
dependencystr += afterany
elif isinstance(afterany, list):
dependencystr += ':'.join(map(str, afterany))
else:
raise TypeError(f"Invalid format for afterany: {afterany}")
args.append('--dependency=' + dependencystr)
for option, value in sbatch_opts.items():
args.append('--' + option + '=' + str(value))
if shell is not None:
if not job_script.startswith('#!'):
job_script = '\n'.join((shell, job_script))
# TODO: Print args if loglevel=DEBUG
print(' '.join(args))
sbatch = subprocess.run(args, input=job_script,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
if len(sbatch.stdout) > 0:
jobid = int(sbatch.stdout.split()[-1])
else:
jobid = None
result = namedtuple("result", ["returncode", "jobid", "stdout", "stderr"])
# https://docs.quantifiedcode.com/python-anti-patterns/readability/
return result(sbatch.returncode, jobid, sbatch.stdout, sbatch.stderr)
def submit_script(jobfile, *args, **kwargs):
"""Call the submit function with the contents of `jobfile'.
Parameters
----------
jobfile : str, Path
File containing the job script to be submitted.
*args, **kwargs: optional
Arguments to submit().
Returns
-------
returncode, int
        Return code given by sbatch. 0 = success.
jobid, int or None
Job ID as interpreted from the standard output.
stdout, str
        sbatch's standard output.
stderr, str
sbatch's standard error. If returncode == 0, this should be empty.
"""
with open(jobfile, 'r') as f:
job_script = f.read()
return submit(job_script, *args, **kwargs)
```
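A hedged usage sketch of the helper above; the one-line job script and the partition name are assumptions for illustration, and only the API defined in this file is used.
```python
from fffit.helpers import slurm

# Hypothetical job script body; any valid shell script text works here.
result = slurm.submit("srun echo hello", job_name="demo", time="00:10:00",
                      partition="cluster", nodes=1)
if result.returncode == 0:
    print(f"Submitted job {result.jobid}")
else:
    print(result.stderr)
```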
#### File: src/fffit/pso.py
```python
import logging
import multiprocessing as mp
import sys
import numpy as np
# TODO: inherit logger
#logging.basicConfig(filename='output.log', level=logging.DEBUG,
# format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
class Particle(object):
"""Creates the particle and updates position and velocity."""
def __init__(self, x0, bounds, w=0.5, c=(2,2), sigma=None, vsigma=None):
"""Initialize the particle.
Args:
            :param x0(array-like): Initial point around which the Gaussian
            sample space is created.
            :param bounds(:obj:`list` of :obj:`float`): Limits for the sample
            space to create the Gaussian.
"""
self.pos_best = [] # Best position individual.
self.fitness_best = None # Error best individual.
self.curr_fitness = None
self.w = w
self.c = c
bounds = np.array(bounds)
if sigma is None:
sigma = np.abs(bounds[1] - bounds[0])
elif isinstance(sigma, float) or isinstance(sigma, int):
sigma = np.abs(bounds[1] - bounds[0])/sigma
self.position = np.random.normal(x0, sigma)
if vsigma is None:
vsigma = np.abs(bounds[1] - bounds[0])
elif isinstance(vsigma, float) or isinstance(vsigma, int):
vsigma = np.abs(bounds[1] - bounds[0])/vsigma
self.velocity = np.random.normal(np.zeros(len(x0)), vsigma)
def check_fitness(self):
"""Update personal best fitness."""
# Check to see if the current position is an individual best:
if self.fitness_best is None or self.curr_fitness < self.fitness_best:
self.pos_best = self.position
self.fitness_best = self.curr_fitness
def update_velocity(self, pos_best_g):
"""Update new particle velocity.
Args:
:param pos_best_g(str): best overall swarm position.
Returns:
:return: Void.
"""
# TODO Make these adjustable parameters
r1 = np.random.random(len(self.velocity))
r2 = np.random.random(len(self.velocity))
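        # Standard PSO update: inertia * old velocity + cognitive pull toward the
        # particle's own best + social pull toward the swarm's best position.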
vel_cognitive = self.c[0] * r1 * (self.pos_best - self.position)
vel_social = self.c[1] * r2 * (pos_best_g - self.position)
self.velocity = self.w * self.velocity + vel_cognitive + vel_social
def update_position(self, bounds):
"""Update the particle position based off new velocity updates.
Args:
:param bounds(:obj:`list` of :obj:`str`): Limits for the sample
space to create the Gaussian.
Returns:
:return: Void.
"""
self.position += self.velocity
# TODO Deal with velocities when particle goes out of bounds
np.clip(self.position, bounds[0], bounds[1], out=self.position)
np.clip(self.velocity, bounds[0], bounds[1], out=self.velocity)
self.velocity[np.isclose(self.position, bounds[0])] *= -1
self.velocity[np.isclose(self.position, bounds[1])] *= -1
class PSO(object):
"""Contains the population and methods for performing steps."""
def __getstate__(self):
"""Remove unpickable entries from object.
Currently, removes fitness tests as callable functions.
"""
state = self.__dict__.copy()
del state['tests']
if 'hooks' in state:
del state['hooks']
return state
def __setstate__(self, state):
"""Recover unpickable items to restore original object.
Currently, calls self.load_tests in order to get callable fitness
tests and self.load_hooks to get pre_ and _post step hooks.
"""
self.__dict__.update(state)
if 'testfiles' in self.__dict__:
# TODO: log
self.load_tests()
if 'hookfiles' in self.__dict__:
# TODO: log
self.load_hooks(self.hookfiles)
def __init__(self, maxiter=None, goal=1.0, w=0.5, c = (2,2), submit_to_cluster=False):
"""Initialize the PSO object."""
self.ncpu = 1
self.goal = goal
self.w = w
self.c = c
self.submit_to_cluster = submit_to_cluster
self.fitness = None
self.step_number = 0
self.maxiter = maxiter
self.swarm = None
if self.submit_to_cluster:
# TODO: correctly handle the cluster and multitest cases.
raise NotImplementedError('Cluster submission in review.')
def populate(self, num_particles, x0=None, bounds=None, sigma=None,
vsigma=None):
"""Create the population of particles that is the swarm.
Args:
:param num_particles(:obj:`int`): Number of particles to be
created.
:param initial(): Initial value for the sample space to create the
Gaussian.
:param bounds(:obj:`list` of :obj:`str`): Limits for the sample
space to create the Gaussian.
Returns:
:return swarm(:obj:`list` of :obj:`Particles`): a list of swarms.
"""
if self.swarm is None:
self.bounds = bounds
self.swarm = [Particle(x0, bounds, w=self.w, c=self.c,
sigma=sigma, vsigma=vsigma) for i in range(num_particles)]
else:
raise RuntimeError("Tried to populate non-empty swarm")
def evaluate_single_fitness_test(self, func,
enum_particles=False, add_step_num=False,
**kwargs):
"""Run the given function as the fitness test for all particles.
Parameters:
-----------
fun : callable
The fitness test function to be minimized:
``func(particle.position, **kwargs) -> float``.
enum_particles : boolean
If `True`, the swarm will be enumerated and the particle index will
be passed to `func` as keyword `part_idx`, added to `kwargs`
add_step_num : boolean
If `True`, the current step number will be passed to `func`
as keyword `step_num`, added to `kwargs`
**kwargs: Other keywords to the fitness function, will be passed as is.
"""
if add_step_num:
kwargs['step_num'] = self.step_number
if self.ncpu == 1:
if enum_particles:
for part_idx, particle in enumerate(self.swarm):
kwargs['part_idx'] = part_idx
particle.curr_fitness = func(particle.position, **kwargs)
else:
for particle in self.swarm:
particle.curr_fitness = func(particle.position, **kwargs)
elif self.ncpu > 1:
with mp.Pool(processes=self.ncpu) as pool:
argslist = []
p = []
for part_idx, particle in enumerate(self.swarm):
argslist.append(dict(kwargs))
# argslist[-1]['x'] = particle.position
if enum_particles:
argslist[-1]['part_idx'] = part_idx
for idx, args in enumerate(argslist):
p.append(pool.apply_async(func, args=(self.swarm[idx].position,),kwds=args))
results = [ r.get() for r in p ]
for part_idx, particle in enumerate(self.swarm):
particle.curr_fitness = results[part_idx]
def calculate_global_fitness(self):
"""Calculate the fitness of the function or sample space.
Returns:
:return fitness(:obj:`float`): Returns the fitness of the function
or sample space.
"""
self.swarm_radius = 0
for particle in self.swarm:
particle.check_fitness()
# determine if current particle is the best(globally)
if self.fitness is None or particle.curr_fitness < self.fitness:
self.pos_best_glob = np.array(particle.position)
self.fitness = float(particle.curr_fitness)
# Stop criteria
for particle in self.swarm:
dist = np.linalg.norm(particle.position - self.pos_best_glob)
if dist > self.swarm_radius:
self.swarm_radius = dist
return self.fitness # Do we actually need to return something?
def update_swarm(self):
"""Update the swarm with new positions and speeds.
Returns:
:return swarm(:obj:`list` of :obj:`Particles`): returns a list of
swarms.
"""
if self.fitness is None:
logging.error("Cannot update the swarm before calculating Fitness")
raise RuntimeError("Updated the swarm before calculating Fitness")
# cycle through swarm and update velocities and position
for particle in self.swarm:
particle.update_velocity(self.pos_best_glob)
particle.update_position(self.bounds)
if self.submit_to_cluster:
self.curr_iter['update'] += 1
def do_full_step(self, func, **kwargs):
"""Perform a full PSO step.
This method goes through all other methods in order to perform a full
PSO step, so it can be called from a loop in the run() method.
"""
if self.fitness is not None and self.step_number < self.maxiter:
self.update_swarm()
if self.submit_to_cluster:
raise NotImplementedError('Multistep jobs are under revision.')
else:
self.evaluate_single_fitness_test(func, **kwargs)
self.calculate_global_fitness()
self.step_number += 1
def run(self, func, PSO_DEBUG=None, **kwargs):
"""Perform a full optimization run.
        Runs successive PSO steps, updating velocities and positions, and
        checks the stop criteria while searching for the best fitness.
Parameters
----------
func : callable
            Function that calculates the fitness.
Returns
-------
The dictionary that stores the optimization results.
"""
self.swarm_radius = None
# TODO make a better radius-based stop criterion.
while (self.swarm_radius is None or
self.step_number < self.maxiter and
self.swarm_radius > 1e-3):
self.do_full_step(func, **kwargs)
if PSO_DEBUG is not None:
with open(PSO_DEBUG, 'a') as dbg_file:
curr_best = min([p.curr_fitness for p in self.swarm])
print(f"# {self.step_number} {curr_best} {self.fitness}")
print(f"\n\n# {self.step_number} {curr_best} {self.fitness}",
file=dbg_file)
np.savetxt(dbg_file,
[(*p.position, p.curr_fitness)
for p in self.swarm])
if self.fitness < self.goal:
break
self.results = {}
self.results['best_pos'] = self.pos_best_glob
self.results['fitness'] = self.fitness
return self.results
``` |
{
"source": "jhonatheberson/autonomous-robotic-systems",
"score": 3
} |
#### File: projeto_1/projeto_1_b/script.py
```python
import math
import numpy as np
point_init = (-0.0000,-0.0000, 0.0000)
point_end = (+1.5250e+00, -1.4750e+00, +5.0000e-02)
#point_end = (1.5250e+00, 1.8000e+00, 0.0000)
try:
import sim
except:
print ('--------------------------------------------------------------')
print ('"sim.py" could not be imported. This means very probably that')
print ('either "sim.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "sim.py"')
print ('--------------------------------------------------------------')
print ('')
import time
import ctypes
def send_path_4_drawing(path, sleep_time = 0.07):
#the bigger the sleep time the more accurate the points are placed but you have to be very patient :D
for i in path:
#point2send = transform_points_from_image2real(i)
#print(point2send)
#print(type(point2send))
packedData=sim.simxPackFloats(i.flatten())
#print(packedData)
#print(type(packedData))
raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData)
#print(raw_bytes)
#print(type(raw_bytes))
returnCode=sim.simxWriteStringStream(clientID, "path_coord", raw_bytes, sim.simx_opmode_oneshot)
time.sleep(sleep_time)
def polinomio(point_init, point_end):
sigma = 1
deltaX = point_end[0] - point_init[0]
deltaY = point_end[1] - point_init[1]
alfa_init = math.tan(point_init[2])
alfa_end = math.tan(point_end[2])
if point_init[2] >= ((math.pi/2) - sigma) and point_init[2] <= ((math.pi/2) + sigma) and point_end[2] >= ((math.pi/2) - sigma) and point_end[2] >= ((math.pi/2) + sigma):
print('i')
b1 = deltaY
b2 = 0
a0 = point_init[0]
a1 = 0
a2 = 3*deltaX
a3 = -2*deltaX
b0 = point_init[1]
b3 = deltaY-b1-b2
elif point_init[2] >= ((math.pi/2) - sigma) and point_init[2] <= ((math.pi/2) + sigma):
print('ii')
a3 = -(deltaX/2)
b3 = 1
a0 = point_init[0]
a1 = 0
a2 = deltaX-a3
b0 = point_init[1]
b1 = 2*(deltaY-alfa_end*deltaX) - alfa_end*a3 + b3
b3 = (2*alfa_end*deltaX-deltaY) + alfa_end*a3 - 2*b3
elif point_end[2] >= ((math.pi/2) - sigma) and point_init[2] <= ((math.pi/2) + sigma):
print('iii')
a1 = 3*(deltaX/2)
b2 = 1
a0 = point_init[0]
a2 = 3*deltaX - 2*a1
a3 = a1 - 2*deltaX
b0 = point_init[1]
b1 = alfa_init*a1
b3 = deltaY - alfa_init*a1 - b2
else:
print('iv')
a1 = deltaX
a2 = 0
a0 = point_init[0]
a3 = deltaX - a1 - a2
b0 = point_init[1]
b1 = alfa_init*a1
b2 = 3*(deltaY-alfa_end*deltaX) + 2*(alfa_end-alfa_init)*a1 + alfa_end*a2
b3 = 3*alfa_end*deltaX - 2*deltaY - (2*alfa_end-alfa_init)*a1 - alfa_end*a2
result = []
orien = []
x = np.arange(0,1,0.01)
for i in range(len(x)):
fx = a0 + a1*x[i] + a2*x[i]**2 + a3*x[i]**3
fy = b0 + b1*x[i] + b2*x[i]**2 + b3*x[i]**3
if fx != 0:
orientation = np.arctan(float(fy/fx))
else:
orientation = 0
position = np.array((fx, fy, 0))
result.append(position)
return (result, orientation)
def coeficientes (origem,destino):
sigma=1
deltaX = origem[0] - destino[0]
deltaY = origem[1] - destino[1]
alfa_init = math.tan(point_init[2])
alfa_end = math.tan(point_end[2])
if point_init[2] >= ((math.pi/2) - sigma) and point_init[2] <= ((math.pi/2) + sigma) and point_end[2] >= ((math.pi/2) - sigma) and point_end[2] >= ((math.pi/2) + sigma):
print('i')
b1 = deltaY
b2 = 0
a0 = point_init[0]
a1 = 0
a2 = 3*deltaX
a3 = -2*deltaX
b0 = point_init[1]
b3 = deltaY-b1-b2
elif point_init[2] >= ((math.pi/2) - sigma) and point_init[2] <= ((math.pi/2) + sigma):
print('ii')
a3 = -(deltaX/2)
b3 = 1
a0 = point_init[0]
a1 = 0
a2 = deltaX-a3
b0 = point_init[1]
b1 = 2*(deltaY-alfa_end*deltaX) - alfa_end*a3 + b3
b3 = (2*alfa_end*deltaX-deltaY) + alfa_end*a3 - 2*b3
elif point_end[2] >= ((math.pi/2) - sigma) and point_init[2] <= ((math.pi/2) + sigma):
print('iii')
a1 = 3*(deltaX/2)
b2 = 1
a0 = point_init[0]
a2 = 3*deltaX - 2*a1
a3 = a1 - 2*deltaX
b0 = point_init[1]
b1 = alfa_init*a1
b3 = deltaY - alfa_init*a1 - b2
else:
print('iv')
a1 = deltaX
a2 = 0
a0 = point_init[0]
a3 = deltaX - a1 - a2
b0 = point_init[1]
b1 = alfa_init*a1
b2 = 3*(deltaY-alfa_end*deltaX) + 2*(alfa_end-alfa_init)*a1 + alfa_end*a2
b3 = 3*alfa_end*deltaX - 2*deltaY - (2*alfa_end-alfa_init)*a1 - alfa_end*a2
a = [a0,a1,a2,a3]
b = [b0,b1,b2,b3]
x = np.arange(0,1,0.01)
for i in range(len(x)):
        fx = a0 + a1*x[i] + a2*x[i]**2 + a3*x[i]**3
fy = b0 + b1*x[i] + b2*x[i]**2 + b3*x[i]**3
#if fx != 0:
#orientation = np.arctan(float(fy/fx))
#else:
# orientation = 0
return (a,b)
print ('Program started')
sim.simxFinish(-1) # just in case, close all opened connections
clientID=sim.simxStart('127.0.0.1',19999,True,True,5000,5) # Connect to CoppeliaSim
if clientID!=-1:
print ('Connected to remote API server')
# Now try to retrieve data in a blocking fashion (i.e. a service call):
res,objs=sim.simxGetObjects(clientID,sim.sim_handle_all,sim.simx_opmode_blocking)
if res==sim.simx_return_ok:
print ('Number of objects in the scene: ',len(objs))
else:
print ('Remote API function call returned with error code: ',res)
time.sleep(2)
#############################
    # stabilizing controller test
caminho, orientation = polinomio(point_init,point_end)
send_path_4_drawing(caminho, 0.05)
#v, w = controlador_posicao(posicao_robo, orientacao_robo)
    # stabilizing controller test
#############################
    # Get the object handles
err_code,motor_direito = sim.simxGetObjectHandle(clientID,"Motor_Direito", sim.simx_opmode_blocking)
err_code,motor_esquerdo = sim.simxGetObjectHandle(clientID,"Motor_Esquerdo", sim.simx_opmode_blocking)
err_code,carro = sim.simxGetObjectHandle(clientID,"Carro", sim.simx_opmode_blocking)
    # Zero the wheel speeds
err_code = sim.simxSetJointTargetVelocity(clientID,motor_direito,0,sim.simx_opmode_streaming)
err_code = sim.simxSetJointTargetVelocity(clientID,motor_esquerdo,0,sim.simx_opmode_streaming)
    # Robot position and orientation
err_code,posicao_robo = sim.simxGetObjectPosition(clientID,carro,-1, sim.simx_opmode_blocking)
er_code,orientacao_robo = sim.simxGetObjectOrientation(clientID,carro,-1,sim.simx_opmode_streaming)
    # Controller gains
k_theta = 2.0
k_l = 0.1
    # Robot parameters
i = 1
v = 0.5
    d = 0.21 # distance between the wheels (axle length)
    rd = 0.0625 # right wheel radius
    re = 0.0625 # left wheel radius
caminho,orientacao = polinomio(point_init, point_end)
lamb = 0
while True:
        # Robot position and orientation
err_code,posicao_robo = sim.simxGetObjectPosition(clientID,carro,-1, sim.simx_opmode_blocking)
er_code,orientacao_robo = sim.simxGetObjectOrientation(clientID,carro,-1,sim.simx_opmode_streaming)
theta_robo = orientacao_robo[2] + math.pi/2
        # turning radius
a,b = coeficientes(point_init,point_end)
dx = a[1] +2*a[2]*lamb + 3*a[3]*(lamb**2)
dy = b[1] + 2*b[2]*lamb + 3*b[3]*(lamb**2)
d2x = 2*a[2] + 6*a[3]*lamb
d2y = 2*b[2] + 6*b[3]*lamb
r = ((((dx**2)+(dy**2))**1.5)/((d2y*dx)-(d2x*dy)))
k = (1/r)
#delta theta
theta_SF = math.atan((b[1] + 2*b[2]*lamb + 3*b[3]*lamb**2)/(a[1] + 2*a[2]*lamb + 3*a[3]*lamb**2))
Delta_theta = theta_robo - theta_SF
        # ensure the correct sign of delta L
theta_posicao_robo = math.atan2(posicao_robo[1],posicao_robo[0])
delta_L = np.linalg.norm(posicao_robo-caminho[0])
ponto_curva = caminho[0]
for i in range(len(caminho)-1):
distance = np.linalg.norm(posicao_robo-caminho[i+1])
if(delta_L > distance):
delta_L = distance
ponto_curva = caminho[i+1]
theta_ref = math.atan2(ponto_curva[1],ponto_curva[0])
if (theta_ref > theta_posicao_robo):
delta_L = -delta_L
i = i +1
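        # Path-following control law: u corrects heading and lateral errors, w adds
        # the feed-forward curvature term, and wd/we map (v, w) onto wheel speeds.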
u = -(k_theta*Delta_theta + (k_l*delta_L*v*math.sin(Delta_theta)/Delta_theta))
w = u + ((k*v*math.cos(Delta_theta))/(1-(k*delta_L)))
wd = (v/rd) + (d/(2*rd))*w
we = (v/re) - (d/(2*re))*w
err_code = sim.simxSetJointTargetVelocity(clientID,motor_direito,wd,sim.simx_opmode_streaming)
err_code = sim.simxSetJointTargetVelocity(clientID,motor_esquerdo,we,sim.simx_opmode_streaming)
print('lamb :', lamb)
if (lamb >= 1):
err_code = sim.simxSetJointTargetVelocity(clientID,motor_direito,0.0,sim.simx_opmode_streaming)
err_code = sim.simxSetJointTargetVelocity(clientID,motor_esquerdo,0.0,sim.simx_opmode_streaming)
break
lamb = lamb + 0.01
err_code = sim.simxSetJointTargetVelocity(clientID,motor_direito,0.0,sim.simx_opmode_streaming)
err_code = sim.simxSetJointTargetVelocity(clientID,motor_esquerdo,0.0,sim.simx_opmode_streaming)
print ('Finalizado seguidor de caminho')
# Now close the connection to CoppeliaSim:
sim.simxFinish(clientID)
else:
print ('Failed connecting to remote API server')
print ('Program ended')
``` |
{
"source": "jhonatheberson/digital-image-processing",
"score": 3
} |
#### File: second_unit/python/canny.py
```python
import numpy as np
import cv2 as cv
def bordar_random(img, fundo, fator=20):
retorno = fundo.copy()
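    # Run Canny with decreasing thresholds: early (high-threshold) passes keep only
    # strong edges and draw them as large dots; later passes add finer detail with
    # progressively smaller dots.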
for i in range(6, 0, -1):
pontos = cv.Canny(img, i*fator, i*fator*3)
pontos = np.where(pontos != 0)
cordenadas = zip(pontos[0], pontos[1])
for p in cordenadas:
cor = img[p]
retorno = cv.circle(retorno, (p[1], p[0]), i, (int(cor[0]), int(cor[1]), int(cor[2])), -1, lineType=cv.LINE_AA)
return retorno
```
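A minimal usage sketch for `bordar_random`, assuming an input image `foto.jpg` exists on disk; the file names and the white background are illustrative assumptions.
```python
import numpy as np
import cv2 as cv
from canny import bordar_random

img = cv.imread('foto.jpg')                       # BGR input image
fundo = np.full_like(img, 255)                    # plain white canvas of the same size
pontilhado = bordar_random(img, fundo, fator=20)  # stippled rendition built from Canny edges
cv.imwrite('pontilhado.jpg', pontilhado)
```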
#### File: second_unit/python/homomorphicFilter.py
```python
import numpy as np
import cv2 as cv
def visualizador(complexo_img):
magnitude = np.log(np.abs(complexo_img) + 10**-10)
magnitude = magnitude / np.max(magnitude)
fase = (np.angle(complexo_img) + np.pi) / (np.pi * 2)
return magnitude, fase
def dft_np(img, vis=False, shift=False):
complexo = np.fft.fft2(img)
if shift:
complexo_img = np.fft.fftshift(complexo)
else:
complexo_img = complexo.copy()
if vis:
magnitude, fase = visualizador(complexo_img)
cv.imshow('Magnitude', magnitude)
cv.imshow('Fase', fase)
return complexo
def dft_inv_np(complexo):
img_comp = np.fft.ifft2(complexo)
img = np.real(img_comp)
return img/255
def gerar_filtro(img, x_0, x_min, x_max, c):
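    # Build a radial frequency-domain mask: compute each pixel's distance from the
    # image centre and apply a smooth tanh step going from x_min (low frequencies)
    # to x_max (high frequencies), centred at radius x_0 with softness c.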
xx, yy = np.mgrid[:img.shape[0], :img.shape[1]]
circle = np.sqrt((xx - img.shape[0] / 2) ** 2 + (yy - img.shape[1] / 2) ** 2)
if c == 0:
c = 10**-10
return x_min + (np.tanh((circle - x_0)/c) + 1) / 2 * (x_max - x_min)
def normalizacao(x):
min_v = np.min(x)
ran_v = np.max(x) - min_v
return (x - min_v) / ran_v
def filtro_homomorfico(img, x_0, x_min, x_max, c, logs=True):
if logs:
img_return = img + 1.
img_return = np.log(img_return)
else:
img_return = img
img_return = dft_np(img_return)
filtro = gerar_filtro(img_return, x_0, x_min, x_max, c)
img_return = img_return * np.fft.fftshift(filtro)
filtro_return, _ = visualizador(img_return)
filtro_return = np.fft.fftshift(filtro_return)
img_return = dft_inv_np(img_return)
filtro_return[:,:filtro_return.shape[1]//2] = filtro[:,:filtro_return.shape[1]//2]
return normalizacao(np.exp(img_return)), filtro_return
# Callback required by cv.createTrackbar (it intentionally does nothing).
def faz_nada(*args, **kwargs):
pass
def main():
cv.getBuildInformation()
# cap = cv.VideoCapture('Bridge.mp4')
# cap = cv.VideoCapture('Night_Scene.mp4')
cap = cv.VideoCapture('Highway.mp4')
if not cap.isOpened():
print('Falha ao abrir o video.')
exit(-1)
cv.namedWindow('Filtro')
cv.createTrackbar('log', 'Filtro', 1, 1, faz_nada)
cv.createTrackbar('c', 'Filtro', 10, 100, faz_nada)
cv.createTrackbar('raio', 'Filtro', 20, 1000, faz_nada)
cv.createTrackbar('min', 'Filtro', 0, 100, faz_nada)
cv.createTrackbar('max', 'Filtro', 100, 100, faz_nada)
speed = 5
descarte_frame = 0
while True:
ret, frame = cap.read()
if ret:
if descarte_frame == 0:
logs = cv.getTrackbarPos('log', 'Filtro')
c = cv.getTrackbarPos('c', 'Filtro')
r = cv.getTrackbarPos('raio', 'Filtro')
v_min = cv.getTrackbarPos('min', 'Filtro')
v_max = cv.getTrackbarPos('max', 'Filtro')
v_min = v_min / 100
v_max = v_max / 100
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
cv.imshow('Frame', frame)
img, filtro = filtro_homomorfico(frame, r, v_min, v_max, c, logs==1)
cv.imshow('Homomorfico', img)
cv.imshow('Filtro', filtro)
descarte_frame = (descarte_frame + 1) % speed
key = cv.waitKey(15)
if key == 27:
break
else:
# break
cap = cv.VideoCapture('Highway.mp4')
cap.release()
cv.destroyAllWindows()
if __name__ == '__main__':
main()
# Delete before posting:
# import cProfile
# cProfile.run('main()', 'output.dat')
``` |
{
"source": "jhonatheberson/digital-systems",
"score": 3
} |
#### File: digital-systems/board computer/serial_comunication.py
```python
import serial
import logging
import threading
import time
port = "/dev/ttyACM0"
speed = 9600
conected = serial.Serial(port, speed)
opcao = 0
def thread_data(conected):
print("---------------------------DADOS--------------------------")
arduino = conected.readline().decode('utf-8').split('\r\n')
print(arduino[0])
arduino = conected.readline().decode('utf-8').split('\r\n')
print(arduino[0])
arduino = conected.readline().decode('utf-8').split('\r\n')
print(arduino[0])
print("==========================================================")
def thread_time():
while True:
time.sleep(100)
        thread_data(conected)
while opcao != "100":
print("==========================================================")
print("--------------------COMPUADOR DE BORDO--------------------")
print("==========================================================")
opcao = input("Digite 0 para desligar farol\n Digite 1 para ligar farol\n"
"Digite 2 para ligar pisca esquerda\n Digite 3 para ligar pisca direita\n"
"Digite 100 para parar\n:")
if opcao == "1":
conected.write(b'1')
elif opcao == "0":
conected.write(b'0')
elif opcao == "2":
conected.write(b'2')
elif opcao == "3":
conected.write(b'3')
    threading.Thread(target=thread_time, daemon=True).start()
    threading.Thread(target=thread_data, args=(conected,)).start()
conected.close()
``` |
{
"source": "jhonatheberson/doc-sphinx",
"score": 3
} |
#### File: jhonatheberson/doc-sphinx/ga.py
```python
import logging
import numpy as np
import random
import multiprocessing as mp
import sys
class Individual:
"""
Individual(x0=np.zeros(2), ndim=2, bounds=np.array((np.ones(2) * 10 * -1, np.ones(2) * 10)), type_create='uniform')
Cria os individuos que compoem a população da classe Genetic
Atributos
----------
fitness : None or flout
é o melhor valor do individuo
size_individual : int
é o tamanho do indiviuo
create_individual : class 'fffit.ga.Individual'
cria o individuo com suas caracteristicas
Parametros
----------
type_create : str, optional
Define qual será o tipo de criação da população inicial
x0 : np.ndarray, optional
Define qual o ponto inicial até os limites de onde vai criar o valor do indviduo
bounds : numpy.ndarray
Define até onde pode ir os valores dos individuo, como inferio e superior
ndim : integer
Define quantos dimensões tem o indiviuo ou seja o tamanho do array
sigma :float, opcional
Define a probabilidade do individuo ter mutação
Exemplos
--------
>>> from fffit import ga
>>> import numpy as np
>>> ranges = 2
>>> ndim = 2
>>> bounds = np.array((np.ones(2) * 10 * -1, np.ones(2) * 10))
>>> individual = ga.Individual(x0=np.zeros(2), ndim=2, bounds=bounds, sigma=None, type_create='uniform')
<fffit.ga.Individual object at 0x7f8968797c18>
>>> individual.chromosome
array([ 5.7287427 , -0.54066483])
"""
def __init__(self, type_create='uniform', x0=None, bounds=None, ndim=None, sigma=None):
self.fitness = None
self.size_individual = ndim
self.create_individual(type_create, x0, bounds, sigma)
super().__init__()
def create_individual(self, type_create, x0, bounds, sigma):
"""
create_individual(type_create=uniform, x0=np.zeros(2), np.array((np.ones(2) * 10 * -1, np.ones(2) * 10)), sigma=None)
Cria os chromosome que pertence a classe Individual
Parametros
----------
type_create : str, optional
Define qual será o tipo de criação da população inicial
x0 : np.ndarray, optional
Define qual o ponto inicial até os limites de onde vai criar o valor do indviduo
bounds : numpy.ndarray
Define até onde pode ir os valores dos individuo, como inferio e superior
sigma :float, opcional
Define a probabilidade do individuo ter mutação
"""
if type_create == 'gaussian':
if sigma is not None and not (np.shape(sigma) == (self.size_individual,) or isinstance(sigma, float) or isinstance(sigma, int)):
raise ValueError(f'sigma bust be a single float or an array with {self.size_individual} entries.')
self.chromosome = np.random.normal(x0, sigma, size=self.size_individual)
elif type_create == 'integer':
if bounds is None or np.shape(bounds) != (2, self.size_individual):
raise ValueError(f'bounds must be of shape (2, {self.size_individual}). Instead, got {bounds}.')
self.chromosome = np.random.randint(bounds[0], bounds[1], size=self.size_individual)
elif type_create == 'uniform':
if bounds is None or np.shape(bounds) != (2, self.size_individual):
raise ValueError(f'bounds must be of shape (2, {self.size_individual}). Instead, got {bounds}.')
self.chromosome = np.random.uniform(bounds[0], bounds[1], size=self.size_individual)
else:
raise ValueError(f'Invalid individual creation type: {type_create}')
class Genetic(object):
"""
Genetic(maxiter=1000, goal=0, cross_over='one_point',
mutation_probability=0.01, mutation='uniform',
selection_method='elitism',num_parents=2,
num_elitism=10, bounds=np.array((np.ones(2) * 10 * -1, np.ones(2) * 10)))
É a classe que é responsavel por realizar a criar a população,
selecionar os parentes da população, realizar os cruzamentos e mutação.
Parametros
----------
goal : float, opcional
Define o valor a qual queremos nos aproximar
bounds : numpy.ndarray
Define até onde pode ir os valores dos individuo, como inferio e superior
mutation_probability : float
Define a probabilidade de ocorrer a mutação
selection_probability : float
Define a probabilidade de ocorrer a seleção de pais
sigma : float
Define a probabilidade do individuo ter mutação
num_parents : integer
Define o numero de parentes que será esolhido no metodo da seleção
num_elitism : integer
Define o numero de pais que será preservado entre as gerações
maxiter : integer,opcional
Define o numero de interações maximo que o algoritmo irá fazer para encontrar o resultado
selection_method : str, opcional
Define o metodo de seleção que será usado para escolher os pais da proxima geração
cross_over : str, opcional
Define o metodo de cruzamento que será usado
mutation : str, opcional
Define o metodo de mutação que será usado
submit_to_cluster : bool, opcional
Define se a meta-hurística será executada no cluster
Exemplos
--------
>>> from fffit import ga
>>> bounds = np.array((np.ones(2) * 10 * -1, np.ones(2) * 10))
>>> ga.Genetic(maxiter=1000, goal=0, cross_over='one_point',
mutation_probability=0.01, mutation='uniform',
selection_method='elitism',num_parents=2,
num_elitism=10, bounds=bounds)
>>>
"""
def __init__(self, goal=1.0 ,bounds = None,
mutation_probability=0.5, selection_probability=0.5,
sigma=0.5, num_parents=2,num_elitism=2, maxiter=None,
selection_method='elitism', cross_over='uniform',
mutation='gaussian', submit_to_cluster=False):
"""Inicializar o objeto PSO"""
self.num_parents = num_parents
self.num_elitism = num_elitism
self.mutation_probability = mutation_probability
self.selection_probability = selection_probability
self.sigma = sigma # Mutation size
self.population = []
self.model = np.array(np.ones(5))
self.bounds = bounds
self.ncpu = 1
self.step_number = 0
self.maxiter = maxiter
self.submit_to_cluster = submit_to_cluster
self.goal = goal
self.fitness = None
self.selection_method = selection_method
self.x0 = []
self.size_population = None
self.cont_new_population = 0
self.step_evaluation = 0
self.improving = True
if cross_over == 'uniform':
self.cross_over = self.cross_over_uniform
elif cross_over == 'two_points':
self.cross_over = self.cross_over_two_points
elif cross_over == 'one_point':
self.cross_over = self.cross_over_one_point
else:
raise ValueError(f'Invalid crossover: {cross_over}.')
if mutation == 'binary':
self.mutation = self.mutation_binary
elif mutation == 'gaussian':
self.mutation = self.mutation_gaussian
elif mutation == 'uniform':
self.mutation = self.mutation_uniform
else:
raise ValueError(f'Invalid mutation: {mutation}.')
def populate(self, size_population, bounds=None, x0=None, ndim=None, sigma=None, type_create='uniform'):
"""
        Creates a list of several individuals that form the population.
Return:
:return: void
"""
self.size_population = size_population
if x0 is None and ndim is None:
raise ValueError('Either x0 or ndim bust be given')
elif x0 is not None:
self.x0 = x0
self.ndim = len(x0)
else:
self.ndim = ndim
self.population = [Individual(x0=x0, ndim=self.ndim, bounds=bounds, sigma=sigma, type_create=type_create) for i in range(size_population)]
def calculate_pop_fitness(self, func):
"""
        Calculates the fitness of the population and stores it on each individual.
Returns:
:return: void
"""
for individual in self.population:
func(individual)
def calculate_best_fitness(self):
"""Calcula a melhor aptidão entre todas as populações e salva na lista da classe genética.
Returns:
:return: void
"""
self.fitness = max([k.fitness for k in self.population])
return self.fitness
def calculate_avg_fitness(self):
"""Calcula a aptidão de media entre todas as populações e salva na lista da classe genética.
Returns:
:return: void
"""
return sum([k.fitness for k in self.population]) / np.size(self.population)
@staticmethod
def sorted_population(population):
"""
        Sorts the population by fitness value.
Args:
:param population:(:obj:`list`): Lists one containing a population.
Returns:
:return: score(:obj:`list`): An element of the population list select.
"""
return sorted(population, key=lambda k: k.fitness)
def roulette_selection(self):
"""
        Performs roulette-wheel selection of the parents.
Args:
:param population: population:(:obj:`list`): Lists one containing a population.
Returns:
:return: selected(:obj:`list`): An element of the population list select.
"""
population = self.sorted_population(self.population)
parents = []
sum_score = sum([k.fitness for k in population])
if not np.all(np.sign([k.fitness for k in population]) == np.sign(sum_score)):
raise ValueError('Not all fitnesses have the same sign. This is not supported')
if sum_score < 0:
# If sum_score is negative, we assume all fitnesses are negative as
# well (we're actually trying to minimize something, so we need to make
# sure that only positive values are sent as roulette wheel fractions.
for individual in population:
individual.fitness = individual.fitness - sum_score
for _ in range(self.num_parents):
sum_score = sum([k.fitness for k in population])
selection_criteria = 0
key = np.random.uniform(0, sum_score)
for idx in range(len(population)):
selection_criteria += population[idx].fitness
if selection_criteria > key:
parents.append(population.pop(idx))
break
return parents
def random_selection(self):
"""
        Performs a tournament selection and returns the winning individuals.
Args:
:param population:(:obj:`list`): Lists one containing a population.
Returns:
:return: sub_population(:obj:`list`): An element of the sub_population list select.
"""
parents = []
for k in range(self.num_parents):
sub_population = random.sample(self.population, 2)
sub_population = self.sorted_population(sub_population)
parents.append(sub_population[0])
return parents
def elitism_selection(self):
"""
        Performs elitist selection.
Args:
:param population:(:obj:`list`): Lists one containing a population.
Returns:
:return:population(:obj:`list`): An element of the population list select.
"""
population = self.sorted_population(self.population)
parents = population[-self.num_parents:]
return parents
def selection(self):
"""[summary]
Raises:
ValueError: [description]
Returns:
[type]: [description]
"""
if self.selection_method == 'roulette':
parents = self.roulette_selection()
return parents
elif self.selection_method == 'random':
parents = self.random_selection()
return parents
elif self.selection_method == 'elitism':
parents = self.elitism_selection()
return parents
else:
raise ValueError(f'Invalid {self.selection_method} selection method.')
def cross_over_one_point(self, population):
"""
        Performs one-point crossover on the chromosomes.
Args:
:param population:(:obj:`list`): Lists one containing a population.
:param parents :(:obj:`float`): parents gene to carry out the mutation.
Returns:
"""
population = self.sorted_population(population)
local_parents = self.selection()
for individual in population[:-self.num_elitism]:
locus = np.random.randint(0, individual.size_individual)
parent_1 = np.random.randint(self.num_parents)
parent_2 = np.random.randint(self.num_parents)
while parent_2 == parent_1:
parent_2 = np.random.randint(self.num_parents)
individual.chromosome[:locus] = local_parents[parent_1].chromosome[:locus].copy()
individual.chromosome[locus:] = local_parents[parent_2].chromosome[locus:]
self.population = population
def cross_over_two_points(self, population):
"""
        Performs two-point crossover on the chromosomes.
Args:
:param population:(:obj:`list`): Lists one containing a population.
:param parents :(:obj:`float`): parents gene to carry out the mutation.
Returns:
"""
population = self.sorted_population(population)
local_parents = self.selection()
for individual in population[:-self.num_elitism]:
while True:
locus = np.random.randint(0, individual.size_individual)
locusTwo = np.random.randint(locus, individual.size_individual)
if locus != locusTwo:
break
individual.fitness = None
parent_1 = np.random.randint(self.num_parents)
parent_2 = np.random.randint(self.num_parents)
while parent_2 == parent_1:
parent_2 = np.random.randint(self.num_parents)
individual.chromosome = local_parents[parent_1].chromosome.copy()
# To avoid messing with local_parents in the next line.
individual.chromosome[locus:locusTwo] = local_parents[parent_2].chromosome[locus:locusTwo]
self.population = population
def cross_over_uniform(self, population):
"""
        Performs uniform crossover on the chromosomes.
Args:
:param population:(:obj:`list`): Lists one containing a population.
:param parents :(:obj:`float`): parents gene to carry out the mutation.
Returns:
"""
population = self.sorted_population(population)
        local_parents = self.selection()
for individual in population[:-self.num_elitism]:
parent_1 = np.random.randint(self.num_parents)
parent_2 = np.random.randint(self.num_parents)
while parent_2 == parent_1:
parent_2 = np.random.randint(self.num_parents)
for j in range(individual.size_individual):
drawn = np.random.choice((parent_1, parent_2))
individual.chromosome[j] = local_parents[drawn].chromosome[j]
self.population = population
def mutation_uniform(self, population):
"""
        Applies uniform mutation and returns the modified population.
Args:
:param population:(:obj:`list`): Lists one containing a population.
Returns:
"""
for individual in population:
for locus in range(individual.size_individual):
if random.random() <= self.mutation_probability:
individual.chromosome[locus] = np.random.uniform(
self.bounds[0][locus],self.bounds[1][locus], size=1)
def mutation_binary(self, population):
"""
        Applies bit-flip mutation and returns the modified population;
        note that this mutation is only valid for binary populations.
Args:
:param population:(:obj:`list`): Lists one containing a population.
Returns:
"""
for individual in population:
for locus in range(individual.size_individual):
if random.random() <= self.mutation_probability:
if individual.chromosome[locus] == 1:
individual.chromosome[locus] = 0
elif individual.chromosome[locus] == 0:
individual.chromosome[locus] = 1
def mutation_gaussian(self, population):
"""
        Applies Gaussian mutation and returns the modified population.
Args:
:param population:(:obj:`list`): Lists one containing a population.
Returns:
"""
for individual in population:
for locus in range(individual.size_individual):
if random.random() <= self.mutation_probability:
individual.chromosome[locus] = np.random.normal(individual.chromosome[locus],
self.sigma)
def update_swarm(self):
"""
        Updates the population by performing crossover and mutation.
Returns:
:return population(:obj:`list` of :obj:`Particles`): returns a list of
swarms.
"""
if self.fitness is None:
logging.error("Cannot update the population before calculating Fitness")
raise RuntimeError("Updated the population before calculating Fitness")
self.cross_over(self.population)
self.mutation(self.population)
if self.submit_to_cluster:
self.curr_iter['update'] += 1
def evaluate_single_fitness_test(self, func,
enum_particles=False, add_step_num=False,
**kwargs):
"""
        Runs the given function as the fitness test for all individuals.
        Parameters:
-----------
fun : callable
The fitness test function to be minimized:
``func(individual.ichromosome, **kwargs) -> float``.
enum_particles : boolean
If `True`, the population will be enumerated and the individual index will
be passed to `func` as keyword `part_idx`, added to `kwargs`
add_step_num : boolean
If `True`, the current step number will be passed to `func`
as keyword `step_num`, added to `kwargs`
**kwargs: Other keywords to the fitness function, will be passed as is.
"""
if add_step_num:
kwargs['step_num'] = self.step_number
if self.ncpu == 1:
if enum_particles:
for part_idx, individual in enumerate(self.population):
kwargs['part_idx'] = part_idx
individual.fitness = func(individual.chromosome, **kwargs)
else:
for individual in self.population:
individual.fitness = func(individual.chromosome, **kwargs)
elif self.ncpu > 1:
with mp.Pool(processes=self.ncpu) as pool:
argslist = []
p = []
for part_idx, individual in enumerate(self.population):
argslist.append(dict(kwargs))
if enum_particles:
argslist[-1]['part_idx'] = part_idx
for idx, args in enumerate(argslist):
p.append(pool.apply_async(func, args=(self.population[idx].chromosome,),kwds=args))
results = [ r.get() for r in p ]
for part_idx, individual in enumerate(self.population):
individual.fitness = results[part_idx]
def do_full_step(self, func, **kwargs):
"""Execute uma etapa completa de GA.
Este método passa por todos os outros métodos para realizar uma completa
Etapa GA, para que possa ser chamada a partir de um loop no método run ()..
"""
if self.fitness is not None and self.step_number < self.maxiter:
self.cross_over(self.population)
self.mutation(self.population)
for individual in self.population:
np.clip(individual.chromosome, self.bounds[0], self.bounds[1], out=individual.chromosome)
if self.submit_to_cluster:
raise NotImplementedError('Multistep jobs are under revision.')
else:
self.evaluate_single_fitness_test(func, **kwargs)
self.step_number += 1
def fitness_variation(self, fitness_evaluation):
"""
        Checks how the fitness varies between generations and flags the run as
        no longer improving after too many unchanged steps.
        Args:
            fitness_evaluation (float): fitness value of the previous generation.
"""
if(fitness_evaluation != self.fitness):
self.step_evaluation = 0
else:
self.step_evaluation += 1
print('step_evaluation: {}'.format(self.step_evaluation))
if(self.step_evaluation > 100):
self.improving = False
def run(self, func, DEBUG=None, **kwargs):
"""Execute uma execução de otimização completa.
Faz a otimização com a execução da atualização das velocidades
e as coordenadas também verifica o critério interrompido para encontrar fitnnes.
Parameters
----------
func : callable
Function that calculates fitnnes.
Returns
-------
The dictionary that stores the optimization results.
"""
self.func = func
while (self.step_number < self.maxiter and self.improving):
self.do_full_step(func, **kwargs)
fitness_evaluation= self.fitness
self.calculate_best_fitness()
#self.fitness_variation(fitness_evaluation)
if DEBUG is not None:
with open(DEBUG, 'a') as dbg_file:
print(f"# {self.step_number} {self.fitness}")
print(f" {self.step_number} {self.fitness}",
file=dbg_file)
#np.savetxt(dbg_file,
# [(*p.chromosome, p.fitness)
# for p in self.population])
if self.fitness >= self.goal:
break
self.results = {}
self.results['fitness'] = self.fitness
return self.results
``` |
{
"source": "jhonatheberson/MIPS-architecture",
"score": 3
} |
#### File: jhonatheberson/MIPS-architecture/Add.py
```python
class Add ():
input = 0
output = 0
def Add(self):
self.output = self.input + 4
def setAdd(self,input_final):
self.input = input_final
def getAdd(self):
return self.output
``` |
{
"source": "jhonatheberson/news-portal",
"score": 3
} |
#### File: management/commands/capturar_noticias.py
```python
from django.core.management.base import BaseCommand
from django.utils import timezone
from bs4 import BeautifulSoup as bs
from portal.models import News, Portal
from requests import get
class Command(BaseCommand):
help = 'class responsible for web scraping to capture the news and add it to the database'
def handle(self, *args, **kwargs):
"""responsible function of web scraping and capturing the headlines of the news.
Keyword arguments:
rargs -- django command method default (default)
kwargs -- django command method default (default)
"""
base_url = 'https://www.tecmundo.com.br' # url that will be done web scraping
base_portal = Portal(name=base_url)
base_portal.save() # saving the url to the database
title = [] # create a queue to store the titles
tecmundo = get(base_url) # requests a request to url
tecmundo_page = bs(tecmundo.text, 'html.parser') # get the text of the request response only html
boxes = tecmundo_page.find_all("h2") # search all over the site just what is h2 tagged
size_boxes = len(boxes)
for i in range(size_boxes):
title.append(boxes[i].text) # add titles to the queue
news = News(title=title[i])
news.save() # saving the title to the database
```
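A hedged example of triggering the management command above from code (for instance, in a scheduled task); the usual command-line equivalent is `python manage.py capturar_noticias`.
```python
from django.core.management import call_command

# Runs the scraper defined above and stores the captured headlines.
call_command('capturar_noticias')
```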
#### File: news/portal/models.py
```python
from django.db import models
class Portal(models.Model):
"""
    model of the database table that was created;
    this table is referenced by the News table
"""
name = models.CharField(max_length=40) # name of the news portal or url
def __str__(self):
return self.name
class News(models.Model):
"""
    model of the database table that was created
"""
title = models.CharField(max_length=200) # title of the news
portal = models.ForeignKey(Portal, null=True,
on_delete=models.CASCADE) # portal of the news
create_at = models.DateTimeField(
auto_now_add=True) # good database practice
update_at = models.DateTimeField(auto_now=True) # good database practice
def __str__(self):
return self.title
# Create your models here.
```
#### File: news/portal/views.py
```python
from django.shortcuts import render, get_object_or_404, redirect
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import HttpResponse
from django.contrib import messages
from .forms import NewForm
from .models import News, Portal
def newsList(request):
"""
    function responsible for searching the news
    in the database and paginating the results
"""
search = request.GET.get('search') # check if the form has something in it
if search:
news = News.objects.filter(title__icontains=search) # search the content in the database
else:
news_list = News.objects.all().order_by('-create_at') # shows all news from the bank in order of the most recent
paginator = Paginator(news_list, 7) #shows 7 news per page
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
news = paginator.page(page)
except (EmptyPage, InvalidPage):
news = paginator.page(paginator.num_pages)
return render(request, 'portal/list.html', {'news': news})
def newsView(request, id):
"""
function that brings the other news fields
"""
news = get_object_or_404(News, pk=id)
return render(request, 'portal/news.html', {'news': news})
``` |
{
"source": "Jhonattanln/Factor-Investing",
"score": 3
} |
#### File: Quality/Quality/Quality.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
######################################################### D/E #############################################################
de = pd.read_excel(r'C:\Users\Jhona\OneDrive - Grupo Marista\Projetos\Factor Investing\Factor-Investing\Factor investing\Quality\Quality\DE.xlsx',
parse_dates=True, index_col=0)
def columns(df):
df.columns = df.columns.str[-6:]
return df
columns(de)
de.replace('-', np.nan, inplace=True)
de = de.T
de.columns = de.columns.astype(str)
de.rename(columns={'Data': 'Ação', '2019-12-31':'DE'}, inplace=True)
data_de = de[de['DE'] > 0]
data_de.sort_values('DE')
######################################################### ROE ########################################################
roe = pd.read_excel(r'C:\Users\Jhona\OneDrive - Grupo Marista\Projetos\Factor Investing\Factor-Investing\Factor investing\Quality\Quality\ROE.xlsx',
parse_dates=True, index_col=0)
columns(roe)
roe.replace('-',np.nan, inplace=True)
roe = roe.T
roe.columns = roe.columns.astype(str)
roe.rename(columns={'2019-12-31':'ROE'}, inplace=True)
data_roe = roe[roe['ROE']>0]
data_roe.sort_values(by=['ROE'], ascending=False, inplace=True)
data_roe
############# Concatenating the data
data=pd.concat([data_de, data_roe], axis=1).dropna()
############# Ranking
de_value = data.DE.sum()
roe_value = data.ROE.sum()
values={}
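# Normalise each metric by its column total, then rank by the ratio of normalised
# D/E to normalised ROE: low leverage combined with high profitability gives the
# smallest Ranking value, so the best names sort first.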
for i in data.DE:
values['DE'] = data.DE /de_value
values = pd.DataFrame(values)
for i in data.ROE:
values['ROE'] = data['ROE'] / roe_value
values['Ranking'] = values.DE.div(values.ROE)
values.sort_values(by=['Ranking'], inplace=True)
assets = values.iloc[:20]
assets.index
stocks = ['ITSA4.SA', 'CEBR5.SA', 'LREN3.SA', 'TRPL4.SA', 'PARD3.SA', 'HAPV3.SA', 'STBP3.SA', 'TGMA3.SA',
'CEAB3.SA', 'UNIP3.SA',]
### Potfolio
df = pd.DataFrame()
from pandas_datareader import data
for i in stocks:
df[i] = data.DataReader(i, data_source='yahoo', start='2020-01-01', end = '2020-12-31')['Adj Close']
### Portfolio returns
weights = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
returns = df.pct_change().dropna()
df['Portfolio'] = (1+returns.dot(weights)).cumprod().dropna()
norm = pd.DataFrame()
for i in df:
norm[i] = df[i].div(df[i].iloc[1]).mul(100)
norm
plt.style.use('ggplot')
norm.plot()
plt.legend(loc='lower left')
plt.show()
### Portfolio vs IBOV
ibov = data.DataReader('^BVSP', data_source='yahoo', start='2020-01-01', end = '2020-12-31')
ibov.rename(columns = {'Adj Close':'IBOV'}, inplace=True)
ibov.drop(ibov.columns[[0,1,2,3,4]], axis=1, inplace=True)
ibov['Ibov'] = ibov['IBOV'].div(ibov['IBOV'].iloc[0]).mul(100)
ibov
plt.plot(norm['Portfolio'])
plt.plot(ibov['Ibov'])
plt.legend(['Portfolio - Quality', 'Ibov'])
plt.show()
final = pd.concat([norm['Portfolio'], ibov['Ibov']], axis = 1)
final.to_excel('Quality.xlsx')
``` |
{
"source": "jhonattanrgc21/python-exercises",
"score": 4
} |
#### File: python-exercises/Arreglos/print_matriz.py
```python
def mostrar(matriz):
    global filas # number of rows read as input
    for i in range(filas):
        print(*matriz[i])
# Matrix variable
matriz = list()
# The while loops validate the number of rows and columns
while True:
filas = int(input('Ingrese el numero de filas: '))
if filas > 0:
break
else:
        print('Error, la cantidad debe ser mayor a cero.\n')
while True:
cols = int(input('Ingrese el numero de columnas: '))
if cols > 0:
break
else:
        print('Error, la cantidad debe ser mayor a cero.\n')
# Processing
for i in range(filas):
matriz.append(list())
for j in range(cols):
matriz[i].append(input('valor [%d, %d]: ' % (i + 1, j + 1)))
# Output
mostrar(matriz)
``` |
{
"source": "jhonattanrgc21/web-empresarial",
"score": 2
} |
#### File: web-empresarial/blog/admin.py
```python
from django.contrib import admin
from .models import Category, Post
class CategoryAdmin(admin.ModelAdmin):
    # Make the date fields read-only
readonly_fields = ('created', 'updated',)
class PostAdmin(admin.ModelAdmin):
    # Make the date fields read-only
readonly_fields = ('created', 'updated',)
    # Columns displayed for each record in the list view
list_display = ('title', 'author', 'published', 'post_categories',)
    # Allow ordering the records by the given fields
ordering = ('author', 'published' ,)
    # Add a search box over the given fields
search_fields = ('title', 'content', 'author__username', 'categories__name',)
    # Allow browsing the records by date
date_hierarchy = 'published'
    # Add filters alongside the search box
list_filter = ('author__username', 'categories__name')
    # This kind of helper is for many-to-many or one-to-many fields
def post_categories(self, obj):
        ''' Return the categories linked to the record,
        ordered by name and separated by commas '''
return (',').join([c.name for c in obj.categories.all().order_by('name')])
    # Display name for this computed column in the record list
post_categories.short_description = 'Categorias'
# Register the configuration classes in the admin dashboard
admin.site.register(Category, CategoryAdmin)
admin.site.register(Post, PostAdmin)
```
#### File: web-empresarial/pages/models.py
```python
from django.db import models
from ckeditor.fields import RichTextField
class Page(models.Model):
title = models.CharField(
max_length = 200,
verbose_name = 'Titulo',
unique = True
)
content = RichTextField(
verbose_name = 'Contenido'
)
order = models.SmallIntegerField(verbose_name = 'Orden', default = 0)
created = models.DateTimeField(auto_now_add = True)
updated = models.DateTimeField(auto_now = True)
class Meta:
        ''' The Meta class orders the page list and translates the
        model name from English to Spanish in the admin dashboard '''
verbose_name = 'pagina'
verbose_name_plural = 'paginas'
ordering = ['order','title']
    # Methods
def __str__(self):
return self.title
``` |
{
"source": "jhonattanrgc21/web-personal",
"score": 2
} |
#### File: web-personal/portfolio/models.py
```python
from django.db import models
class Project(models.Model):
    ''' Projects model '''
    # Attributes
title = models.CharField(
max_length=200,
verbose_name = 'Titulo'
)
description = models.TextField(verbose_name = 'Descripcion')
image = models.ImageField(
upload_to = 'media/projects',
verbose_name = 'Imagen'
)
link = models.URLField(
null=True,
blank=True,
verbose_name = 'Direccion web'
)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
        ''' The Meta class orders the project list by creation date and
        translates the model name from English to Spanish in the
        admin dashboard '''
verbose_name = 'proyecto'
verbose_name_plural = 'proyectos'
ordering = ['-created']
    # Methods
def __str__(self):
return self.title
```
#### File: web-personal/portfolio/views.py
```python
from django.shortcuts import render
from .models import Project
def portfolio(request):
    ''' Fetch all the projects registered in the database and
    render them with the portfolio/portfolio.html template '''
projects = Project.objects.all()
return render(request, 'portfolio/portfolio.html', {'projects': projects})
``` |
{
"source": "Jhonattan-rocha/Meus-primeiros-programas",
"score": 3
} |
#### File: Exer39/Banco/clientes.py
```python
from contas import Conta_poupnaca, Conta_corrente
from itertools import count
class _MetaClassCliente(type):
def __new__(mcs, name, bases, namespace):
if name == "Cliente":
return type.__new__(mcs, name, bases, namespace)
if "cadastrar_pounpanca" not in namespace:
raise SyntaxError("Falta o método cadastrar pounpanca na classe")
else:
if not callable(namespace["cadastrar_pounpanca"]):
raise SyntaxError("Falta o método cadastrar pounpanca na classe")
if "cadastrar_corrente" not in namespace:
            raise SyntaxError("Falta o método cadastrar_corrente na classe")
        else:
            if not callable(namespace["cadastrar_corrente"]):
                raise SyntaxError("Falta o método cadastrar_corrente na classe")
        # All required methods exist, so create the class normally
        return type.__new__(mcs, name, bases, namespace)
class Pessoa:
def __init__(self, nome="", idade=0, RG=0, CPF=0):
if self.__class__ is Pessoa:
raise TypeError(f"{self.__class__} não pode ser instanciada")
self._nome = nome
self._idade = idade
self.__RG = RG
self.__CPF = CPF
class Cliente(Pessoa, metaclass=_MetaClassCliente):
    _contador = count(1)  # class-level counter so each client gets its own sequential id
    def __init__(self, nome, idade, RG, CPF):
        super(Cliente, self).__init__(nome, idade, RG, CPF)
        self._id_cliente = next(Cliente._contador)
def cadastrar_pounpanca(self, agencia, conta):
self.conta_poupanca = Conta_poupnaca(agencia, conta)
def cadastrar_corrente(self, agencia, conta):
self.conta_corrente = Conta_corrente(agencia, conta)
if __name__ == '__main__':
pass
```
#### File: Exer39/Banco/contas.py
```python
from abc import abstractmethod, ABC
class Conta(ABC):
def __init__(self, agencia, conta, saldo=0):
self._agencia = agencia
self._conta = conta
self._saldo: float = saldo
@property
def agencia(self):
return self._agencia
@agencia.setter
def agencia(self, valor):
self._agencia = valor
@property
def conta(self):
return self._conta
@conta.setter
def conta(self, valor):
self._conta = valor
@property
def saldo(self):
return self._saldo
@saldo.setter
def saldo(self, valor: float):
self._saldo = valor
def depoistar(self, valor: float):
if valor > 0:
self._saldo += valor
else:
            raise ValueError("Depósito não pode ser negativo")
@abstractmethod
def sacar(self, valor: float):
pass
class Conta_poupnaca(Conta):
def __init__(self, agencia=0, conta=0, saldo=0):
super(Conta_poupnaca, self).__init__(agencia, conta, saldo)
def sacar(self, valor: float):
if self.saldo < valor:
# print("Saldo insuficiente")
return 0
self.saldo -= valor
class Conta_corrente(Conta):
def __init__(self, agencia, conta, saldo=0, limite=100):
super(Conta_corrente, self).__init__(agencia, conta, saldo)
self.limite = limite
def sacar(self, valor: float):
if (self.saldo + self.limite) < valor:
# print("Saldo insuficiente")
if self.saldo < 0:
print("Saldo negativado")
return
self.saldo -= valor
```
#### File: ExerciciosDeSO/ExerciciosThreadsSlide/ExerPG37.py
```python
import threading
from random import randint
from time import sleep
class Carro(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.semaforo = threading.Lock()
@classmethod
def andar(cls, idt):
sentido = ["frente", "esquerda", "direita"]
direcao = sentido[randint(0, 2)]
print(f"O carro {idt} está indo para a {direcao}")
def run(self) -> None:
try:
self.semaforo.acquire()
sleep(1)
self.andar(self.native_id)
except Exception as e:
print(e)
finally:
self.semaforo.release()
if __name__ == '__main__':
for i in range(4):
t = Carro()
t.start()
```
#### File: ExerciciosDeSO/ExerciciosThreadsSlide/ExerPG38.py
```python
import copy
import threading
from time import sleep, time
colocados = []
class F1(threading.Thread):
def __init__(self, escudeira):
threading.Thread.__init__(self)
self.semaforo = threading.Semaphore(5)
self.semaforoCarro = threading.Semaphore(1)
self.escudeira = escudeira
def run(self) -> None:
try:
self.semaforo.acquire()
match self.escudeira:
case 1:
self.semaforoCarro.acquire()
for i in range(3):
self.volta()
if i == 2:
colocados.append(self.escudeira)
colocados.append(max(self.tempos))
self.semaforoCarro.release()
case 2:
self.semaforoCarro.acquire()
for i in range(3):
self.volta()
if i == 2:
colocados.append(self.escudeira)
colocados.append(max(self.tempos))
self.semaforoCarro.release()
case 3:
self.semaforoCarro.acquire()
for i in range(3):
self.volta()
if i == 2:
colocados.append(self.escudeira)
colocados.append(max(self.tempos))
self.semaforoCarro.release()
case 4:
self.semaforoCarro.acquire()
for i in range(3):
self.volta()
if i == 2:
colocados.append(self.escudeira)
colocados.append(max(self.tempos))
self.semaforoCarro.release()
case 5:
self.semaforoCarro.acquire()
for i in range(3):
self.volta()
if i == 2:
colocados.append(self.escudeira)
colocados.append(max(self.tempos))
self.semaforoCarro.release()
case 6:
self.semaforoCarro.acquire()
for i in range(3):
self.volta()
if i == 2:
colocados.append(self.escudeira)
colocados.append(max(self.tempos))
self.semaforoCarro.release()
case 7:
self.semaforoCarro.acquire()
for i in range(3):
self.volta()
if i == 2:
colocados.append(self.escudeira)
colocados.append(max(self.tempos))
self.semaforoCarro.release()
self.semaforo.release()
except Exception as e:
print(e)
finally:
self.semaforo.release()
def volta(self):
# try:
# self.semaforoCarro.acquire()
self.tempos = []
tempo_ini = time()
print(f"{self.native_id} da escudeira {self.escudeira} começou uma volta")
sleep(5)
print(f"{self.native_id} da escudeira {self.escudeira} terminou uma volta")
self.tempos.append(time() - tempo_ini)
# self.semaforoCarro.release()
# except Exception as e:
# print(e)
# finally:
# self.semaforoCarro.release()
if __name__ == '__main__':
escudeira = [1, 3, 2, 1, 3, 2, 7, 4, 5, 6, 5, 6, 4, 7]
for i in escudeira:
f1 = F1(i)
f1.start()
sleep(20)
lista_auxiliar = [i for i in colocados if i is not int]
print(lista_auxiliar)
print(colocados)
exit(0)
for i in range(len(colocados)//2):
print(f"{i+1}° colocado, {colocados[colocados.index(lista_auxiliar[i])-1]}")
print(colocados)
```
#### File: Versao_final/TestePosteriores/Teste2.py
```python
import pickle
import numpy as np
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.datasets.supervised import SupervisedDataSet
class BagOfWords:
def __init__(self):
self.vocab = []
def build_vocab(self, sentences):
for sentence in sentences:
for word in sentence.split(" "):
if word not in self.vocab:
self.vocab.append(word)
self.vocab.sort()
def toArray(self, sentences):
words = sentences.split(' ')
vector = np.zeros(len(self.vocab))
for word in words:
for i, _word in enumerate(self.vocab):
if _word == word:
vector[i] = 1
return list(vector)
def get_len(self):
return len(self.vocab)
sentences = ['eu gosto disso', 'eu odeio isso', 'aquilo era bom', 'aquilo era mal']
bag = BagOfWords()
bag.build_vocab(sentences)
outputs = [[1], [0], [1], [0]]
inputs = []
# print(bag.vocab)
for sentence in sentences:
vector = bag.toArray(sentence)
passe = []
for num in vector:
passe.append(num)
inputs.append(passe)
# SupervisedDataSet takes the input size first, then the number of outputs
ds = SupervisedDataSet(bag.get_len(), 1)
for i, j in zip(inputs, outputs):
ds.addSample(i, j)
# input neurons, then the number of neurons in the hidden layer, then the output layer
netWork = buildNetwork(bag.get_len(), 5, 1, bias=True)
back = BackpropTrainer(netWork, ds)  # network, dataset
# this loop trains the neural network
for i in range(1000):
back.train()
print(netWork.activate(bag.toArray("que horas que é"))[0])  # run (activate) the trained network
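# Hedged note (editor's comment): the network was built with a single output
# neuron, so activate() returns one float; values near 1 correspond to the
# sentences labelled [1] above and values near 0 to the ones labelled [0].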
```
#### File: Versao_final/TestePosteriores/Teste_ia_k.py
```python
import os
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC, SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import Minha_Biblioteca
import numpy
"""
import sklearn
from sklearn.model_selection import train_test_split
modelo = sklearn.svm.LinearSVC() # basic machine-learning model
modelo.fit(x, y) # command that tells the model to train
# x is the data, y is the answers derived from that data
# print(sklearn.metrics.accuracy_score(teste_x, previcoes))
# sample() in pandas returns random rows
modelo = sklearn.svm.LinearSVC() # basic machine-learning model
# this splits the data into train and test sets so the algorithm does not get biased
treino_x, teste_x, treino_y, teste_y = train_test_split(x, y)
modelo.fit(treino_x, treino_y)
modelo.predict(teste_x) # command to predict using the teste_x variable from train_test_split
# print(sklearn.metrics.accuracy_score(teste_y, previcoes))
from pybrain.tools.shortcuts import buildNetwork
"""
# arquivo = open(r"databaseF.txt", 'r', encoding='utf-8')
# arquivo = arquivo.read()
# arquivo = arquivo.split()
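# Hedged illustration (editor's sketch, not the author's code): a minimal,
# self-contained version of the scikit-learn text-classification workflow that
# the notes above describe, using the TfidfVectorizer imported at the top so the
# phrases do not need a manual numeric encoding. Defined only; nothing calls it.
def _tfidf_linearsvc_sketch(phrases, labels):
    # Turn the raw phrases into TF-IDF features.
    features = TfidfVectorizer().fit_transform(phrases)
    # Split into train/test so the evaluation is not biased.
    train_x, test_x, train_y, test_y = train_test_split(features, labels, test_size=0.5)
    model = LinearSVC()
    model.fit(train_x, train_y)
    # Accuracy on the held-out half.
    return accuracy_score(test_y, model.predict(test_x))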
def ajeitar(texto=''):
texto = texto.split()
retorno = ' '
for c in texto:
retorno += c
return numpy.float64(retorno)
teste1 = [ajeitar(Minha_Biblioteca.LetraNumero("me fala as horas"))]
teste2 = [ajeitar(Minha_Biblioteca.LetraNumero("que horas são"))]
teste3 = [ajeitar(Minha_Biblioteca.LetraNumero("me diga as horas"))]
teste4 = [ajeitar(Minha_Biblioteca.LetraNumero("fale as horas"))]
teste5 = [ajeitar(Minha_Biblioteca.LetraNumero("horas, por favor"))]
teste6 = [ajeitar(Minha_Biblioteca.LetraNumero("me fala a data"))]
teste7 = [ajeitar(Minha_Biblioteca.LetraNumero("que dia é hoje"))]
teste8 = [ajeitar(Minha_Biblioteca.LetraNumero("me diga a data"))]
teste9 = [ajeitar(Minha_Biblioteca.LetraNumero("fale a data"))]
teste10 = [ajeitar(Minha_Biblioteca.LetraNumero("tempo"))]
teste11 = [ajeitar(Minha_Biblioteca.LetraNumero("me diga o clima"))]
teste12 = [ajeitar(Minha_Biblioteca.LetraNumero("qual é o clima de hoje"))]
teste13 = [ajeitar(Minha_Biblioteca.LetraNumero("qual a previsão do clima para hoje"))]
teste14 = [ajeitar(Minha_Biblioteca.LetraNumero("qual o clima para hoje"))]
teste15 = [ajeitar(Minha_Biblioteca.LetraNumero("clima"))]
x = [teste1, teste2, teste3, teste4, teste5, teste6, teste7, teste8, teste9, teste10, teste11, teste12, teste13,
teste14, teste15]
# x = [["me fala as horas"], ["que horas são"], ["me diga as horas"], ["fale as horas"], ["horas, por favor"],
# ["me fala a data"], ["que dia é hoje"], ["me diga a data"], ["fale a data"], ["tempo"],
# ["me diga o clima"], ["qual é o clima de hoje"], ["qual a previsão do clima para hoje"], ["qual o clima para hoje"], ["clima"]]
y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
modelo = SVC(max_iter=1000000, random_state=10, kernel="linear")
treino_x, teste_x, treino_y, teste_y = train_test_split(x, y, test_size=0.5, train_size=0.5)
modelo.fit(treino_x, treino_y)
print(modelo.predict([[ajeitar(Minha_Biblioteca.LetraNumero("qual o clima"))]]))
```
#### File: Curos_Python_curemvid/Exercicios_dos_videos/Ex102.py
```python
def fatorial(num=1, show=0):
"""
    Program summary: computes the factorial of the number n
    :param num: - parameter that receives the value of n
    :param show: - value flag (originally 's' or 'n') that defines whether the calculation is shown
    :return: - the factorial of n, optionally printing the step-by-step calculation
"""
if show == 0:
f = 1
for c in range(num, 0, -1):
f *= c
return f
else:
f = 1
for c in range(num, 0, -1):
print(f"{c} x {f} = ", end=' ')
f *= c
print(f)
if c == 1:
print(f"O fatorial de {num} é {f}")
return f
print("Programa para cálculo de fatorial!!!")
n = int(input("Digite um número para saber seu fatorial: "))
escolha = ''
while escolha != 'S' and escolha != 'N':
escolha = str(input("Deseja saber o cálculo do fatorial: ")).upper()
if escolha == 'S':
escolha = int(1)
break
else:
escolha = int(0)
break
if escolha == 0:
    print(f"O fatorial do número é:{fatorial(n, escolha)}")
else:
print("O cálculo é: ")
fatorial(n, escolha)
```
#### File: Curos_Python_curemvid/Exercicios_dos_videos/Ex104.py
```python
def leiaint(msg):
while True:
n = input(msg)
if n.isnumeric():
valor = int(n)
break
else:
print("Digite um número válido")
return valor
num = leiaint("digite um número inteiro: ")
print(f"Você digitou o número {num}")
```
#### File: Curos_Python_curemvid/Exercicios_dos_videos/Ex105.py
```python
def notas(*nums, situacao=False):
notasA = []
cont = 0
maior = 0
menor = 0
media = 0
for c in range(0, len(nums)):
cont += 1
notasA.append(nums[c])
media += nums[c]
if c == 0:
maior = nums[c]
menor = nums[c]
else:
if maior < nums[c]:
maior = nums[c]
if menor > nums[c]:
menor = nums[c]
dados = dict()
dados['Quantidade de Alunos'] = cont
dados['Notas'] = notasA.copy()
dados['Maior nota'] = maior
dados['Menor nota'] = menor
dados['Média'] = media/cont
if situacao:
if media/cont >= 7:
dados["Situação"] = 'Boa'
else:
dados["Situação"] = 'Ruim'
print(dados)
print("!!!Sistema de notas!!!")
print("As notas dos alunos da sala é: ")
notas(10, 20, 50, 90, 60, 7, 6, 8)
```
#### File: Uteis/Exer109M/__init__.py
```python
def metade(num, formata=False):
if formata:
return f"R${float(num / 2):.2f}"
return float(num/2)
def dobro(num, formata=False):
if formata:
return f"R${float(num*2):.2f}"
return num*2
def aumentar(num, p, formata=False):
p = 1+(p/100)
if formata:
return f"R${float(num*p):.2f}"
return float(num*p)
def diminuir(num, p, formata=False):
p = p/100
if formata:
return f"R${float(num - (num*p)):.2f}"
return float(num - (num*p))
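# Hedged worked examples (editor's illustration):
#   dobro(50)               -> 100
#   aumentar(200, 50)       -> 300.0
#   diminuir(200, 50)       -> 100.0
#   metade(7, formata=True) -> 'R$3.50'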
```
#### File: imobil/autenti/views.py
```python
from django.contrib.auth import logout
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.contrib import messages, auth
from django.contrib.messages import constants
def cadastro(request):
if request.method == "GET":
return render(request, 'cadastro.html')
elif request.method == "POST":
username = request.POST.get('username')
email = request.POST.get('email')
senha = request.POST.get('senha')
if len(username.strip()) == 0 or len(email.strip()) == 0 or len(senha.strip()) == 0:
messages.add_message(request, constants.ERROR, 'Preencha todos os campos')
return redirect('/auth/cadastro')
user = User.objects.filter(username=username)
if user.exists():
messages.add_message(request, constants.ERROR, 'Já existe um usuário com esse nome cadastrado')
return redirect('/auth/cadastro')
try:
            user = User.objects.create_user(username=username, email=email, password=senha)
user.save()
messages.add_message(request, constants.SUCCESS, 'Cadastro realizado com sucesso')
return redirect('/auth/logar')
except:
messages.add_message(request, constants.ERROR, 'Erro interno do sistema')
return redirect('/auth/cadastro')
if request.user.is_authenticated:
return redirect('/')
def logar(request):
if request.method == "GET":
return render(request, 'logar.html')
elif request.method == "POST":
username = request.POST.get('username')
senha = request.POST.get('senha')
usuario = auth.authenticate(username=username, password=senha)
if not usuario:
messages.add_message(request, constants.ERROR, 'Username ou senha inválidos')
return redirect('/auth/logar')
else:
auth.login(request, usuario)
if request.user.is_authenticated:
return redirect('/')
def sair(request):
logout(request)
return render(request, "logar.html")
``` |
{
"source": "jhondevcode/spyder",
"score": 2
} |
#### File: findinfiles/widgets/results_browser.py
```python
import os.path as osp
# Third party imports
from qtpy.QtCore import QPoint, QSize, Qt, Signal, Slot
from qtpy.QtGui import QAbstractTextDocumentLayout, QTextDocument
from qtpy.QtWidgets import (QApplication, QStyle, QStyledItemDelegate,
QStyleOptionViewItem, QTreeWidgetItem)
# Local imports
from spyder.api.translations import get_translation
from spyder.config.gui import get_font
from spyder.utils import icon_manager as ima
from spyder.widgets.onecolumntree import OneColumnTree
# Localization
_ = get_translation('spyder')
# ---- Constants
# ----------------------------------------------------------------------------
ON = 'on'
OFF = 'off'
# ---- Items
# ----------------------------------------------------------------------------
class LineMatchItem(QTreeWidgetItem):
def __init__(self, parent, lineno, colno, match, font, text_color):
self.lineno = lineno
self.colno = colno
self.match = match
self.text_color = text_color
self.font = font
super().__init__(parent, [self.__repr__()], QTreeWidgetItem.Type)
def __repr__(self):
match = str(self.match).rstrip()
_str = (
f"<!-- LineMatchItem -->"
f"<p style=\"color:'{self.text_color}';\">"
f"<b>{self.lineno}</b> ({self.colno}): "
f"<span style='font-family:{self.font.family()};"
f"font-size:{self.font.pointSize()}pt;'>{match}</span></p>"
)
return _str
def __unicode__(self):
return self.__repr__()
def __str__(self):
return self.__repr__()
def __lt__(self, x):
return self.lineno < x.lineno
def __ge__(self, x):
return self.lineno >= x.lineno
class FileMatchItem(QTreeWidgetItem):
def __init__(self, parent, path, filename, sorting, text_color):
self.sorting = sorting
self.filename = osp.basename(filename)
# Get relative dirname according to the path we're searching in.
dirname = osp.dirname(filename)
rel_dirname = dirname.split(path)[1]
if rel_dirname.startswith(osp.sep):
rel_dirname = rel_dirname[1:]
title = (
f'<!-- FileMatchItem -->'
f'<b style="color:{text_color}">{osp.basename(filename)}</b>'
f' '
f'<span style="color:{text_color}">'
f'<em>{rel_dirname}</em>'
f'</span>'
)
super().__init__(parent, [title], QTreeWidgetItem.Type)
self.setIcon(0, ima.get_icon_by_extension_or_type(filename, 1.0))
self.setToolTip(0, filename)
def __lt__(self, x):
if self.sorting['status'] == ON:
return self.filename < x.filename
else:
return False
def __ge__(self, x):
if self.sorting['status'] == ON:
return self.filename >= x.filename
else:
return False
# ---- Browser
# ----------------------------------------------------------------------------
class ItemDelegate(QStyledItemDelegate):
def __init__(self, parent):
super().__init__(parent)
self._margin = None
def paint(self, painter, option, index):
options = QStyleOptionViewItem(option)
self.initStyleOption(options, index)
style = (QApplication.style() if options.widget is None
else options.widget.style())
doc = QTextDocument()
text = options.text
doc.setHtml(text)
doc.setDocumentMargin(0)
        # This needs to be an empty string to avoid overlapping the
        # normal text of the QTreeWidgetItem
options.text = ""
style.drawControl(QStyle.CE_ItemViewItem, options, painter)
ctx = QAbstractTextDocumentLayout.PaintContext()
textRect = style.subElementRect(QStyle.SE_ItemViewItemText,
options, None)
painter.save()
painter.translate(textRect.topLeft() + QPoint(0, 4))
doc.documentLayout().draw(painter, ctx)
painter.restore()
def sizeHint(self, option, index):
options = QStyleOptionViewItem(option)
self.initStyleOption(options, index)
doc = QTextDocument()
doc.setHtml(options.text)
doc.setTextWidth(options.rect.width())
size = QSize(int(doc.idealWidth()), int(doc.size().height()))
return size
class ResultsBrowser(OneColumnTree):
sig_edit_goto_requested = Signal(str, int, str, int, int)
sig_max_results_reached = Signal()
def __init__(self, parent, text_color, max_results=1000):
super().__init__(parent)
self.search_text = None
self.results = None
self.max_results = max_results
self.total_matches = None
self.error_flag = None
self.completed = None
self.sorting = {}
self.font = get_font()
self.data = None
self.files = None
self.root_items = None
self.text_color = text_color
self.path = None
# Setup
self.set_title('')
self.set_sorting(OFF)
self.setSortingEnabled(False)
self.setItemDelegate(ItemDelegate(self))
self.setUniformRowHeights(True) # Needed for performance
self.sortByColumn(0, Qt.AscendingOrder)
        # Only show the actions for collapsing/expanding all entries in the widget
# For further information see spyder-ide/spyder#13178
self.common_actions = self.common_actions[:2]
# Signals
self.header().sectionClicked.connect(self.sort_section)
def activated(self, item):
"""Double-click event."""
itemdata = self.data.get(id(self.currentItem()))
if itemdata is not None:
filename, lineno, colno, colend = itemdata
self.sig_edit_goto_requested.emit(
filename, lineno, self.search_text, colno, colend - colno)
def set_sorting(self, flag):
"""Enable result sorting after search is complete."""
self.sorting['status'] = flag
self.header().setSectionsClickable(flag == ON)
@Slot(int)
def sort_section(self, idx):
self.setSortingEnabled(True)
def clicked(self, item):
"""Click event."""
self.activated(item)
def clear_title(self, search_text):
self.font = get_font()
self.clear()
self.setSortingEnabled(False)
self.num_files = 0
self.data = {}
self.files = {}
self.set_sorting(OFF)
self.search_text = search_text
title = "'%s' - " % search_text
text = _('String not found')
self.set_title(title + text)
@Slot(object)
def append_file_result(self, filename):
"""Real-time update of file items."""
if len(self.data) < self.max_results:
self.files[filename] = FileMatchItem(self, self.path, filename,
self.sorting, self.text_color)
self.files[filename].setExpanded(True)
self.num_files += 1
@Slot(object, object)
def append_result(self, items, title):
"""Real-time update of line items."""
if len(self.data) >= self.max_results:
self.set_title(_('Maximum number of results reached! Try '
'narrowing the search.'))
self.sig_max_results_reached.emit()
return
available = self.max_results - len(self.data)
if available < len(items):
items = items[:available]
self.setUpdatesEnabled(False)
self.set_title(title)
for item in items:
filename, lineno, colno, line, match_end = item
file_item = self.files.get(filename, None)
if file_item:
item = LineMatchItem(file_item, lineno, colno, line,
self.font, self.text_color)
self.data[id(item)] = (filename, lineno, colno, match_end)
self.setUpdatesEnabled(True)
def set_max_results(self, value):
"""Set maximum amount of results to add."""
self.max_results = value
def set_path(self, path):
"""Set path where the search is performed."""
self.path = path
``` |
{
"source": "Jhon-Dx/Program",
"score": 3
} |
#### File: Jhon-Dx/Program/Gerador_de_Senha.py
```python
import Gerador_de_senhas.Defs as ge
import PySimpleGUI as sg
class Gerador:
sg.theme('DarkPurple1')
def __init__(self):
layout = [
[sg.Checkbox('Numeros', key='sonumeros'), sg.Text(size=(3, 1)), sg.Checkbox('Letras', key='soletras'),
sg.Text(size=(3, 1)), sg.Checkbox('Simbolos', key='sosimbolos')],
[sg.Text('Quantidade de senhas'), sg.Combo(values=list(range(30)), key='totalchars', default_value=1,
size=(3, 1))],
[sg.Text(size=(11, 3)), sg.Button('Gerar Senha')],
[sg.Text('Resultado:', size=(9, 1)), sg.Text(size=(22, 0))],
[sg.Output(size=(40, 5))]
]
self.janela = sg.Window("Gerador de Senha").layout(layout)
def iniciar(self):
while True:
self.evento, self.values = self.janela.read()
com_numeros = self.values['sonumeros']
com_letras = self.values['soletras']
com_simbolos = self.values['sosimbolos']
total_de_caracteres = self.values['totalchars']
if self.evento == sg.WINDOW_CLOSED:
break
if self.evento == 'Gerar Senha':
newsenha = ge.truefalse(com_numeros, com_simbolos, com_letras, total_de_caracteres)
print(newsenha)
tela = Gerador()
tela.iniciar()
``` |
{
"source": "Jhoneagle/RandomProducts",
"score": 3
} |
#### File: finvoiceConverter/components/row.py
```python
from lxml import etree
'''
Super class for csv-files rows. Implemented child classes at the moment are Invoice and InvoiceRow classes.
'''
class Row(object):
def __init__(self, records, xmlRoot):
self._root = xmlRoot
self.records = records
self.fields = dict()
self._parse()
def toCSV(self):
csvFields = map(self.__getRecord, self.records)
output = ""
for x in csvFields:
output += (x + ';')
return output
    # Return the data of the given column for the row this object represents.
def __getRecord(self, record):
if (record in self.fields):
return self.fields[record]
return ''
    # Base method that parses the xml and prepares it to be written to the csv file.
    # It is meant to be implemented by the child classes.
def _parse(self):
pass
# set data to given column.
def _setElem(self, elemName, recordName):
elem = self._root.find(elemName)
if (elem is not None):
self.fields[recordName] = elem.text
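# Hedged illustration (editor's sketch): the real child classes (Invoice and
# InvoiceRow) live elsewhere; this hypothetical subclass only shows how _parse()
# is expected to use _setElem() to fill self.fields for one csv column.
class _ExampleRow(Row):
    def _parse(self):
        # Map an XML child element name to a csv record/column name.
        self._setElem('InvoiceNumber', 'invoice_number')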
``` |
{
"source": "Jhoneagle/TilastointiOhjelma",
"score": 3
} |
#### File: application/auth/models.py
```python
from application import db
from application.models import Base
class User(Base):
__tablename__ = "account"
name = db.Column(db.String(144), nullable=False)
phonenumber = db.Column(db.String(144), nullable=False)
email = db.Column(db.String(144), nullable=False)
company = db.Column(db.String(144), nullable=False)
address = db.Column(db.String(144), nullable=False)
username = db.Column(db.String(144), nullable=False)
password = db.Column(db.String(144), nullable=False)
def __init__(self, name, phone, email, company, address, username, password):
self.name = name
self.phonenumber = phone
self.email = email
self.company = company
self.address = address
self.username = username
self.password = password
def get_id(self):
return self.id
def is_active(self):
return True
def is_anonymous(self):
return False
def is_authenticated(self):
return True
```
#### File: application/kavijat/models.py
```python
from application import db
from application.models import Base
class Kavijat(Base):
__tablename__ = "kavijat"
sivu_id = db.Column(db.Integer, db.ForeignKey('sivu.id'))
kaynnit = db.Column(db.Integer, nullable=False)
vuosi = db.Column(db.Integer, nullable=False)
kuukausi = db.Column(db.Integer, nullable=False)
def __init__(self, kaynnit, vuosi, kuukausi):
self.kaynnit = kaynnit
self.vuosi = vuosi
self.kuukausi = kuukausi
```
#### File: application/selain/models.py
```python
from application import db
from application.models import Base
class Selain(Base):
__tablename__ = "selain"
kavijat_id = db.Column(db.Integer, nullable=False)
kaynnit = db.Column(db.Integer, nullable=False)
selain = db.Column(db.String(144), nullable=False)
def __init__(self, kaynnit, selain):
self.kaynnit = kaynnit
self.selain = selain
```
#### File: application/visits/views.py
```python
from application import app, db
from flask import redirect, render_template, request, url_for
from application.visits.models import Visit
from application.visits.forms import VisitForm, ListForm
from flask_login.utils import login_required, current_user
from application.sivu.models import Sivu
from sqlalchemy.sql import text
@app.route("/visits/new/")
@login_required
def visits_form():
return render_template("visits/new.html", title="Lisää uusi käynti tietue", form = VisitForm())
@app.route("/visits/", methods=["POST"])
@login_required
def visits_create():
form = VisitForm(request.form)
if not form.validate():
return render_template("visits/new.html", form = form, title="Lisää uusi käynti tietue")
result = Sivu.query.filter_by(osoite=form.website.data).first()
sivuId = None
if result is None:
s = Sivu(form.website.data, form.websiteGroup.data)
s.account_id = current_user.id
db.session.add(s)
db.session.commit()
r = Sivu.query.filter_by(osoite=form.website.data).first()
sivuId = r.id
else:
sivuId = result.id
v = Visit(kuukausi=form.month.data, vuosi=form.year.data, lukumaara=form.VisitAmount.data)
v.sivu_id = sivuId
db.session().add(v)
db.session().commit()
return redirect(url_for("visit_index"))
@app.route("/visits", methods=["GET"])
@login_required
def visit_index():
return render_template("visits/list.html", title="Kuukauden käyntien listaus", form = ListForm())
@app.route("/result/", methods=["GET", "POST"])
@login_required
def visits_result():
if request.method == 'POST':
form = ListForm(request.form)
stmt = text("SELECT * FROM visit, sivu WHERE visit.kuukausi = :month AND visit.vuosi = :year AND visit.sivu_id = sivu.id AND sivu.account_id = :id").params(month=form.month.data, year=form.year.data, id=current_user.id)
stmt2 = text("SELECT * FROM visit, sivu WHERE visit.kuukausi = :month AND visit.vuosi = :year AND visit.sivu_id = sivu.id AND sivu.account_id = :id").params(month=form.month.data, year=form.year2.data, id=current_user.id)
result = db.engine.execute(stmt)
result2 = db.engine.execute(stmt2)
return render_template("visits/result.html", title="Tulos", visits=result, visits2=result2)
else:
return render_template("visits/result.html", title="Tulos")
```
#### File: application/yhteenveto/views.py
```python
from application import app, db
from flask import redirect, render_template, request, url_for
from application.visits.models import Visit
from application.yhteenveto.forms import InYearForm, InMonthForm
from flask_login.utils import login_required, current_user
from application.sivu.models import Sivu
from sqlalchemy.sql import text
@app.route("/yhteenveto/alku/", methods=["GET"])
@login_required
def yhteenveto_alku():
return render_template("yhteenveto/valinta.html", title="Yhteenvedot")
@app.route("/yhteenveto/vuodessa/", methods=["GET", "POST"])
@login_required
def yhteenveto_vuodessa():
if request.method == 'POST':
form = InYearForm(request.form)
stmt = text("SELECT sivu.osoite, SUM(visit.lukumaara) AS maara FROM sivu, visit WHERE visit.vuosi = :vuosi AND visit.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY sivu.osoite").params(vuosi=form.year.data, id=current_user.id)
result = db.engine.execute(stmt)
return render_template("yhteenveto/vuodessa.html", title="Käyntejä sivuilla vuodessa", vuosi=result)
else:
return render_template("yhteenveto/kyselyvuodessa.html", title="Käyntejä sivuilla vuodessa", form = InYearForm())
@app.route("/yhteenveto/ryhma/", methods=["GET", "POST"])
@login_required
def yhteenveto_ryhmatulos():
if request.method == 'POST':
form = InMonthForm(request.form)
stmt = text("SELECT sivu.ryhma AS ryhma, SUM(visit.lukumaara) AS maara FROM sivu, visit WHERE visit.vuosi = :vuosi AND visit.kuukausi = :kuukausi AND visit.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY sivu.ryhma").params(vuosi=form.year.data, kuukausi=form.month.data, id=current_user.id)
result = db.engine.execute(stmt)
return render_template("yhteenveto/ryhmassa.html", title="Käyntejä sivuryhmissä vuodessa", vuosi=result)
else:
return render_template("yhteenveto/kyselyryhmassa.html", title="Vuoden tilasto", form = InMonthForm())
@app.route("/yhteenveto/selaimia/", methods=["GET", "POST"])
@login_required
def yhteenveto_selaimia():
if request.method == 'POST':
form = InYearForm(request.form)
stmt = text("SELECT selain.selain AS nimi, SUM(selain.kaynnit) AS maara FROM sivu, selain, kavijat WHERE selain.kavijat_id = kavijat.id AND kavijat.vuosi = :vuosi AND kavijat.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY selain.selain").params(vuosi=form.year.data, id=current_user.id)
result = db.engine.execute(stmt)
return render_template("yhteenveto/selaimia.html", title="Selaimien yhteenveto", selaimet=result)
else:
return render_template("yhteenveto/selainvuosi.html", title="Vuoden tilasto", form = InYearForm())
@app.route("/yhteenveto/kavijoita/", methods=["GET", "POST"])
@login_required
def yhteenveto_kavijoita():
if request.method == 'POST':
form = InYearForm(request.form)
stmt = text("SELECT sivu.osoite, SUM(kavijat.kaynnit) AS maara FROM sivu, kavijat WHERE kavijat.vuosi = :vuosi AND kavijat.sivu_id = sivu.id AND sivu.account_id = :id GROUP BY sivu.osoite").params(vuosi=form.year.data, id=current_user.id)
result = db.engine.execute(stmt)
return render_template("yhteenveto/kavijoita.html", title="Kavijoita sivuilla vuodessa", kavijat=result)
else:
return render_template("yhteenveto/kavijavuosi.html", title="Vuoden tilasto", form = InYearForm())
``` |
{
"source": "JHON-EDV/chime",
"score": 3
} |
#### File: src/penn_chime/charts.py
```python
from altair import Chart # type: ignore
import pandas as pd # type: ignore
import numpy as np # type: ignore
from .parameters import Parameters
from .utils import add_date_column
def new_admissions_chart(
alt,
projection_admits: pd.DataFrame,
parameters: Parameters,
as_date: bool = False,
) -> Chart:
    """Build the Altair chart of projected daily admissions."""
plot_projection_days = parameters.n_days - 10
max_y_axis = parameters.max_y_axis
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
y_scale.clamp = True
tooltip_dict = {False: "day", True: "date:T"}
if as_date:
projection_admits = add_date_column(projection_admits)
x_kwargs = {"shorthand": "date:T", "title": "Date"}
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
return (
alt.Chart(projection_admits.head(plot_projection_days))
.transform_fold(fold=["Hospitalized", "ICU", "Ventilated"])
.mark_line(point=True)
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Daily admissions", scale=y_scale),
color="key:N",
tooltip=[
tooltip_dict[as_date],
alt.Tooltip("value:Q", format=".0f", title="Admissions"),
"key:N",
],
)
.interactive()
)
def admitted_patients_chart(
alt,
census: pd.DataFrame,
parameters: Parameters,
as_date: bool = False
) -> Chart:
    """Build the Altair chart of the projected admitted-patient census."""
plot_projection_days = parameters.n_days - 10
max_y_axis = parameters.max_y_axis
if as_date:
census = add_date_column(census)
x_kwargs = {"shorthand": "date:T", "title": "Date"}
idx = "date:T"
else:
x_kwargs ={"shorthand": "day", "title": "Days from today"}
idx = "day"
y_scale = alt.Scale()
if max_y_axis:
y_scale.domain = (0, max_y_axis)
y_scale.clamp = True
return (
alt.Chart(census.head(plot_projection_days))
.transform_fold(fold=["Hospitalized Census", "ICU Census", "Ventilated Census"])
.mark_line(point=True)
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Census", scale=y_scale),
color="key:N",
tooltip=[
idx,
alt.Tooltip("value:Q", format=".0f", title="Census"),
"key:N",
],
)
.interactive()
)
def additional_projections_chart(
alt,
i: np.ndarray,
r: np.ndarray,
as_date: bool = False,
max_y_axis: int = None
) -> Chart:
dat = pd.DataFrame({"Infected": i, "Recovered": r})
dat["day"] = dat.index
if as_date:
dat = add_date_column(dat)
x_kwargs = {"shorthand": "date:T", "title": "Date"}
else:
x_kwargs = {"shorthand": "day", "title": "Days from today"}
y_scale = alt.Scale()
if max_y_axis is not None:
y_scale.domain = (0, max_y_axis)
y_scale.clamp = True
return (
alt.Chart(dat)
.transform_fold(fold=["Infected", "Recovered"])
.mark_line()
.encode(
x=alt.X(**x_kwargs),
y=alt.Y("value:Q", title="Case Volume", scale=y_scale),
tooltip=["key:N", "value:Q"],
color="key:N",
)
.interactive()
)
```
#### File: src/penn_chime/utils.py
```python
from collections import namedtuple
from datetime import datetime, timedelta
from typing import Optional
import numpy as np # type: ignore
import pandas as pd # type: ignore
# from .parameters import Parameters
# (0.02, 7) is 2%, 7 days
# be sure to multiply by 100 when using as a default to the pct widgets!
RateLos = namedtuple('RateLos', ('rate', 'length_of_stay'))
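# Hedged illustration (editor's note): e.g. hospitalized = RateLos(0.05, 7)
# gives hospitalized.rate == 0.05 and hospitalized.length_of_stay == 7.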
def add_date_column(
df: pd.DataFrame,
drop_day_column: bool = False,
date_format: Optional[str] = None,
) -> pd.DataFrame:
"""Copies input data frame and converts "day" column to "date" column
Assumes that day=0 is today and allocates dates for each integer day.
    The day range does not need to be continuous.
    Columns are organized as in the original frame, except that the date
    columns come first.
Arguments:
df: The data frame to convert.
drop_day_column: If true, the returned data frame will not have a day column.
        date_format: If given, converts datetime objects to the specified string format.
Raises:
KeyError: if "day" column not in df
ValueError: if "day" column is not of type int
"""
if not "day" in df:
raise KeyError("Input data frame for converting dates has no 'day column'.")
if not pd.api.types.is_integer_dtype(df.day):
raise KeyError("Column 'day' for dates converting data frame is not integer.")
df = df.copy()
# Prepare columns for sorting
non_date_columns = [col for col in df.columns if not col == "day"]
# Allocate (day) continous range for dates
n_days = int(df.day.max())
start = datetime.now()
end = start + timedelta(days=n_days + 1)
# And pick dates present in frame
dates = pd.date_range(start=start, end=end, freq="D")[df.day.tolist()]
if date_format is not None:
dates = dates.strftime(date_format)
df["date"] = dates
if drop_day_column:
df.pop("day")
date_columns = ["date"]
else:
date_columns = ["day", "date"]
# sort columns
df = df[date_columns + non_date_columns]
return df
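# Hedged usage sketch (editor's illustration, not part of the original module):
#     df = pd.DataFrame({"day": [0, 1, 2], "admits": [3, 5, 8]})
#     add_date_column(df, date_format="%Y-%m-%d")
# maps day 0 to today and each later day to the matching calendar date, and
# returns the frame with the "day" and "date" columns first.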
```
#### File: chime/tests/test_app.py
```python
import pytest # type: ignore
import pandas as pd # type: ignore
import numpy as np # type: ignore
import altair as alt # type: ignore
from src.penn_chime.charts import new_admissions_chart, admitted_patients_chart
from src.penn_chime.models import sir, sim_sir
from src.penn_chime.parameters import Parameters
from src.penn_chime.presentation import display_header
from src.penn_chime.settings import DEFAULTS
from src.penn_chime.defaults import RateLos
PARAM = Parameters(
current_hospitalized=100,
doubling_time=6.0,
known_infected=5000,
market_share=0.05,
relative_contact_rate=0.15,
susceptible=500000,
hospitalized=RateLos(0.05, 7),
icu=RateLos(0.02, 9),
ventilated=RateLos(0.01, 10),
n_days=60
)
# set up
# we just want to verify that st _attempted_ to render the right stuff
# so we store the input, and make sure that it matches what we expect
class MockStreamlit:
def __init__(self):
self.render_store = []
self.markdown = self.just_store_instead_of_rendering
self.latex = self.just_store_instead_of_rendering
self.subheader = self.just_store_instead_of_rendering
def just_store_instead_of_rendering(self, inp, *args, **kwargs):
self.render_store.append(inp)
return None
def cleanup(self):
"""
Call this after every test, unless you intentionally want to accumulate stuff-to-render
"""
self.render_store = []
st = MockStreamlit()
# test presentation
def test_penn_logo_in_header():
penn_css = '<link rel="stylesheet" href="https://www1.pennmedicine.org/styles/shared/penn-medicine-header.css">'
display_header(st, PARAM)
assert len(
list(filter(lambda s: penn_css in s, st.render_store))
), "The Penn Medicine header should be printed"
def test_the_rest_of_header_shows_up():
random_part_of_header = "implying an effective $R_t$ of"
assert len(
list(filter(lambda s: random_part_of_header in s, st.render_store))
), "The whole header should render"
st.cleanup()
@pytest.mark.xfail()
def test_header_fail():
"""
Just proving to myself that these tests work
"""
some_garbage = "ajskhlaeHFPIQONOI8QH34TRNAOP8ESYAW4"
display_header(st, PARAM)
assert len(
list(filter(lambda s: some_garbage in s, st.render_store))
), "This should fail"
st.cleanup()
def test_defaults_repr():
"""
Test DEFAULTS.repr
"""
repr(DEFAULTS)
# Test the math
def test_sir():
"""
Someone who is good at testing, help
"""
sir_test = sir(100, 1, 0, 0.2, 0.5, 1)
assert sir_test == (
0.7920792079207921,
0.20297029702970298,
0.0049504950495049506,
), "This contrived example should work"
assert isinstance(sir_test, tuple)
for v in sir_test:
assert isinstance(v, float)
assert v >= 0
# Certain things should *not* work
with pytest.raises(TypeError) as error:
sir("S", 1, 0, 0.2, 0.5, 1)
assert str(error.value) == "can't multiply sequence by non-int of type 'float'"
with pytest.raises(TypeError) as error:
sir(100, "I", 0, 0.2, 0.5, 1)
assert str(error.value) == "can't multiply sequence by non-int of type 'float'"
with pytest.raises(TypeError) as error:
sir(100, 1, "R", 0.2, 0.5, 1)
assert str(error.value) == "unsupported operand type(s) for +: 'float' and 'str'"
with pytest.raises(TypeError) as error:
sir(100, 1, 0, "beta", 0.5, 1)
assert str(error.value) == "bad operand type for unary -: 'str'"
with pytest.raises(TypeError) as error:
sir(100, 1, 0, 0.2, "gamma", 1)
assert str(error.value) == "unsupported operand type(s) for -: 'float' and 'str'"
with pytest.raises(TypeError) as error:
sir(100, 1, 0, 0.2, 0.5, "N")
assert str(error.value) == "unsupported operand type(s) for /: 'str' and 'float'"
# Zeros across the board should fail
with pytest.raises(ZeroDivisionError):
sir(0, 0, 0, 0, 0, 0)
def test_sim_sir():
"""
Rounding to move fast past decimal place issues
"""
sim_sir_test = sim_sir(5, 6, 7, 0.1, 0.1, 40)
s, i, r = sim_sir_test
assert round(s[0], 0) == 5
assert round(i[0], 2) == 6
assert round(r[0], 0) == 7
assert round(s[-1], 2) == 0
assert round(i[-1], 2) == 0.18
assert round(r[-1], 2) == 17.82
assert isinstance(sim_sir_test, tuple)
for v in sim_sir_test:
assert isinstance(v, np.ndarray)
def test_new_admissions_chart():
projection_admits = pd.read_csv('tests/projection_admits.csv')
chart = new_admissions_chart(alt, projection_admits, PARAM)
assert isinstance(chart, alt.Chart)
assert chart.data.iloc[1].hosp < 1
assert round(chart.data.iloc[40].icu, 0) == 25
# test fx call with no params
with pytest.raises(TypeError):
new_admissions_chart()
empty_chart = new_admissions_chart(alt, pd.DataFrame(), PARAM)
assert empty_chart.data.empty
def test_admitted_patients_chart():
census_df = pd.read_csv('tests/census_df.csv')
chart = admitted_patients_chart(alt, census_df, PARAM)
assert isinstance(chart, alt.Chart)
assert chart.data.iloc[1].hosp == 1
assert chart.data.iloc[49].vent == 203
# test fx call with no params
with pytest.raises(TypeError):
admitted_patients_chart()
empty_chart = admitted_patients_chart(alt, pd.DataFrame(), PARAM)
assert empty_chart.data.empty
def test_parameters():
param = Parameters(
current_hospitalized=100,
doubling_time=6.0,
known_infected=5000,
market_share=0.05,
relative_contact_rate=0.15,
susceptible=500000,
hospitalized=RateLos(0.05, 7),
icu=RateLos(0.02, 9),
ventilated=RateLos(0.01, 10),
n_days=60
)
# test the Parameters
# hospitalized, icu, ventilated
assert param.rates == (0.05, 0.02, 0.01)
assert param.lengths_of_stay == (7, 9, 10)
assert param.infected == 40000.0
assert isinstance(param.infected, float) # based off note in models.py
# test the class-calculated attributes
assert param.detection_probability == 0.125
assert param.intrinsic_growth_rate == 0.12246204830937302
assert param.beta == 3.2961405355450555e-07
assert param.r_t == 2.307298374881539
assert param.r_naught == 2.7144686763312222
assert param.doubling_time_t == 7.764405988534983
# test the things n_days creates, which in turn tests sim_sir, sir, and get_dispositions
assert len(param.susceptible_v) == len(param.infected_v) == len(param.recovered_v) == param.n_days + 1 == 61
assert param.susceptible_v[0] == 500000.0
assert round(param.susceptible_v[-1], 0) == 67202
assert round(param.infected_v[1], 0) == 43735
assert round(param.recovered_v[30], 0) == 224048
assert [d[0] for d in param.dispositions] == [100.0, 40.0, 20.0]
assert [round(d[-1], 0) for d in param.dispositions] == [115.0, 46.0, 23.0]
# change n_days, make sure it cascades
param.n_days = 2
assert len(param.susceptible_v) == len(param.infected_v) == len(param.recovered_v) == param.n_days + 1 == 3
``` |
{
"source": "JhoneM/DynamicUrl",
"score": 3
} |
#### File: DynamicUrl/dynamic_url/url.py
```python
from .functions import Querys
class Url:
    def get_url(self, headers, db):
        """Extract the appointment ID from the request's Origin header and
        match it against the database to validate that it exists.
        Args:
            headers: request headers object
            db: database client used to run the lookup query
        Returns:
            url (str): the validated url, or an empty string if not found
"""
try:
url_origin = headers.get("Origin", False)
appointment_id = url_origin.split("-")[2]
query_functions = Querys(appointment_id)
query, job_config = query_functions.from_telehealth_room_url()
df = db.query(query, job_config=job_config).to_dataframe()
url = df.url[0] if not df.empty else ""
except IndexError:
url = ""
except AttributeError:
url = ""
return url
``` |
{
"source": "JhonEmmanuelTorres/cryptography-unal",
"score": 3
} |
#### File: homeworks/number 4/des.py
```python
from base64 import standard_b64encode, standard_b64decode
from os import system
from lib.pyDes import *
from sys import stdin
# required functions
def readFile(path):
with open(path, mode='rb') as file:
data = file.read()
return data
def writeFile(path, data):
with open(path, mode='wb') as file:
file.write(data)
def encrypt(message, key):
return des(key, CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5).encrypt(message)
def decrypt(cipherImage, key):
return des(key, CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5).decrypt(cipherImage, padmode=PAD_PKCS5)
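# Hedged note (editor's comment): both helpers run DES in CBC mode with a fixed
# all-zero IV and PKCS5 padding, so a given key always maps the same plaintext
# to the same ciphertext.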
# required input data
print "Enter the key (8 bytes):\t",
key = stdin.readline().strip()
print "Enter name of the image:\t",
inputImage = stdin.readline().strip()
print "Enter name output image:\t",
outputImage = stdin.readline().strip()
# validation
if len(key) != 8:
print "Key invalid"
exit()
# homework
# read bits
message = readFile(inputImage)
# cipher image
imageCipher = encrypt(message, key)
# encode in base 64
imageCipher64 = standard_b64encode(imageCipher)
# show representation
print "The message in base 64 is:\t" + imageCipher64
# decode in base 64
imageCipher = standard_b64decode(imageCipher64)
# write image
writeFile(outputImage, decrypt(imageCipher, key))
# open image
system("xdg-open " + outputImage)
``` |
{
"source": "JhonFrederick/django-attributesjsonfield",
"score": 2
} |
#### File: attributesjsonfield/forms/fields.py
```python
import django
from django.forms import MultiValueField, CharField
from attributesjsonfield.widgets import AttributesJSONWidget
class AttributesJSONField(MultiValueField):
    """Form field that renders one input per configured attribute and compresses the values into a dict."""
widget = AttributesJSONWidget
def __init__(self, *args, attributes=None, require_all_fields=False, **kwargs):
self.attributes = attributes
self.clean_attributes = []
if self.attributes:
for attr in self.attributes:
is_dict = type(attr) == dict
field = attr["field"] if is_dict else attr
if is_dict:
label = attr.get("verbose_name", field)
required = attr.get("required", True)
else:
label = field
required = True
self.clean_attributes.append(
{
"field": field,
"label": label,
"name": field,
"choices": attr.get("choices") if is_dict else None,
"required": required,
"default": attr.get("default") if is_dict else None,
"data_type": attr.get("data_type") if is_dict else None,
}
)
else:
self.clean_attributes = None
fields = [
CharField(
label=attr["label"],
initial=attr.get("default"),
required=attr["required"],
)
for attr in self.clean_attributes
]
self.widget = AttributesJSONWidget(attributes_json=self.clean_attributes)
if django.VERSION >= (3, 1):
# MultiValueField does not receive as kwargs the encoder or decoder
kwargs.pop("encoder")
kwargs.pop("decoder")
super().__init__(fields=fields, require_all_fields=require_all_fields, **kwargs)
def compress(self, data_list):
if data_list:
data = {}
for i, attribute in enumerate(self.clean_attributes):
data[attribute["name"]] = data_list[i]
return data
return None
``` |
{
"source": "Jhong098/SignSense",
"score": 2
} |
#### File: Jhong098/SignSense/client.py
```python
import socket
from sys import argv
import cv2
import mediapipe as mp
import itertools
import numpy as np
import time
import sys
from multiprocessing import Queue, Process
from queue import Empty
import atexit
from math import ceil
from collections import deque
sys.path.insert(1, './tools')
import holistic, common, encrypt
PRINT_FREQ = 30
SERVER_ADDR = "172.16.31.10"
# SERVER_ADDR = "127.0.0.1"
# Server IP address and Port number
serverAddressPort = (SERVER_ADDR, 9999)
APP_NAME = "SignSense"
# send landmarks and receive predictions from server continuously
def server(landmark_queue, prediction_queue):
common.print_debug_banner("STARTED SERVER")
UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
UDPClientSocket.setblocking(0)
while True:
try:
landmark = landmark_queue.get()
encrypted_landmark = encrypt.encrypt_chacha(landmark)
# Send message to server using created UDP socket
UDPClientSocket.sendto(encrypted_landmark, serverAddressPort)
# Receive message from the server
msgFromServer = UDPClientSocket.recvfrom(2048)[0]
raw_data = encrypt.decrypt_chacha(msgFromServer)
prediction_queue.put(raw_data)
except encrypt.DecryptionError:
print(f"tried to decrypt {msgFromServer}")
except socket.error as e:
# print(f"SOCKET EXCEPTION: {e}")
pass
except Exception as e:
print(f"SERVER EXCEPTION: {e}")
pass
def video_loop(landmark_queue, prediction_queue, use_holistic=False):
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
cap.set(cv2.CAP_PROP_FOURCC, fourcc)
if not cap.isOpened():
print("Error opening Camera")
fps = cap.get(cv2.CAP_PROP_FPS)
print("Webcam FPS = {}".format(fps))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
mp_drawing = mp.solutions.drawing_utils
timestamp = None
started = False
predicted = None
initialized = False
delay = 0
pred_history = deque([" "]*5, 5)
pdecay = time.time()
print("starting image cap")
for image, results in holistic.process_capture(cap, use_holistic):
window_state = cv2.getWindowProperty(APP_NAME, 0)
if started and window_state == -1:
print("QUITTING")
break
started = True
newtime = time.time()
if timestamp is not None:
diff = newtime - timestamp
# Uncomment to print time between each frame
# print(diff)
timestamp = newtime
row = holistic.to_landmark_row(results, use_holistic)
landmark_str = ','.join(np.array(row).astype(np.str))
# send comma delimited str of flattened landmarks in bytes to server
try:
landmark_queue.put_nowait(landmark_str)
except Exception as e:
print(e)
try:
out = prediction_queue.get_nowait()
# toggle the server status flag on first message received
if out and not initialized:
initialized = True
common.print_debug_banner("SENDING ACK TO SERVER FOR CONNECTION")
# send a one-time ACK to toggle server connected status
landmark_queue.put_nowait("ACK")
if delay >= PRINT_FREQ:
if out and out != pred_history[-1] and out != "None":
pred_history.append(out)
pdecay = time.time()
delay = 0
except:
pass
delay += 1
if time.time() - pdecay > 7:
pred_history = deque([" "]*5, 5)
holistic.draw_landmarks(image, results, use_holistic, ' '.join(pred_history))
if initialized:
cv2.circle(image,(20,450), 10, (0,255,0), -1)
cv2.putText(image,'online',(40,458), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow(APP_NAME, image)
else:
cv2.circle(image,(20,450), 10, (0,0,255), -1)
cv2.putText(image,'connecting',(40,458), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow(APP_NAME, image)
cap.release()
cv2.destroyAllWindows()
# send termination message to server
landmark_queue.put("END")
if __name__ == "__main__":
# queue containing the returned predictions from the server
landmark_queue, prediction_queue = Queue(), Queue()
# start separate process for the webcam video GUI
server_p = Process(target=server, args=(landmark_queue, prediction_queue, ))
server_p.daemon = True
atexit.register(common.exit_handler, server_p)
server_p.start()
video_p = Process(target=video_loop, args=(landmark_queue, prediction_queue, ))
video_p.daemon = True
atexit.register(common.exit_handler, video_p)
video_p.start()
video_p.join()
```
#### File: Jhong098/SignSense/server.py
```python
import socket
from pathlib import Path
from sys import path, argv
path.insert(1, './tools')
import common, encrypt
from holistic import normalize_features
from multiprocessing import Queue, Process, Manager
import threading
from ctypes import c_char_p
from queue import Empty
import atexit
from math import ceil
import numpy as np
import time
DEBUG = True
LOG = False
ENCRYPT = True
GPU = True
# Create a tuple with IP Address and Port Number
SERVER_ADDR = ("0.0.0.0", common.SERVER_RECV_PORT)
BLACKLIST_ADDRS = [('192.168.1.68', 9999)] # local router heartbeat thing
# current working directory
CURRENT_WORKING_DIRECTORY = Path().absolute()
DEFAULT_MODEL = list((CURRENT_WORKING_DIRECTORY/'models').iterdir())[-1]
LABELS = common.get_labels('data/')
PRINT_FREQ = 30
PRED_FREQ = 5
MAX_QUEUE_LEN = 25
CONFIDENCE_THRESHOLD = 0.6
POLL_INTERVAL = 30
class MissingModelException(Exception):
pass
def array_to_class(out, addr, connected):
prediction = np.argmax(out)
# send confident prediction
if out[prediction] > CONFIDENCE_THRESHOLD:
print(f"{LABELS[prediction]} {out[prediction]*100} - {addr}")
tag = LABELS[prediction]
if not connected:
tag = "None" if tag is None else tag
return encrypt.encrypt_chacha(tag) if ENCRYPT else tag.encode()
# send back prediction if it is a valid class or if the client hasn't connected
if tag is not None:
ret_val = encrypt.encrypt_chacha(tag) if ENCRYPT else tag.encode()
return ret_val
else:
print("None ({} {}% Below threshold)".format(
LABELS[prediction], out[prediction]*100))
# TODO: store landmarks based on the client to handle multiple clients
class LandmarkReceiver(common.UDPRequestHandler):
def __init__(self, **kwargs):
super().__init__()
self.__dict__.update(kwargs)
self.CLIENT_TIMEOUT = 30 # time allowed between messages
self.client_to_process = {}
self.manager = Manager()
self.client_to_last_msg = self.manager.dict()
self.client_to_f_q = {}
self.client_to_p_q = {}
self.poll_connections()
self.cleaning_process = None
self.client_to_connected = {}
def periodic_task(interval, times = -1):
def outer_wrap(function):
def wrap(*args, **kwargs):
stop = threading.Event()
def inner_wrap():
i = 0
while i != times and not stop.isSet():
stop.wait(interval)
function(*args, **kwargs)
i += 1
t = threading.Timer(0, inner_wrap)
t.daemon = True
t.start()
return stop
return wrap
return outer_wrap
def cleanup_client(self, addr):
common.print_debug_banner(f"CLEANING UP CLIENT: {addr}")
self.cleaning_process = addr
del self.client_to_f_q[addr]
del self.client_to_p_q[addr]
del self.client_to_last_msg[addr]
del self.client_to_connected[addr]
process_to_del = self.client_to_process[addr]
process_to_del.terminate()
common.print_debug_banner("FINISHED TERMINATING")
# process_to_del.close()
# common.print_debug_banner("FINISHED CLOSING")
del self.client_to_process[addr]
common.print_debug_banner(f"FINISHED CLEANUP")
print(f"CURRENT PROCESS COUNT: {len(self.client_to_process.keys())}")
self.cleaning_process = None
@periodic_task(POLL_INTERVAL)
def poll_connections(self):
common.print_debug_banner(f"POLLING CONNECTIONS")
print(f"CURRENT PROCESS COUNT: {len(self.client_to_process.keys())}")
for client, last_msg_ts in self.client_to_last_msg.items():
if time.time() - last_msg_ts > self.CLIENT_TIMEOUT:
common.print_debug_banner(f"FOUND OVERTIME CLIENT: {client}")
self.cleanup_client(client)
def start_process(self, addr):
f_q = Queue(MAX_QUEUE_LEN)
p_q = Queue(MAX_QUEUE_LEN)
self.client_to_f_q[addr] = f_q
self.client_to_p_q[addr] = p_q
self.client_to_connected[addr] = False
self.client_to_last_msg[addr] = time.time()
predict = Process(
target=predict_loop,
args=(
model_path,
f_q,
p_q,
)
)
self.client_to_process[addr] = predict
atexit.register(common.exit_handler, predict)
predict.daemon = True
predict.start()
print(f"started new predict process for {addr}")
def datagram_received(self, data, addr):
if addr is None:
return
if addr in BLACKLIST_ADDRS:
common.print_debug_banner(f"BLOCKED {addr}")
return
# new client connected
if addr not in self.client_to_f_q and addr != self.cleaning_process:
self.start_process(addr)
return
self.client_to_last_msg[addr] = time.time()
# Receive and print the datagram received from client
try:
if ENCRYPT:
data = encrypt.decrypt_chacha(data)
# received termination signal from client
if len(data) < 4:
if data == "END":
common.print_debug_banner(f"RECEIVED 'END' FROM {addr}")
self.client_to_f_q[addr].put("END")
self.cleanup_client(addr)
elif data == "ACK":
common.print_debug_banner(f"RECEIVED 'ACK' FROM {addr}")
self.client_to_connected[addr] = True
return
landmark_arr = np.array([float(i.strip()) for i in data.split(",")])
normalized_data = normalize_features(landmark_arr)
self.client_to_f_q[addr].put_nowait(normalized_data)
pred = self.client_to_p_q[addr].get_nowait()
tag = array_to_class(pred, addr, self.client_to_connected[addr])
self.transport.sendto(tag, addr)
except encrypt.DecryptionError:
print(f"tried to decrypt {data}")
except Exception as e:
# print(e)
pass
def predict_loop(model_path, f_q, p_q):
# force predict to run on CPU
if not GPU:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
import keras
from train import TIMESTEPS, init_gpu
if LOG:
import timeit
import logging
LOG_FILE_NAME = "logs/predict_log"
logging.basicConfig(
level=logging.DEBUG,
filemode="a+",
filename=LOG_FILE_NAME,
format="%(message)s"
)
if GPU:
logging.info(f"\n-----USING GPU------")
else:
logging.info(f"\n-----USING CPU------")
times = []
time_count = 0
TIME_FREQ = 60
def slide(w, new):
# Discard oldest frame and append new frame to data window
w[:-1] = w[1:]
w[-1] = new
return w
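    # e.g. with TIMESTEPS=4, slide([f0, f1, f2, f3], f4) -> [f1, f2, f3, f4]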
if GPU:
init_gpu()
model = keras.models.load_model(model_path)
delay = 0
window = None
results = None
results_len = ceil(PRINT_FREQ / PRED_FREQ)
if DEBUG:
common.print_debug_banner("STARTED PREDICTION")
while True:
row = f_q.get()
if len(row) == 3 and row == "END":
break
if window is None:
window = np.zeros((TIMESTEPS, len(row)))
window = slide(window, row)
if delay >= PRED_FREQ:
out = model(np.array([window]))
if results is None:
results = np.zeros((results_len, len(LABELS)))
results = slide(results, out)
pred = np.mean(results, axis=0)
p_q.put(pred)
delay = 0
delay += 1
common.print_debug_banner("ENDING PREDICT PROCESS")
def live_predict(model_path, use_holistic):
# launch UDP server to receive landmark features
common.start_server(
LandmarkReceiver(),
SERVER_ADDR
)
if __name__ == "__main__":
if len(argv) < 2:
model_path = CURRENT_WORKING_DIRECTORY/'models'/DEFAULT_MODEL
if not model_path.exists():
raise MissingModelException("NO MODEL CAN BE USED!")
else:
model_path = argv[1]
if DEBUG:
common.print_debug_banner(f"using model {model_path}")
live_predict(model_path, False)
```
#### File: SignSense/tools/encrypt.py
```python
from Crypto.Cipher import ChaCha20
from Crypto.Random import get_random_bytes
SECRET_KEY = '88ab02664ae3197ec531da8bd7ea0b5a'.encode()
class DecryptionError(Exception):
pass
# takes a string and encrypts into a bytearray
def encrypt_chacha(text):
nonce_rfc7539 = get_random_bytes(12)
cipher = ChaCha20.new(key=SECRET_KEY, nonce=nonce_rfc7539)
ciphertext = cipher.encrypt(text.encode())
nonce = cipher.nonce
ct = ciphertext
result = bytearray(nonce + ct)
return result
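# wire format: 12-byte nonce || ciphertext; decrypt_chacha splits on that boundary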
# takes bytearray and decrypts into string
def decrypt_chacha(encrypted_data):
try:
b64 = bytearray(encrypted_data)
nonce = b64[:12]
ciphertext = b64[12:]
cipher = ChaCha20.new(key=SECRET_KEY, nonce=nonce)
plaintext = cipher.decrypt(ciphertext).decode()
return plaintext
except:
raise DecryptionError
def test():
text = "TESTING"
encrypted = encrypt_chacha(text)
print(encrypted)
decrypted = decrypt_chacha(encrypted)
print(decrypted)
assert(text == decrypted)
# test()
```
#### File: SignSense/tools/extract_frames_from_videos.py
```python
import cv2
import os
from os.path import join, exists
from tqdm import tqdm
import numpy as np
hc = []
def convert(gesture_folder, target_folder):
rootPath = os.getcwd()
majorData = os.path.abspath(target_folder)
print(majorData)
if not exists(majorData):
os.makedirs(majorData)
gesture_folder = os.path.abspath(gesture_folder)
print(gesture_folder)
os.chdir(gesture_folder)
gestures = os.listdir(os.getcwd())
print("Source Directory containing gestures: %s" % (gesture_folder))
print("Destination Directory containing frames: %s\n" % (majorData))
for gesture in tqdm(gestures, unit='actions', ascii=True):
gesture_path = os.path.join(gesture_folder, gesture)
os.chdir(gesture_path)
gesture_frames_path = os.path.join(majorData, gesture)
if not os.path.exists(gesture_frames_path):
os.makedirs(gesture_frames_path)
videos = os.listdir(os.getcwd())
videos = [video for video in videos if(os.path.isfile(video))]
for video in tqdm(videos, unit='videos', ascii=True):
name = os.path.abspath(video)
cap = cv2.VideoCapture(name) # capturing input video
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
lastFrame = None
os.chdir(gesture_frames_path)
count = 0
# assumption only first 200 frames are important
while count < 201:
ret, frame = cap.read() # extract frame
if ret is False:
break
framename = os.path.splitext(video)[0]
framename = framename + "_frame_" + str(count) + ".jpeg"
hc.append(
[join(gesture_frames_path, framename), gesture, frameCount])
if not os.path.exists(framename):
lastFrame = frame
cv2.imwrite(framename, frame)
count += 1
os.chdir(gesture_path)
cap.release()
cv2.destroyAllWindows()
os.chdir(rootPath)
# convert("test_data/", "frames")
# print(hc)
```
#### File: SignSense/tools/live_predict.py
```python
from sys import argv
import cv2
import mediapipe as mp
import itertools
import numpy as np
import time
from collections import deque
from multiprocessing import Queue, Process
from queue import Empty
import atexit
from math import ceil
from pathlib import Path
import holistic
import common
USE_HOLISTIC = False
PRINT_FREQ = 30
PRED_FREQ = 5
assert PRINT_FREQ % PRED_FREQ == 0
LABELS = common.get_labels('data/')
def video_loop(feature_q, prediction_q, use_holistic):
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
cap.set(cv2.CAP_PROP_FOURCC, fourcc)
if not cap.isOpened():
print("Error opening Camera")
fps = cap.get(cv2.CAP_PROP_FPS)
print("Webcam FPS = {}".format(fps))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
mp_drawing = mp.solutions.drawing_utils
print("Awaiting start signal from predict")
prediction_q.get()
timestamp = None
delay = 0
tag = deque([" "]*5, 5)
pdecay = time.time()
print("starting image cap")
for image, results in holistic.process_capture(cap, use_holistic):
newtime = time.time()
if timestamp is not None:
diff = newtime - timestamp
# Uncomment to print time between each frame
# print(diff)
timestamp = newtime
raw_flat_row = holistic.to_landmark_row(results, use_holistic)
normalized_row = holistic.normalize_features(raw_flat_row)
feature_q.put(np.array(normalized_row))
try:
out = prediction_q.get_nowait()
prediction = np.argmax(out)
if delay >= PRINT_FREQ:
if out[prediction] > .6:
print("{} {}%".format(
LABELS[prediction], out[prediction]*100))
if LABELS[prediction] not in [tag[-1], None, "None"]:
tag.append(LABELS[prediction])
pdecay = time.time()
else:
print("None ({} {}% Below threshold)".format(
LABELS[prediction], out[prediction]*100))
delay = 0
if feature_q.qsize() > 5:
print(
"Warning: Model feature queue overloaded - size = {}".format(feature_q.qsize()))
print("--> ", end='')
for i, label in enumerate(out):
print("{}:{:.2f}% | ".format(LABELS[i], label*100), end='')
print("\n")
except Empty:
pass
delay += 1
if time.time() - pdecay > 7:
tag = deque([" "]*5, 5)
holistic.draw_landmarks(image, results, use_holistic, ' '.join(tag))
cv2.imshow("SignSense", image)
def predict_loop(feature_q, prediction_q):
import tensorflow as tf
import keras
import train
print("Starting prediction init")
train.init_gpu()
model = keras.models.load_model(model_path)
print("Sending ready to video loop")
prediction_q.put("start")
delay = 0
window = None
results = None
results_len = ceil(PRINT_FREQ / PRED_FREQ)
print("Starting prediction")
while True:
row = feature_q.get()
if window is None:
window = np.zeros((train.TIMESTEPS, len(row)))
# Discard oldest frame and append new frame to data window
window[:-1] = window[1:]
window[-1] = row
if delay >= PRED_FREQ:
out = model(np.array([window]))
if results is None:
results = np.zeros((results_len, len(LABELS)))
results[:-1] = results[1:]
results[-1] = out
prediction_q.put(np.mean(results, axis=0))
delay = 0
delay += 1
def live_predict(model_path, use_holistic):
f_q = Queue()
p_q = Queue()
p = Process(target=video_loop, args=(f_q, p_q, use_holistic,))
atexit.register(exit_handler, p)
p.start()
predict_loop(f_q, p_q)
def exit_handler(p):
try:
p.kill()
except:
print("Couldn't kill video_loop")
if __name__ == "__main__":
model_path = argv[1]
# Use MP Hands only
live_predict(model_path, USE_HOLISTIC)
``` |
{
"source": "jhong16/HLD-TT",
"score": 3
} |
#### File: src/tests/edit_verify_test.py
```python
from src.cli import TreeShell
import unittest
class TestVerificationEdits(unittest.TestCase):
def test_undo_delete_branch(self):
"""
Testing undo and redo interaction with delete_branch
"""
print("\n\nUndo Test branch delete=======================================================")
shell = TreeShell()
shell.reset()
shell.do_add_root_formula("or(a,b)")
shell.do_branch("1")
shell.do_go_to("2")
shell.do_add_formula("a")
shell.do_go_to("3")
shell.do_add_formula("b")
shell.do_mark_parent("3 1")
shell.do_go_to("1")
shell.do_delete_branch("")
self.assertEqual(len(shell.tree.formulas[1].children), 0)
self.assertEqual(len(shell.current_node.children), 0)
shell.do_undo("")
print(shell.tree.formulas)
self.assertEqual(len(shell.tree.formulas[1].children), 1)
self.assertEqual(len(shell.current_node.children), 2)
def test_checkmark_edits(self):
"""
Testing undo and redo interaction with checkmark system
"""
print("\n\nUndo Test checkmark edit1=======================================================")
shell = TreeShell()
shell.reset()
shell.do_add_root_formula("or(a,b)")
shell.do_branch("1")
shell.do_go_to("2")
shell.do_add_formula("a")
shell.do_mark_parent("2 1")
shell.do_go_to("3")
shell.do_add_formula("b")
shell.do_mark_parent("3 1")
shell.do_checkmark("1")
self.assertTrue(shell.tree.formulas[1].checkmarked)
#Using undo and redo on checkmark
shell.do_undo(None)
self.assertFalse(shell.tree.formulas[1].checkmarked)
shell.do_redo(None)
self.assertTrue(shell.tree.formulas[1].checkmarked)
shell.do_delete_formula("2")
self.assertFalse(shell.tree.formulas[1].checkmarked)
shell.do_undo(None)
self.assertTrue(shell.tree.formulas[1].checkmarked)
shell.do_redo(None)
self.assertFalse(shell.tree.formulas[1].checkmarked)
shell.do_undo(None)
self.assertTrue(shell.tree.formulas[1].checkmarked)
def test_checkmark_edits2(self):
"""
Testing delete_formula interaction with checkmark system
"""
print("\n\nUndo Test checkmark edits 2=======================================================")
shell = TreeShell()
shell.reset()
shell.do_add_root_formula("and(a,b)")
shell.do_add_formula("a")
shell.do_mark_parent("2 1")
shell.do_add_formula("b")
shell.do_mark_parent("3 1")
shell.do_checkmark("1")
self.assertTrue(shell.tree.formulas[1].checkmarked)
#Using undo and redo on checkmark
shell.do_undo(None)
self.assertFalse(shell.tree.formulas[1].checkmarked)
shell.do_redo(None)
self.assertTrue(shell.tree.formulas[1].checkmarked)
shell.do_delete_formula("1")
shell.do_undo(None)
shell.do_delete_formula("2")
self.assertFalse(shell.tree.formulas[1].checkmarked)
shell.do_undo(None)
self.assertTrue(shell.tree.formulas[1].checkmarked)
def test_BiCondition(self):
print("\n\nBiconditional Test=======================================================")
shell = TreeShell()
shell.reset()
shell.do_add_formula("iff(a,b)")
shell.do_branch("1")
shell.do_go_to("2")
shell.do_add_formula("a")
shell.do_add_formula("b")
shell.do_go_to("3")
shell.do_add_formula("not(a)")
shell.do_add_formula("not(b)")
shell.do_mark_parent("2 1")
shell.do_mark_parent("3 1")
shell.do_mark_parent("4 1")
shell.do_mark_parent("5 1")
shell.do_checkmark("1")
shell.do_add_root_formula("iff(a,b)")
shell.do_mark_parent("2 1")
shell.do_checkmark("1")
        self.assertTrue(shell.tree.formulas[1].checkmarked)
        self.assertTrue(shell.tree.formulas[2].checkmarked)
for i in range(1, len(shell.tree.formulas)):
self.assertTrue(shell.tree.formulas[i].valid)
shell.do_undo(None)
self.assertFalse(shell.tree.formulas[1].checkmarked)
shell.do_undo(None)
for i in range(2, len(shell.tree.formulas)):
self.assertFalse(shell.tree.formulas[i].valid, i)
shell.do_redo(None)
for i in range(2, len(shell.tree.formulas)):
self.assertTrue(shell.tree.formulas[i].valid, i)
shell.do_redo(None)
self.assertTrue(shell.tree.formulas[1].checkmarked)
shell.do_go_to("2")
shell.do_mark_open("")
self.assertTrue(shell.finish)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jhong93/aws-lambda-proxy",
"score": 2
} |
#### File: lambda/impl/short.py
```python
import boto3
import hashlib
import json
import os
from base64 import b64encode, b64decode
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from requests import post
from shared.crypto import REQUEST_META_NONCE, RESPONSE_META_NONCE, \
REQUEST_BODY_NONCE, RESPONSE_BODY_NONCE, \
decrypt_with_gcm, encrypt_with_gcm, PRIVATE_KEY_ENV_VAR
from shared.proxy import proxy_single_request, MAX_LAMBDA_BODY_SIZE
DEBUG = os.environ.get('VERBOSE', False)
S3_RESOURCE = boto3.resource('s3')
rsaPrivKey = os.environ.get(PRIVATE_KEY_ENV_VAR, None)
RSA_CIPHER = None
if rsaPrivKey is not None:
RSA_CIPHER = PKCS1_OAEP.new(RSA.importKey(rsaPrivKey.decode('hex')))
def decrypt_encrypted_metadata(event):
encryptedKey = b64decode(event['key'])
ciphertext = b64decode(event['meta64'])
tag = b64decode(event['metaTag'])
sessionKey = RSA_CIPHER.decrypt(encryptedKey)
cleartext = decrypt_with_gcm(sessionKey, ciphertext, tag,
REQUEST_META_NONCE)
return sessionKey, json.loads(cleartext)
def decrypt_encrypted_body(event, sessionKey, s3BucketName):
if 'body64' in event:
bodyData = b64decode(event['body64'])
if sessionKey is not None:
tag = b64decode(event['bodyTag'])
requestBody = decrypt_with_gcm(sessionKey, bodyData, tag,
REQUEST_BODY_NONCE)
else:
requestBody = bodyData
elif 's3Key' in event:
assert s3BucketName is not None
requestBody = get_request_body_from_s3(s3BucketName, event['s3Key'])
if sessionKey is not None:
tag = b64decode(event['s3Tag'])
requestBody = decrypt_with_gcm(sessionKey, requestBody, tag,
REQUEST_BODY_NONCE)
else:
requestBody = None
return requestBody
def get_request_body_from_s3(bucketName, key):
s3Object = S3_RESOURCE.Object(Bucket=bucketName, Key=key)
return s3Object.get()['Body'].read()
def put_response_body_in_s3(bucketName, data):
md5 = hashlib.md5()
md5.update(data)
key = md5.hexdigest()
s3Bucket = S3_RESOURCE.Bucket(bucketName)
s3Bucket.put_object(Key=key, Body=data,
StorageClass='REDUCED_REDUNDANCY')
return key
def post_message_to_server(messageServerHostAndPort, messageData):
md5 = hashlib.md5()
md5.update(messageData)
messageId = md5.hexdigest()
response = post('http://%s/%s' % (messageServerHostAndPort, messageId),
headers={
'Content-Length': str(len(messageData)),
'Content-Type': 'application/binary'},
data=messageData)
if response.status_code != 204:
raise IOError('Failed to post message to server: %s' %
messageServerHostAndPort)
return messageId
def encrypt_response_metadata(metadata, sessionKey):
ciphertext, tag = encrypt_with_gcm(sessionKey, json.dumps(metadata),
RESPONSE_META_NONCE)
return {'meta64': b64encode(ciphertext), 'metaTag': b64encode(tag)}
def prepare_response_content(content, sessionKey, s3BucketName,
messageServerHostAndPort):
ret = {}
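    # Responses at or above MAX_LAMBDA_BODY_SIZE are returned out-of-band via S3
    # or the caller's message server; smaller ones are base64-encoded inline.
    # Each path is additionally GCM-encrypted when a session key was provided.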
if s3BucketName is not None and len(content) >= MAX_LAMBDA_BODY_SIZE:
if sessionKey is None:
s3Data = content
else:
s3Data, tag = encrypt_with_gcm(sessionKey, content,
RESPONSE_BODY_NONCE)
ret['s3Tag'] = b64encode(tag)
ret['s3Key'] = put_response_body_in_s3(s3BucketName, s3Data)
elif messageServerHostAndPort is not None \
and len(content) >= MAX_LAMBDA_BODY_SIZE:
if sessionKey is None:
messageData = content
else:
messageData, tag = encrypt_with_gcm(sessionKey, content,
RESPONSE_BODY_NONCE)
ret['messageTag'] = b64encode(tag)
ret['messageId'] = post_message_to_server(messageServerHostAndPort,
messageData)
else:
if sessionKey is not None:
data, tag = encrypt_with_gcm(sessionKey,
content,
RESPONSE_BODY_NONCE)
ret['contentTag'] = b64encode(tag)
ret['content64'] = b64encode(data)
else:
ret['content64'] = b64encode(content)
return ret
def short_lived_handler(event, context):
"""Handle a single request and return it immediately"""
if 'key' in event:
sessionKey, requestMeta = decrypt_encrypted_metadata(event)
else:
sessionKey, requestMeta = None, event
# Unpack request metadata
method = requestMeta['method']
url = requestMeta['url']
requestHeaders = requestMeta['headers']
s3BucketName = requestMeta.get('s3Bucket', None)
messageServerHostAndPort = requestMeta.get('messageServer', None)
# Unpack request body
requestBody = decrypt_encrypted_body(event, sessionKey, s3BucketName)
response = proxy_single_request(method, url, requestHeaders,
requestBody, gzipResult=True)
ret = {
'statusCode': response.statusCode,
'headers': response.headers
}
if sessionKey is not None:
ret = encrypt_response_metadata(ret, sessionKey)
if response.content:
ret.update(prepare_response_content(response.content, sessionKey,
s3BucketName,
messageServerHostAndPort))
return ret
```
#### File: lib/proxies/aws_short.py
```python
import boto3
import hashlib
import json
import logging
from base64 import b64encode, b64decode
from random import SystemRandom
from threading import Semaphore
from concurrent.futures import ThreadPoolExecutor
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Random import get_random_bytes
from lib.proxy import AbstractRequestProxy, ProxyResponse
from lib.stats import LambdaStatsModel, S3StatsModel
from shared.crypto import REQUEST_META_NONCE, RESPONSE_META_NONCE, \
REQUEST_BODY_NONCE, RESPONSE_BODY_NONCE, \
decrypt_with_gcm, encrypt_with_gcm
from shared.proxy import MAX_LAMBDA_BODY_SIZE
logger = logging.getLogger(__name__)
random = SystemRandom()
SESSION_KEY_LENGTH = 16
def _get_region_from_arn(arn):
elements = arn.split(':')
return elements[3]
class ShortLivedLambdaProxy(AbstractRequestProxy):
"""Invoke a lambda for each request"""
def __init__(self, functions, maxParallelRequests, s3Bucket,
pubKeyFile, messageServer, stats):
assert not (messageServer is not None and s3Bucket is not None)
self.__functions = functions
self.__functionToClient = {}
self.__regionToClient = {}
self.__lambdaRateSemaphore = Semaphore(maxParallelRequests)
self.__lambda = boto3.client('lambda')
if 'lambda' not in stats.models:
stats.register_model('lambda', LambdaStatsModel())
self.__lambdaStats = stats.get_model('lambda')
# Use local message server to receive large payloads
self.__enableMessageServer = messageServer is not None
self.__messageServer = messageServer
# Use s3 to send and receive large payloads
self.__enableS3 = False
if s3Bucket is not None:
self.__s3Bucket = s3Bucket
if 's3' not in stats.models:
stats.register_model('s3', S3StatsModel())
self.__s3Stats = stats.get_model('s3')
self.__s3 = boto3.client('s3')
self.__s3DeletePool = ThreadPoolExecutor(1)
self.__enableS3 = True
# Enable encryption
self.__enableEncryption = False
if pubKeyFile is not None:
with open(pubKeyFile, 'rb') as ifs:
self.__rsaCipher = PKCS1_OAEP.new(RSA.importKey(ifs.read()))
self.__enableEncryption = True
def __get_lambda_client(self, function):
"""Get a lambda client from the right region"""
client = self.__functionToClient.get(function)
if client is not None:
return client
if 'arn:' not in function:
# using function name in the default region
client = self.__lambda
self.__functionToClient[function] = client
else:
region = _get_region_from_arn(function)
client = self.__regionToClient.get(region)
if client is None:
client = boto3.client('lambda', region_name=region)
self.__regionToClient[region] = client
self.__functionToClient[function] = client
return client
def __delete_object_from_s3(self, key):
assert self.__enableS3 is True
self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key)
def __load_object_from_s3(self, key):
assert self.__enableS3 is True
result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key)
ret = result['Body'].read()
self.__s3DeletePool.submit(self.__delete_object_from_s3, key)
self.__s3Stats.record_get(len(ret))
return ret
def __put_object_into_s3(self, data):
assert self.__enableS3 is True
md5 = hashlib.md5()
md5.update(data)
key = md5.hexdigest()
s3Bucket = boto3.resource('s3').Bucket(self.__s3Bucket)
s3Bucket.put_object(Key=key, Body=data,
StorageClass='REDUCED_REDUNDANCY')
self.__s3Stats.record_get(len(data))
return key
def __prepare_request_body(self, body, sessionKey):
bodyArgs = {}
if len(body) <= MAX_LAMBDA_BODY_SIZE:
if self.__enableEncryption:
bodyData, bodyTag = encrypt_with_gcm(sessionKey, body,
REQUEST_BODY_NONCE)
bodyArgs['bodyTag'] = b64encode(bodyTag)
bodyArgs['body64'] = b64encode(bodyData)
else:
bodyArgs['body64'] = b64encode(body)
elif self.__enableS3:
if self.__enableEncryption:
assert sessionKey is not None
s3Data, s3Tag = encrypt_with_gcm(sessionKey, body,
REQUEST_BODY_NONCE)
bodyArgs['s3Tag'] = b64encode(s3Tag)
else:
s3Data = body
requestS3Key = self.__put_object_into_s3(s3Data)
bodyArgs['s3Key'] = requestS3Key
return bodyArgs
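    # Hybrid encryption: the request metadata is sealed with the random
    # per-request session key via GCM (shared.crypto), and the session key
    # itself is wrapped with the lambda's RSA public key (PKCS1-OAEP).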
def __prepare_encrypted_metadata(self, metaArgs, sessionKey):
assert sessionKey is not None
ciphertext, tag = encrypt_with_gcm(sessionKey,
json.dumps(metaArgs),
REQUEST_META_NONCE)
key = self.__rsaCipher.encrypt(sessionKey)
return {
'meta64': b64encode(ciphertext),
'metaTag': b64encode(tag),
'key': b64encode(key)
}
def __handle_encrypted_metadata(self, response, sessionKey):
assert sessionKey is not None
ciphertext = b64decode(response['meta64'])
tag = b64decode(response['metaTag'])
plaintext = decrypt_with_gcm(sessionKey, ciphertext, tag,
RESPONSE_META_NONCE)
return json.loads(plaintext)
def __handle_response_body(self, response, sessionKey):
content = b''
if 'content64' in response:
content = b64decode(response['content64'])
if self.__enableEncryption:
assert sessionKey is not None
tag = b64decode(response['contentTag'])
content = decrypt_with_gcm(sessionKey, content, tag,
RESPONSE_BODY_NONCE)
elif 's3Key' in response:
content = self.__load_object_from_s3(response['s3Key'])
if self.__enableEncryption:
assert sessionKey is not None
tag = b64decode(response['s3Tag'])
content = decrypt_with_gcm(sessionKey, content, tag,
RESPONSE_BODY_NONCE)
elif 'messageId' in response:
content = self.__messageServer.get_message(response['messageId']).content
if self.__enableEncryption:
assert sessionKey is not None
tag = b64decode(response['messageTag'])
content = decrypt_with_gcm(sessionKey, content, tag,
RESPONSE_BODY_NONCE)
return content
def request(self, method, url, headers, body):
        logger.debug('Proxying %s %s with Lambda', method, url)
sessionKey = None
if self.__enableEncryption:
sessionKey = get_random_bytes(SESSION_KEY_LENGTH)
requestS3Key = None
try:
invokeArgs = {
'method': method,
'url': url,
'headers': headers,
}
if self.__enableS3:
invokeArgs['s3Bucket'] = self.__s3Bucket
if self.__enableMessageServer:
invokeArgs['messageServer'] = self.__messageServer.publicHostAndPort
if self.__enableEncryption:
invokeArgs = self.__prepare_encrypted_metadata(invokeArgs,
sessionKey)
if body is not None:
invokeArgs.update(self.__prepare_request_body(body, sessionKey))
function = random.choice(self.__functions)
lambdaClient = self.__get_lambda_client(function)
self.__lambdaRateSemaphore.acquire()
try:
with self.__lambdaStats.record() as billingObject:
invokeResponse = lambdaClient.invoke(
FunctionName=function,
Payload=json.dumps(invokeArgs),
LogType='Tail')
billingObject.parse_log(invokeResponse['LogResult'])
finally:
self.__lambdaRateSemaphore.release()
finally:
if requestS3Key is not None:
self.__s3DeletePool.submit(self.__delete_object_from_s3,
requestS3Key)
if invokeResponse['StatusCode'] != 200:
logger.error('%s: status=%d', invokeResponse['FunctionError'],
invokeResponse['StatusCode'])
return ProxyResponse(statusCode=500, headers={}, content='')
if 'FunctionError' in invokeResponse:
logger.error('%s error: %s', invokeResponse['FunctionError'],
invokeResponse['Payload'].read())
return ProxyResponse(statusCode=500, headers={}, content='')
response = json.loads(invokeResponse['Payload'].read())
if self.__enableEncryption:
responseMeta = (self.__handle_encrypted_metadata(response,
sessionKey))
statusCode = responseMeta['statusCode']
headers = responseMeta['headers']
else:
statusCode = response['statusCode']
headers = response['headers']
content = self.__handle_response_body(response, sessionKey)
return ProxyResponse(statusCode=statusCode, headers=headers,
content=content)
```
#### File: lib/proxies/aws_stream.py
```python
import boto3
import json
import logging
from random import SystemRandom
from threading import Semaphore
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from lib.proxy import AbstractStreamProxy
from lib.stats import LambdaStatsModel
logger = logging.getLogger(__name__)
random = SystemRandom()
def _get_region_from_arn(arn):
elements = arn.split(':')
return elements[3]
class StreamLambdaProxy(AbstractStreamProxy):
"""Invoke a lambda for each connection"""
class Connection(AbstractStreamProxy.Connection):
def __init__(self, host, port):
self.host = host
self.port = port
def close(self):
pass
def __str__(self):
return self.host + ':' + self.port
def __init__(self, functions, maxParallelRequests,
pubKeyFile, streamServer, stats, maxIdleTimeout=1):
self.__connIdleTimeout = maxIdleTimeout
self.__functions = functions
self.__functionToClient = {}
self.__regionToClient = {}
self.__lambdaRateSemaphore = Semaphore(maxParallelRequests)
self.__lambda = boto3.client('lambda')
if 'lambda' not in stats.models:
stats.register_model('lambda', LambdaStatsModel())
self.__lambdaStats = stats.get_model('lambda')
self.__streamServer = streamServer
# Enable encryption
self.__enableEncryption = False
if pubKeyFile is not None:
with open(pubKeyFile, 'rb') as ifs:
self.__rsaCipher = PKCS1_OAEP.new(RSA.importKey(ifs.read()))
self.__enableEncryption = True
def __get_lambda_client(self, function):
"""Get a lambda client from the right region"""
client = self.__functionToClient.get(function)
if client is not None:
return client
if 'arn:' not in function:
# using function name in the default region
client = self.__lambda
self.__functionToClient[function] = client
else:
region = _get_region_from_arn(function)
client = self.__regionToClient.get(region)
if client is None:
client = boto3.client('lambda', region_name=region)
self.__regionToClient[region] = client
self.__functionToClient[function] = client
return client
def connect(self, host, port):
return StreamLambdaProxy.Connection(host, port)
def stream(self, cliSock, servInfo):
assert isinstance(servInfo, StreamLambdaProxy.Connection)
socketId = '%016x' % random.getrandbits(128)
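        # The invoked lambda is expected to connect back to the public stream
        # server using this socketId; take_ownership_of_socket below pairs that
        # inbound connection with cliSock for up to the idle timeout.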
invokeArgs = {
'stream': True,
'socketId': socketId,
'streamServer': self.__streamServer.publicHostAndPort,
'host': servInfo.host,
'port': int(servInfo.port),
'idleTimeout': self.__connIdleTimeout
}
function = random.choice(self.__functions)
lambdaClient = self.__get_lambda_client(function)
self.__lambdaRateSemaphore.acquire()
try:
self.__streamServer.take_ownership_of_socket(socketId, cliSock,
self.__connIdleTimeout)
with self.__lambdaStats.record() as billingObject:
invokeResponse = lambdaClient.invoke(
FunctionName=function,
Payload=json.dumps(invokeArgs),
LogType='Tail')
billingObject.parse_log(invokeResponse['LogResult'])
finally:
self.__lambdaRateSemaphore.release()
if invokeResponse['StatusCode'] != 200:
logger.error('%s: status=%d', invokeResponse['FunctionError'],
invokeResponse['StatusCode'])
if 'FunctionError' in invokeResponse:
logger.error('%s error: %s', invokeResponse['FunctionError'],
invokeResponse['Payload'].read())
```
#### File: aws-lambda-proxy/lib/workers.py
```python
import atexit
import json
import logging
import random
import time
from abc import abstractproperty
from concurrent.futures import ThreadPoolExecutor
from threading import Condition, Event, Lock, Thread
from lib.stats import Stats, LambdaStatsModel, SqsStatsModel
from shared.workers import LambdaSqsResult, LambdaSqsTask
# Re-expose these classes
LambdaSqsResult = LambdaSqsResult
LambdaSqsTask = LambdaSqsTask
logger = logging.getLogger(__name__)
try:
import boto3
except ImportError as e:
logger.error('Failed to import boto3')
boto3 = None
MAX_SQS_REQUEST_MESSAGES = 10
DEFAULT_POLLING_THREADS = 4
DEFAULT_HANDLER_THREADS = 4
class Future(object):
def __init__(self):
self.__done = Event()
self.__result = None
self.__aborted = False
self._partial = {}
def get(self, timeout=None):
self.__done.wait(timeout)
if self.__result is None:
self.__aborted = True
return self.__result
def set(self, result):
self.__result = result
self.__done.set()
@property
def isAborted(self):
return self.__aborted
class LambdaSqsTaskConfig(object):
@abstractproperty
def queue_prefix(self):
"""Prefix of the temporary SQS queues"""
pass
@abstractproperty
def lambda_function(self):
"""Name of lambda function to call"""
pass
@abstractproperty
def max_workers(self):
pass
@abstractproperty
def load_factor(self):
"""Target ratio of pending tasks to workers"""
pass
@property
def worker_wait_time(self):
"""Number of seconds each worker will wait for work"""
return 1
@property
def message_retention_period(self):
"""
Number of seconds each message will persist before
        timing out
"""
return 60
def pre_invoke_callback(self, workerId, workerArgs):
"""Add any extra args to workerArgs"""
pass
def post_return_callback(self, workerId, workerResponse):
"""
Called on worker exit. WorkerResponse is None if there was
an error
"""
pass
class WorkerManager(object):
def __init__(self, taskConfig, stats=None):
self.__config = taskConfig
if stats is None:
stats = Stats()
self.__stats = stats
if 'lambda' not in stats.models:
stats.register_model('lambda', LambdaStatsModel())
self.__lambdaStats = stats.get_model('lambda')
if 'sqs' not in stats.models:
stats.register_model('sqs', SqsStatsModel())
self.__sqsStats = stats.get_model('sqs')
self.__lambda = boto3.client('lambda')
self.__numWorkers = 0
self.__numWorkersLock = Lock()
# RequestId -> Future
self.__numTasksInProgress = 0
self.__tasksInProgress = {}
self.__tasksInProgressLock = Lock()
self.__tasksInProgressCondition = Condition(self.__tasksInProgressLock)
self.__init_message_queues()
        # Start result handler pool and polling threads
        self.__result_handler_pool = ThreadPoolExecutor(DEFAULT_HANDLER_THREADS)
for i in xrange(DEFAULT_POLLING_THREADS):
rt = Thread(target=self.__result_daemon)
rt.daemon = True
rt.start()
def __init_message_queues(self):
"""Setup the message queues"""
sqs = boto3.resource('sqs')
currentTime = time.time()
taskQueueAttributes = {
'MessageRetentionPeriod': str(self.__config.message_retention_period),
'ReceiveMessageWaitTimeSeconds': str(self.__config.worker_wait_time),
}
taskQueueName = '%s_task_%d' % (self.__config.queue_prefix, currentTime)
self.__taskQueueName = taskQueueName
taskQueue = sqs.create_queue(
QueueName=taskQueueName,
Attributes=taskQueueAttributes)
self.__taskQueue = taskQueue
atexit.register(lambda: taskQueue.delete())
logger.info('Created task queue: %s', taskQueueName)
resultQueueAttributes = {
'MessageRetentionPeriod':
str(self.__config.message_retention_period),
'ReceiveMessageWaitTimeSeconds': str(20),
}
resultQueueName = '%s_result_%d' % (self.__config.queue_prefix,
currentTime)
self.__resultQueueName = resultQueueName
resultQueue = sqs.create_queue(
QueueName=resultQueueName,
Attributes=resultQueueAttributes)
atexit.register(lambda: resultQueue.delete())
logger.info('Created result queue: %s', resultQueueName)
def execute(self, task, timeout=None):
"""Enqueue a message in the task queue"""
assert isinstance(task, LambdaSqsTask)
with self.__numWorkersLock:
if self.__should_spawn_worker():
self.__spawn_new_worker()
kwargs = {}
if task.messageAttributes:
kwargs['MessageAttributes'] = task.messageAttributes
messageStatus = self.__taskQueue.send_message(
MessageBody=task.body, **kwargs)
# Use the MessageId as taskId
taskId = messageStatus['MessageId']
taskFuture = Future()
with self.__tasksInProgressLock:
self.__tasksInProgress[taskId] = taskFuture
self.__numTasksInProgress = len(self.__tasksInProgress)
self.__tasksInProgressCondition.notify()
# Do this before sleeping
self.__sqsStats.record_send(
SqsStatsModel.estimate_message_size(
messageAttributes=task.messageAttributes,
messageBody=task.body))
result = taskFuture.get(timeout=timeout)
with self.__tasksInProgressLock:
del self.__tasksInProgress[taskId]
self.__numTasksInProgress = len(self.__tasksInProgress)
return result
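    # Scale out when no workers are running, or when pending tasks exceed
    # load_factor per running worker, capped at max_workers.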
def __should_spawn_worker(self):
if self.__config.max_workers == 0:
return False
return (self.__numWorkers == 0 or
(self.__numWorkers < self.__config.max_workers and
self.__numTasksInProgress >
self.__numWorkers * self.__config.load_factor))
def __spawn_new_worker(self):
workerId = random.getrandbits(32)
logger.info('Starting new worker: %d', workerId)
workerArgs = {
'workerId': workerId,
'taskQueue': self.__taskQueueName,
'resultQueue': self.__resultQueueName,
}
functionName = self.__config.lambda_function
self.__config.pre_invoke_callback(workerId, workerArgs)
t = Thread(target=self.__wait_for_worker,
args=(functionName, workerId, workerArgs))
t.daemon = True
t.start()
self.__numWorkers += 1
assert self.__numWorkers <= self.__config.max_workers,\
'Max worker limit exceeded'
def __wait_for_worker(self, functionName, workerId, workerArgs):
"""Wait for the worker to exit and the lambda to return"""
try:
with self.__lambdaStats.record() as billingObject:
response = self.__lambda.invoke(
FunctionName=functionName,
Payload=json.dumps(workerArgs),
LogType='Tail')
billingObject.parse_log(response['LogResult'])
if response['StatusCode'] != 200 or 'FunctionError' in response:
logger.error('Worker %d exited unexpectedly: %s: status=%d',
workerId,
response['FunctionError'],
response['StatusCode'])
logger.error(response['Payload'].read())
self.__config.post_return_callback(workerId, None)
else:
workerResponse = json.loads(response['Payload'].read())
self.__config.post_return_callback(workerId, workerResponse)
finally:
with self.__numWorkersLock:
self.__numWorkers -= 1
assert self.__numWorkers >= 0, 'Workers cannot be negative'
def __handle_single_result_message(self, message):
# TODO: Fix me. Assume maximally sized messages for now
try:
result = LambdaSqsResult.from_message(message)
taskId = result.taskId
with self.__tasksInProgressLock:
taskFuture = self.__tasksInProgress.get(taskId)
if taskFuture is None:
logger.info('No future for task: %s', taskId)
return
# Handle fragmented
if result.isFragmented:
taskFuture._partial[result.fragmentId] = result
logger.info('Setting result: %s', taskId)
if len(taskFuture._partial) == result.numFragments:
taskFuture.set([
taskFuture._partial[i]
for i in xrange(result.numFragments)
])
else:
logger.info('Setting result: %s', taskId)
taskFuture.set(result)
except Exception as e:
logger.error('Failed to parse message: %s', message)
logger.exception(e)
finally:
self.__sqsStats.record_receive(
SqsStatsModel.estimate_message_size(message=message))
def __result_daemon(self):
"""Poll SQS result queue and set futures"""
requiredAttributes = ['All']
sqs = boto3.resource('sqs')
resultQueue = sqs.get_queue_by_name(QueueName=self.__resultQueueName)
while True:
# Don't poll SQS unless there is a task in progress
with self.__tasksInProgressLock:
if self.__numTasksInProgress == 0:
self.__tasksInProgressCondition.wait()
# Poll for new messages
logger.info('Polling for new results')
messages = None
try:
self.__sqsStats.record_poll()
messages = resultQueue.receive_messages(
MessageAttributeNames=requiredAttributes,
MaxNumberOfMessages=MAX_SQS_REQUEST_MESSAGES)
logger.info('Received %d messages', len(messages))
self.__result_handler_pool.map(
self.__handle_single_result_message, messages)
except Exception as e:
logger.error('Error polling SQS')
logger.exception(e)
finally:
if messages is not None and len(messages) > 0:
try:
result = resultQueue.delete_messages(
Entries=[{
'Id': message.message_id,
'ReceiptHandle': message.receipt_handle
} for message in messages]
)
if len(result['Successful']) != len(messages):
raise Exception('Failed to delete all messages: %s'
% result['Failed'])
except Exception as e:
logger.exception(e)
```
#### File: jhong93/aws-lambda-proxy/tests.py
```python
import json
import unittest
import os
import random
import sys
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
from lib.stats import Stats, ProxyStatsModel
import shared.crypto as crypto
import shared.proxy as proxy
from main import DEFAULT_MAX_LAMBDAS, DEFAULT_PORT, build_local_proxy, \
build_lambda_proxy, build_handler
from gen_rsa_kp import generate_key_pair
def silence_stdout(func):
def decorator(*args, **kwargs):
try:
with open(os.devnull, 'wb') as devnull:
sys.stdout = devnull
func(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return decorator
class TestCrypto(unittest.TestCase):
def test_gcm_encypt_decrypt(self):
key = 'a' * 16
cleartext = 'Hello'
nonce = 'my-nonce'
ciphertext, tag = crypto.encrypt_with_gcm(key, cleartext, nonce)
decrypted = crypto.decrypt_with_gcm(key, ciphertext, tag, nonce)
self.assertEqual(cleartext, decrypted)
def _start_test_server(port, numRequests):
class Handler(BaseHTTPRequestHandler):
def log_message(self, format, *args): pass
def __respond(self, statusCode):
for header in self.headers:
if header == 'A': assert self.headers['A'] == '1'
self.send_response(statusCode)
self.send_header('B', '2')
self.end_headers()
self.wfile.write(TestProxy.EXPECTED_RESPONSE_BODY)
def do_GET(self): self.__respond(200)
def do_POST(self):
body = self.rfile.read(int(self.headers['Content-Length']))
assert body == TestProxy.EXPECTED_POST_BODY
self.__respond(201)
server = HTTPServer(('localhost', port), Handler)
def run_server():
for _ in xrange(numRequests):
server.handle_request()
t = Thread(target=run_server)
t.daemon = True
t.start()
class TestProxy(unittest.TestCase):
EXPECTED_REQUEST_HEADERS = {'A': '1'}
EXPECTED_RESPONSE_HEADERS = {'B': '2'}
EXPECTED_POST_BODY = json.dumps({'request': 'Ping'})
EXPECTED_RESPONSE_BODY = json.dumps({'response': 'pong'})
def test_proxy_real_request(self):
response = proxy.proxy_single_request('GET', 'http://google.com',
{'Connection': 'close'}, None)
self.assertEqual(response.statusCode, 301,
'Response from Google should be redirect')
def test_proxy_local_request(self):
port = random.randint(9000, 10000)
url = 'http://localhost:%d/' % port
_start_test_server(port, 3)
response = proxy.proxy_single_request(
'GET', url, TestProxy.EXPECTED_REQUEST_HEADERS, b'')
self.assertEqual(response.statusCode, 200)
self.assertDictContainsSubset(TestProxy.EXPECTED_RESPONSE_HEADERS,
response.headers)
self.assertEqual(response.content,
TestProxy.EXPECTED_RESPONSE_BODY)
response = proxy.proxy_single_request(
'GET', url, TestProxy.EXPECTED_REQUEST_HEADERS, None)
self.assertEqual(response.statusCode, 200)
self.assertDictContainsSubset(TestProxy.EXPECTED_RESPONSE_HEADERS,
response.headers)
self.assertEqual(response.content,
TestProxy.EXPECTED_RESPONSE_BODY)
response = proxy.proxy_single_request(
'POST', url, {
'Foo': 'Bar',
'Content-Length': str(len(TestProxy.EXPECTED_POST_BODY))
},
TestProxy.EXPECTED_POST_BODY)
self.assertEqual(response.statusCode, 201)
self.assertDictContainsSubset(TestProxy.EXPECTED_RESPONSE_HEADERS,
response.headers)
self.assertEqual(response.content,
TestProxy.EXPECTED_RESPONSE_BODY)
class TestRsaKeygen(unittest.TestCase):
@silence_stdout
def test_keygen(self):
generate_key_pair(os.devnull, os.devnull)
class TestBuildProxy(unittest.TestCase):
"""Tries to build the proxies, but not actually run the server."""
@staticmethod
def _get_default_setup():
stats = Stats()
stats.register_model('proxy', ProxyStatsModel())
class MockArgs(object):
pass
args = MockArgs()
args.port = DEFAULT_PORT
args.host = 'localhost'
args.functions = []
args.enableEncryption = False
args.lambdaType = 'short'
args.s3Bucket = None
args.publicServerHostAndPort = None
args.maxLambdas = DEFAULT_MAX_LAMBDAS
args.enableMitm = False
args.disableStats = False
args.verbose = False
return args, stats, None
@silence_stdout
def test_build_local_no_mitm(self):
args, stats, _ = TestBuildProxy._get_default_setup()
args.local = True
args.enableMitm = False
proxy = build_local_proxy(args, stats)
build_handler(proxy, stats, verbose=True)
@silence_stdout
def test_build_local_with_mitm(self):
args, stats, _ = TestBuildProxy._get_default_setup()
args.local = True
args.enableMitm = True
proxy = build_local_proxy(args, stats)
build_handler(proxy, stats, verbose=True)
@silence_stdout
def test_build_lambda_with_mitm(self):
args, stats, reverseServer = TestBuildProxy._get_default_setup()
args.enableMitm = True
args.functions = ['proxy']
args.s3Bucket = 'mock-bucket'
args.enableEncryption = True
proxy = build_lambda_proxy(args, stats, reverseServer)
build_handler(proxy, stats, verbose=True)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jhong93/caption-index",
"score": 4
} |
#### File: caption-index/captions/query.py
```python
import heapq
from abc import ABC, abstractmethod, abstractproperty
from collections import deque
from typing import Dict, List, Iterable, NamedTuple, Optional
from parsimonious.grammar import Grammar, NodeVisitor
from .index import Lexicon, CaptionIndex
from .tokenize import default_tokenizer
from .util import PostingUtil, group_results_by_document
GRAMMAR = Grammar(r"""
expr_root = sp? expr_group sp?
expr_group = and / or / not / expr
expr = expr_paren / tokens_root
expr_paren = sp? "(" sp? expr_group sp? ")" sp?
and = expr more_and threshold?
more_and = (sp? "&" sp? expr)+
or = expr more_or
more_or = (sp? "|" sp? expr)+
not = expr more_not threshold?
more_not = (sp? "\\" sp? expr)+
threshold = sp? threshold_type sp? integer
threshold_type = "::" / "//"
integer = ~r"\d+"
tokens_root = tokens_list more_tokens_root
more_tokens_root = (sp tokens_list)*
tokens_list = tokens / tokens_exp
tokens_exp = "[" sp? tokens sp? "]"
tokens = token more_tokens
more_tokens = (sp token)*
token = ~r"[^\s()&|\\\[\]:/]+"
sp = ~r"\s+"
""")
class _Expr(ABC):
class Context(NamedTuple):
lexicon: Lexicon
index: CaptionIndex
documents: Optional[Iterable[CaptionIndex.DocIdOrDocument]]
ignore_word_not_found: bool
case_insensitive: bool
@abstractmethod
def eval(self, context: '_Expr.Context') -> Iterable[CaptionIndex.Document]:
raise NotImplementedError()
@abstractmethod
def estimate_cost(self, lexicon: Lexicon) -> float:
raise NotImplementedError()
@abstractproperty
def _pprint_data(self):
raise NotImplementedError()
def __repr__(self):
return repr(self._pprint_data)
class _JoinExpr(_Expr):
def __init__(self, children, threshold, threshold_type):
assert all(isinstance(c, _Expr) for c in children)
self.children = children
self.threshold = threshold
self.threshold_type = threshold_type
def estimate_cost(self, lexicon):
return sum(c.estimate_cost(lexicon) for c in self.children)
class _Phrase(_Expr):
class Token(NamedTuple):
text: str
expand: bool
def __init__(self, tokens):
assert all(isinstance(t, _Phrase.Token) for t in tokens)
self.tokens = tokens
@property
def _pprint_data(self):
return {
'1. op': 'Phrase',
'2. tokens': ' '.join([
'[{}]'.format(t.text) if t.expand else t.text
for t in self.tokens])
}
def eval(self, context):
kwargs = {}
if context.documents is not None:
kwargs['documents'] = context.documents
ngram_tokens = []
for t in self.tokens:
if t.expand:
tokens = [context.lexicon[x] for x in
context.lexicon.similar(t.text)]
if len(tokens) == 0:
return
ngram_tokens.append(tokens)
else:
try:
tokens = [context.lexicon[t.text]]
if context.case_insensitive:
matches = context.lexicon.case_insensitive(tokens[0])
if matches is not None:
# Some other words exist
assert len(matches) > 0
tokens = [context.lexicon[x] for x in matches]
except Lexicon.WordDoesNotExist:
if context.ignore_word_not_found:
return
else:
raise
ngram_tokens.append(tokens)
for d in context.index.ngram_search(*ngram_tokens, **kwargs):
yield d
def estimate_cost(self, lexicon):
# The cost to search is the frequency of the least frequent token
# in the ngram since this is the number of locations that need to
# be checked.
min_token_count = lexicon.word_count
for t in self.tokens:
token_count = 0
if t.expand:
tokens = [lexicon[x] for x in
lexicon.similar(t.text)]
token_count += sum(x.count for x in tokens)
else:
# FIXME: Case insensitivity not considered here
try:
token = lexicon[t.text]
token_count += token.count
except Lexicon.WordDoesNotExist:
pass
min_token_count = min(token_count, min_token_count)
return min_token_count / lexicon.word_count
def _dist_time_posting(p1, p2):
return (
max(p2.start - p1.end, 0)
if p1.start <= p2.start else _dist_time_posting(p2, p1))
def _dist_idx_posting(p1, p2):
return (
max(p2.idx - (p1.idx + p1.len), 0)
if p1.idx <= p2.idx else _dist_idx_posting(p2, p1))
class _And(_JoinExpr):
@property
def _pprint_data(self):
return {
'1. op': 'And',
'2. thresh': '{} {}'.format(
self.threshold,
'seconds' if self.threshold_type == 't' else 'tokens'),
'3. children': [c._pprint_data for c in self.children]
}
def eval(self, context):
results = []
for c in self.children:
child_results = deque(c.eval(context))
if len(child_results) == 0:
return
doc_ids = [d.id for d in child_results]
context = context._replace(documents=doc_ids)
results.append({d.id: d.postings for d in child_results})
dist_fn = (
_dist_time_posting if self.threshold_type == 't' else
_dist_idx_posting)
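        # n-way proximity join: pop postings in start order from a heap and keep
        # one only if every other child expression has a posting within the
        # threshold (checked against the current heap heads and the last posting
        # already consumed from each child).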
n = len(results)
for doc_id in sorted(doc_ids):
pq = []
for i, r in enumerate(results):
assert doc_id in r
ps_iter = iter(r[doc_id])
ps_head = next(ps_iter)
pq.append((ps_head.start, i, ps_head, ps_iter))
heapq.heapify(pq)
merged_postings = []
ps_prev = [None] * n
while len(pq) > 0:
# Consider first element
_, i, ps_head, ps_iter = heapq.heappop(pq)
# Check conditions
near_i = set()
for elem in pq:
ps_cmp = elem[2]
j = elem[1]
if dist_fn(ps_head, ps_cmp) < self.threshold:
near_i.add(j)
if len(near_i) < n - 1:
for j in range(n):
if j != i and j not in near_i:
ps_cmp = ps_prev[j]
if ps_cmp is not None:
if dist_fn(ps_head, ps_cmp) < self.threshold:
near_i.add(j)
else:
# No solution
break
if len(near_i) == n - 1:
merged_postings.append(ps_head)
# Advance postings
ps_prev[i] = ps_head
try:
ps_head = next(ps_iter)
heapq.heappush(pq, (ps_head.start, i, ps_head, ps_iter))
except StopIteration:
pass
merged_postings.sort(key=lambda x: x.start)
if len(merged_postings) > 0:
yield CaptionIndex.Document(
id=doc_id, postings=merged_postings)
class _Or(_JoinExpr):
@property
def _pprint_data(self):
return {
'1. op': 'Or',
'2. children': [c._pprint_data for c in self.children]
}
def eval(self, context):
results = [c.eval(context) for c in self.children]
for doc_id, grouped_postings in group_results_by_document(results):
yield CaptionIndex.Document(
id=doc_id,
postings=PostingUtil.union(grouped_postings)
)
class _Not(_JoinExpr):
@property
def _pprint_data(self):
return {
'1. op': 'Not',
'2. thresh': '{} {}'.format(
self.threshold,
'seconds' if self.threshold_type == 't' else 'tokens'),
'3. children': [c._pprint_data for c in self.children]
}
def eval(self, context):
child0_results = list(self.children[0].eval(context))
other_context = context._replace(
documents=[d.id for d in child0_results])
other_results = [c.eval(other_context) for c in self.children[1:]]
other_postings = {
doc_id: PostingUtil.union(ps_lists)
for doc_id, ps_lists in group_results_by_document(other_results)
}
dist_fn = (
_dist_time_posting if self.threshold_type == 't' else
_dist_idx_posting)
key_fn = (
(lambda x: x.start) if self.threshold_type == 't' else
(lambda x: x.idx))
for d in child0_results:
postings = []
doc_ops = other_postings.get(d.id, [])
doc_op_i = 0
prev_op = None
for p in d.postings:
p_key = key_fn(p)
while (
doc_op_i < len(doc_ops)
and key_fn(doc_ops[doc_op_i]) <= p_key
):
prev_op = doc_ops[doc_op_i]
doc_op_i += 1
if prev_op and dist_fn(p, prev_op) < self.threshold:
continue
if (
doc_op_i < len(doc_ops)
and dist_fn(p, doc_ops[doc_op_i]) < self.threshold
):
continue
postings.append(p)
if len(postings) > 0:
yield CaptionIndex.Document(id=d.id, postings=postings)
DEFAULT_AND_THRESH = 15
DEFAULT_NOT_THRESH = 15
class _QueryParser(NodeVisitor):
def __init__(self, constants={}):
self.grammar = GRAMMAR
self._constants = constants
visit_more_and = visit_more_or = visit_more_not = visit_more_tokens = \
lambda a, b, c: c
def visit_expr_root(self, node, children):
assert len(children) == 3
return children[1]
def visit_expr_group(self, node, children):
assert len(children) == 1
return children[0]
def visit_expr(self, node, children):
assert len(children) == 1
return children[0]
def visit_expr_paren(self, node, children):
assert len(children) == 7
return children[3]
def visit_and(self, node, children):
assert len(children) == 3
if children[2] is None:
threshold = self._constants.get(
'and_threshold', DEFAULT_AND_THRESH)
threshold_type = 't'
else:
threshold_type, threshold = children[2]
assert isinstance(threshold, int)
assert isinstance(threshold_type, str)
return _And([children[0], *children[1]], threshold, threshold_type)
def visit_or(self, node, children):
assert len(children) == 2
return _Or([children[0], *children[1]], None, None)
def visit_not(self, node, children):
assert len(children) == 3
if children[2] is None:
threshold = self._constants.get(
'not_threshold', DEFAULT_NOT_THRESH)
threshold_type = 't'
else:
threshold_type, threshold = children[2]
assert isinstance(threshold, int)
assert isinstance(threshold_type, str)
return _Not([children[0], *children[1]], threshold, threshold_type)
def visit_threshold(self, node, children):
assert len(children) == 4
if children[1] == '//':
return ('w', children[3])
elif children[1] == '::':
return ('t', children[3])
else:
raise Exception('Invalid threshold token')
def visit_threshold_type(self, node, children):
return node.text
def visit_integer(self, node, children):
return int(node.text)
def visit_tokens_root(self, node, children):
assert len(children) == 2
return _Phrase([*children[0], *children[1]])
def visit_more_tokens_root(self, node, children):
return [l for c in children for l in c]
def visit_tokens_list(self, node, children):
assert len(children) == 1
return children[0]
def visit_tokens_exp(self, node, children):
assert len(children) == 5
return [t._replace(expand=True) for t in children[2]]
def visit_tokens(self, node, children):
assert len(children) == 2
return [*children[0], *[t for c in children[1] for t in c]]
def visit_token(self, node, children):
tokenizer = default_tokenizer()
tokens = tokenizer.tokens(node.text)
return [_Phrase.Token(t, False) for t in tokens]
def generic_visit(self, node, children):
return children[-1] if children else None
class Query:
"""Parse and execute queries"""
def __init__(self, raw_query: str, **config):
self._tree = _QueryParser(config).parse(raw_query)
def execute(
self, lexicon: Lexicon, index: CaptionIndex, documents=None,
ignore_word_not_found=True, case_insensitive=False
) -> Iterable[CaptionIndex.Document]:
return self._tree.eval(_Expr.Context(
lexicon, index, documents, ignore_word_not_found,
case_insensitive))
def estimate_cost(self, lexicon: Lexicon) -> float:
return self._tree.estimate_cost(lexicon)
```
#### File: caption-index/captions/util.py
```python
import heapq
import itertools
import numpy as np
from collections import deque
from typing import Generator, Iterable, List, Tuple, Union
from .index import Lexicon, CaptionIndex
Number = Union[int, float]
VERBOSE = False
def window(tokens: Iterable, n: int, subwindows: bool = False) -> Generator:
"""Takes an iterable words and returns a windowed iterator"""
buffer = deque()
for t in tokens:
buffer.append(t)
if len(buffer) == n:
if subwindows:
for i in range(n):
yield tuple(itertools.islice(buffer, 0, i + 1))
else:
yield tuple(buffer)
buffer.popleft()
if subwindows:
while len(buffer) > 0:
for i in range(len(buffer)):
yield tuple(itertools.islice(buffer, 0, i + 1))
buffer.popleft()
def frequent_words(
lexicon: Lexicon,
percentile: float = 99.7
) -> List[Lexicon.Word]:
"""Return words at a frequency percentile"""
threshold = np.percentile([w.count for w in lexicon], percentile)
return [w for w in lexicon if w.count >= threshold]
class PostingUtil(object):
@staticmethod
def merge(p1: CaptionIndex.Posting, p2: CaptionIndex.Posting):
"""Merge two postings"""
start_idx = min(p1.idx, p2.idx)
end_idx = max(p1.idx + p1.len, p2.idx + p2.len)
return p1._replace(
start=min(p1.start, p2.start),
end=max(p1.end, p2.end),
idx=start_idx,
len=end_idx - start_idx)
@staticmethod
def deoverlap(
postings: Iterable[CaptionIndex.Posting],
threshold: Number = 0,
use_time: bool = True
) -> List[CaptionIndex.Posting]:
"""Merge postings which overlap"""
result = []
curr_p = None
def overlaps(p1: CaptionIndex.Posting,
p2: CaptionIndex.Posting) -> bool:
if use_time:
return (p2.start >= p1.start
and p2.start - p1.end <= threshold)
else:
return (p2.idx >= p1.idx
and p2.idx - (p1.idx + p1.len) <= threshold)
for p in postings:
if curr_p is None:
curr_p = p
elif overlaps(curr_p, p):
curr_p = PostingUtil.merge(curr_p, p)
else:
result.append(curr_p)
curr_p = p
if curr_p is not None:
result.append(curr_p)
return result
@staticmethod
def dilate(
postings: Iterable[CaptionIndex.Posting],
amount: Number,
duration: Number
) -> List[CaptionIndex.Posting]:
"""Dilate start and end times"""
return [p._replace(
start=max(p.start - amount, 0), end=min(p.end + amount, duration)
) for p in postings]
@staticmethod
def to_fixed_length(
postings: Iterable[CaptionIndex.Posting],
length: Number,
duration: Number
) -> List[CaptionIndex.Posting]:
"""Dilate start and end times"""
result = []
half_length = length / 2
for p in postings:
mid = (p.start + p.end) / 2
result.append(p._replace(
start=max(mid - half_length, 0),
end=min(mid + half_length, duration)
))
return result
@staticmethod
def union(
postings_lists: List[Iterable[CaptionIndex.Posting]],
use_time: bool = True
) -> List[CaptionIndex.Posting]:
"""Merge several lists of postings by order of idx."""
def get_priority(p) -> Tuple[Number, Number]:
return (p.start, p.idx) if use_time else (p.idx, p.start)
postings_lists_with_priority = [
((get_priority(p), p) for p in pl) for pl in postings_lists]
return [r[1] for r in heapq.merge(*postings_lists_with_priority)]
def group_results_by_document(
results: List[Iterable[CaptionIndex.Document]]
) -> Generator[
Tuple[int, List[List[CaptionIndex.Posting]]], None, None
]:
"""Group postings of documents from multiple results"""
result_with_id = [((d.id, d) for d in r) for r in results]
curr_doc_id = None
curr_docs = []
for doc_id, document in heapq.merge(*result_with_id):
if curr_doc_id is None:
curr_doc_id = doc_id
if curr_doc_id != doc_id:
yield curr_doc_id, [d.postings for d in curr_docs]
curr_doc_id = doc_id
curr_docs = []
curr_docs.append(document)
if len(curr_docs) > 0:
yield curr_doc_id, [d.postings for d in curr_docs]
```
#### File: caption-index/scripts/update_index.py
```python
import argparse
import os
from collections import defaultdict
import shutil
from typing import List, Optional
from captions import Lexicon, Documents
from lib.common import (
DocumentToIndex, read_docs_from_stdin, list_docs,
merge_files, get_word_counts, index_documents)
def get_args():
p = argparse.ArgumentParser()
p.add_argument('index_dir', type=str,
help='Directory containing existing index')
p.add_argument('-d', '--new-doc-dir', type=str,
help='Directory containing captions. If not passed, read from stdin.')
p.add_argument('--chunk-size', dest='chunk_size', type=int,
help='Break the index into chunks of n documents')
p.add_argument('--skip-existing-names', action='store_true',
help='Skip documents that are already indexed')
return p.parse_args()
def index_new_docs(
new_docs_to_index: List[DocumentToIndex],
new_documents: Documents,
lexicon: Lexicon,
index_dir: str,
data_dir: str,
chunk_size: Optional[int]
):
"""Builds inverted indexes and reencode documents in binary"""
assert len(new_docs_to_index) == len(new_documents)
base_doc_id = min(d.id for d in new_documents)
max_doc_id = max(d.id for d in new_documents)
index_and_doc_paths = defaultdict(list)
for doc_to_index, doc in zip(new_docs_to_index, new_documents):
assert doc_to_index.name == doc.name
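        # Pick the output index file: one file for all new docs when
        # chunk_size is None, one file per document when chunk_size == 1, and
        # fixed-size chunks of documents otherwise.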
if chunk_size is None:
doc_index_out_path = os.path.join(
index_dir, '{:07d}-{:07d}.bin'.format(
base_doc_id, base_doc_id + len(new_docs_to_index)))
elif chunk_size == 1:
doc_index_out_path = os.path.join(
index_dir, '{:07d}.bin'.format(doc.id))
else:
chunk_idx = int((doc.id - base_doc_id) / chunk_size) * chunk_size
doc_index_out_path = os.path.join(
index_dir, '{:07d}-{:07d}.bin'.format(
base_doc_id + chunk_idx,
min(base_doc_id + chunk_idx + chunk_size, max_doc_id)))
doc_data_out_path = os.path.join(
data_dir, '{}.bin'.format(doc.id))
index_and_doc_paths[doc_index_out_path].append(
(doc.id, doc_to_index.path, doc_data_out_path))
index_documents(list(index_and_doc_paths.items()), lexicon)
def main(
index_dir: str,
new_doc_dir: Optional[str],
chunk_size: Optional[int] = None,
skip_existing_names: bool = False
):
assert chunk_size is None or chunk_size > 0
doc_path = os.path.join(index_dir, 'documents.txt')
lex_path = os.path.join(index_dir, 'lexicon.txt')
index_path = os.path.join(index_dir, 'index.bin')
old_lexicon = Lexicon.load(lex_path)
documents = Documents.load(doc_path)
if new_doc_dir:
new_docs_to_index = list_docs(new_doc_dir)
else:
new_docs_to_index = read_docs_from_stdin()
assert len(new_docs_to_index) > 0
tmp_new_docs_to_index = []
for new_doc in new_docs_to_index:
if new_doc.name in documents:
if skip_existing_names:
print('Skipping: {} is already indexed!'.format(new_doc.name))
else:
raise Exception(
'{} is already indexed! Aborting.'.format(new_doc.name))
else:
tmp_new_docs_to_index.append(new_doc)
new_docs_to_index = tmp_new_docs_to_index
if len(new_docs_to_index) == 0:
print('No new documents to index.')
return
# Update lexicon
new_word_counts = get_word_counts(new_docs_to_index)
lexicon_words = [
Lexicon.Word(w.id, w.token, w.count + new_word_counts[w.token]
if w.token in new_word_counts else w.count)
for w in old_lexicon
]
for w in new_word_counts:
if w not in old_lexicon:
lexicon_words.append(
Lexicon.Word(len(lexicon_words), w, new_word_counts[w]))
lexicon = Lexicon(lexicon_words)
base_doc_id = len(documents)
new_documents = [Documents.Document(id=i + base_doc_id, name=d.name)
for i, d in enumerate(new_docs_to_index)]
    # Convert existing index.bin to a directory if needed
if os.path.isfile(index_path):
tmp_index_path = index_path + '.tmp'
shutil.move(index_path, tmp_index_path)
os.makedirs(index_path)
shutil.move(
tmp_index_path,
os.path.join(index_path, '{:07d}-{:07d}.bin'.format(
0, base_doc_id)))
assert os.path.isdir(index_path)
# Index the new documents
index_new_docs(new_docs_to_index, new_documents, lexicon, index_path,
os.path.join(index_dir, 'data'), chunk_size)
# Write out the new documents file
shutil.move(doc_path, doc_path + '.old')
all_documents = list(documents)
all_documents.extend(new_documents)
Documents(all_documents).store(doc_path)
# Update to the new lexicon
lexicon.store(lex_path)
print('Done!')
if __name__ == '__main__':
main(**vars(get_args()))
```
#### File: caption-index/tests/test_index_update.py
```python
import os
import shutil
import tempfile
from subprocess import check_call, CalledProcessError
import captions
from lib.common import get_docs_and_lexicon
TEST_DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test-small.tar.gz')
BUILD_INDEX_SCRIPT = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', 'scripts', 'build_index.py')
UPDATE_INDEX_SCRIPT = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'..', 'scripts', 'update_index.py')
def test_update_index():
tmp_dir = tempfile.mkdtemp(suffix=None, prefix='caption-index-unittest-',
dir=None)
subs_dir = os.path.join(tmp_dir, 'subs')
idx_dir = os.path.join(tmp_dir, 'index')
# Unpack the test data
os.makedirs(subs_dir)
check_call(['tar', '-xzf', TEST_DATA_PATH, '-C', subs_dir])
# Build an index
check_call([BUILD_INDEX_SCRIPT, '-d', subs_dir, '-o', idx_dir])
# Update the index (should fail due to duplicate files)
try:
check_call([UPDATE_INDEX_SCRIPT, '-d', subs_dir, idx_dir])
raise Exception('Uh oh, an exception should have been thrown...')
except CalledProcessError:
pass
# Update the index (should do nothing since all of them are duplicates)
check_call([UPDATE_INDEX_SCRIPT, '--skip-existing-names', '-d', subs_dir,
idx_dir])
# Update the index
for fname in os.listdir(subs_dir):
src_path = os.path.join(subs_dir, fname)
dst_path = os.path.join(subs_dir, 'copy::' + fname)
shutil.move(src_path, dst_path)
check_call([UPDATE_INDEX_SCRIPT, '-d', subs_dir, idx_dir])
assert os.path.isfile(os.path.join(idx_dir, 'documents.txt.old'))
# Test the new index
def count_and_test(index, document, tokens):
ids = index.contains(tokens, [document])
assert len(ids) == 1
count = 0
(d,) = list(index.search(tokens, [document]))
dh = documents.open(document)
assert len(d.postings) > 0
for l in d.postings:
assert l.len == len(tokens)
assert abs(l.end - l.start) < 10.0, 'ngram time too large'
count += 1
# Check that we actually found the right ngrams
assert [lexicon.decode(t) for t in dh.tokens(l.idx, l.len)] == tokens
return count
documents, lexicon = get_docs_and_lexicon(idx_dir)
idx_path = os.path.join(idx_dir, 'index.bin')
assert os.path.isdir(idx_path)
assert len(os.listdir(idx_path)) == 2, os.listdir(idx_path)
test_document = documents['copy::cnn.srt']
with captions.CaptionIndex(idx_path, lexicon, documents) as index:
assert count_and_test(index, test_document, ['THEY']) == 12
assert count_and_test(index, test_document, ['PEOPLE']) == 12
assert count_and_test(index, test_document, ['TO', 'THE']) == 9 # one wraps
assert count_and_test(index, test_document, ['GIBSON', 'GUITAR', 'DROP']) == 1
assert count_and_test(index, test_document, ['PUT', 'THAT', 'DOWN']) == 1
assert count_and_test(index, test_document, ['CLOCK', 'STRIKES']) == 2
assert count_and_test(index, test_document, ['>>']) == 149
assert count_and_test(index, test_document, ['SEE', '?']) == 1
```
#### File: caption-index/tools/scan.py
```python
import argparse
import os
import time
from multiprocessing import Pool
from tqdm import tqdm
from captions import Documents
DEFAULT_WORKERS = os.cpu_count()
def get_args():
p = argparse.ArgumentParser()
p.add_argument('index_dir', type=str,
help='Directory containing index files')
p.add_argument('-j', dest='workers', type=int, default=DEFAULT_WORKERS,
help='Number of CPU cores to use. Default: {}'.format(DEFAULT_WORKERS))
p.add_argument('--limit', dest='limit', type=int,
help='Limit the number of documents to scan')
return p.parse_args()
def count_tokens(i):
"""Do an expensive linear scan of all of the tokens"""
count = 0
for t in count_tokens.documents.open(i).tokens():
count += 1
return count
count_tokens.documents = None
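# count_tokens.documents is populated per worker process by init_worker (via
# the Pool initializer), so each worker opens its own Documents handle rather
# than pickling it with every task.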
def init_worker(function, index_dir):
doc_path = os.path.join(index_dir, 'documents.txt')
data_dir = os.path.join(index_dir, 'data')
function.documents = Documents.load(doc_path)
function.documents.configure(data_dir)
def main(index_dir, workers, limit):
doc_path = os.path.join(index_dir, 'documents.txt')
documents = Documents.load(doc_path)
if limit is None:
limit = len(documents)
start_time = time.time()
with Pool(
processes=workers, initializer=init_worker,
initargs=(count_tokens, index_dir)
) as pool:
count = 0
for n in tqdm(pool.imap_unordered(count_tokens, range(limit)),
desc='Counting tokens', total=limit):
count += n
print('Scanned {} documents for {} tokens in {:d}ms'.format(
limit, count, int(1000 * (time.time() - start_time))))
if __name__ == '__main__':
main(**vars(get_args()))
```
#### File: caption-index/tools/search.py
```python
import argparse
import os
import time
import traceback
from termcolor import colored, cprint
from captions import Lexicon, Documents, CaptionIndex
from captions.query import Query
from captions.util import PostingUtil
DEFAULT_CONTEXT = 3
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('index_dir', type=str,
help='Directory containing index files')
parser.add_argument('-s', dest='silent', action='store_true',
help='Silent mode for benchmarking')
parser.add_argument('-c', dest='context_size', type=int,
default=DEFAULT_CONTEXT,
help='Context window width (default: {})'.format(DEFAULT_CONTEXT))
parser.add_argument('query', nargs='*')
return parser.parse_args()
def format_seconds(s):
hours = int(s / 3600)
minutes = int(s / 60) - hours * 60
seconds = int(s) - hours * 3600 - minutes * 60
millis = int((s - int(s)) * 1000)
return '{:02d}h {:02d}m {:02d}.{:03d}s'.format(
hours, minutes, seconds, millis)
BOLD_ATTRS = ['bold']
def run_search(query_str, documents, lexicon, index, context_size, silent):
query = Query(query_str)
print('Estimated cost (% of index scanned): {}'.format(
query.estimate_cost(lexicon) * 100))
start_time = time.time()
result = query.execute(lexicon, index)
total_seconds = 0
    occurrence_count = 0
doc_count = 0
for i, d in enumerate(result):
if not silent:
cprint(documents[d.id].name, 'grey', 'on_white', attrs=BOLD_ATTRS)
        occurrence_count += len(d.postings)
d_data = documents.open(d.id) if not silent else None
for j, p in enumerate(PostingUtil.deoverlap(d.postings, use_time=False)):
total_seconds += p.end - p.start
if not silent:
if context_size > 0:
start_idx = max(p.idx - context_size, 0)
context = ' '.join([
colored(lexicon.decode(t), 'red', attrs=BOLD_ATTRS)
if k >= p.idx and k < p.idx + p.len else
lexicon.decode(t)
for k, t in enumerate(
d_data.tokens(
index=start_idx,
count=p.idx + p.len + context_size - start_idx
),
start_idx
)
])
else:
context = query
interval_str = '{} - {}'.format(
format_seconds(p.start), format_seconds(p.end))
position_str = (
str(p.idx) if p.len == 1 else
'{}-{}'.format(p.idx, p.idx + p.len))
print(
' {}-- [{}] [position: {}] "{}"'.format(
'\\' if j == len(d.postings) - 1 else '|',
colored(interval_str, 'yellow', attrs=BOLD_ATTRS),
colored(position_str, 'blue', attrs=BOLD_ATTRS),
context))
doc_count += 1
cprint(
        'Found {} documents, {} occurrences, spanning {:d}s in {:d}ms'.format(
            doc_count, occurrence_count, int(total_seconds),
int((time.time() - start_time) * 1000)),
'white', 'on_green', attrs=BOLD_ATTRS)
def main(index_dir, query, silent, context_size):
idx_path = os.path.join(index_dir, 'index.bin')
doc_path = os.path.join(index_dir, 'documents.txt')
data_path = os.path.join(index_dir, 'data')
lex_path = os.path.join(index_dir, 'lexicon.txt')
documents = Documents.load(doc_path)
documents.configure(data_path)
lexicon = Lexicon.load(lex_path)
with CaptionIndex(idx_path, lexicon, documents) as index:
if len(query) > 0:
print('Query: ', query)
run_search(' '.join(query), documents, lexicon, index,
context_size, silent)
else:
print('Enter a query:')
while True:
try:
query = input('> ')
except (EOFError, KeyboardInterrupt):
print()
break
query = query.strip()
if len(query) > 0:
try:
run_search(query, documents, lexicon, index,
context_size, silent)
except:
traceback.print_exc()
if __name__ == '__main__':
main(**vars(get_args()))
``` |
{
"source": "jhong93/esper-tv-widget",
"score": 3
} |
#### File: esper-tv-widget/app/error.py
```python
from typing import Optional
class InvalidUsage(Exception):
status_code = 400
def __init__(self, message: str, status_code: Optional[int] = None,
payload: Optional[str] = None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self) -> object:
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
class PersonNotInDatabase(InvalidUsage):
def __init__(self, person: str):
InvalidUsage.__init__(
self, 'Name "{}" is not in our database'.format(person))
class TagNotInDatabase(InvalidUsage):
def __init__(self, tag: str):
InvalidUsage.__init__(
self, 'Tag "{}" is not in our database'.format(tag))
class VideoNotInDatabase(InvalidUsage):
def __init__(self, video: str):
InvalidUsage.__init__(
self, 'Video "{}" is not in our database'.format(video))
class InvalidCaptionSearch(InvalidUsage):
def __init__(self, s: str):
InvalidUsage.__init__(
self, '"{}" is not a valid text search'.format(s))
class QueryTooExpensive(InvalidUsage):
pass
class NotFound(Exception):
def __init__(self, message: str):
self.message = message
class UnreachableCode(Exception):
pass
```
#### File: esper-tv-widget/app/route_search.py
```python
from datetime import datetime, timedelta
import json
import heapq
from enum import Enum
from typing import (
Any, List, Set, Tuple, Optional, Iterable, Generator, NamedTuple)
from flask import Flask, Response, jsonify, request
from captions.util import PostingUtil # type: ignore
from captions.query import Query # type: ignore
from rs_intervalset import ( # type: ignore
MmapIntervalSetMapping, MmapIntervalListMapping)
from rs_intervalset.wrapper import ( # type: ignore
MmapIListToISetMapping, MmapUnionIlistsToISetMapping,
MmapISetIntersectionMapping)
from rs_intervalset.wrapper import _deoverlap as deoverlap_intervals
from .types_frontend import *
from .types_backend import *
from .error import *
from .parsing import (
parse_date, format_date, parse_hour_set, parse_day_of_week_set,
ParsedTags, parse_tags)
from .load import VideoDataContext, CaptionDataContext
from .sum import DetailedDateAccumulator, SimpleDateAccumulator
MAX_VIDEO_SEARCH_IDS = 10
class SearchResultType(Enum):
video_set = 0
python_iset = 1
rust_iset = 2
class SearchContext(NamedTuple):
start_date: Optional[datetime] = None
end_date: Optional[datetime] = None
videos: Optional[Set[int]] = None
channel: Optional[str] = None
show: Optional[str] = None
hours: Optional[Set[int]] = None
days_of_week: Optional[Set[int]] = None
text_window: int = 0
class SearchResult(NamedTuple):
type: SearchResultType
context: Optional[SearchContext] = None
data: Any = None
class PythonISetData(NamedTuple):
video: Video
is_entire_video: bool
intervals: Optional[List[Interval]] = None
PythonISetDataGenerator = Generator[PythonISetData, None, None]
def get_non_none(a: Any, b: Any) -> Optional[Any]:
    return a if b is None else b
def and_search_contexts(
c1: SearchContext, c2: SearchContext
) -> Optional[SearchContext]:
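    # Intersect two contexts field by field; return None if the combined
    # constraints cannot be satisfied (e.g. two different channels). Note that
    # text_window is not carried over here.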
if c1.start_date is not None and c2.start_date is not None:
start_date = max(c1.start_date, c2.start_date)
else:
start_date = get_non_none(c1.start_date, c2.start_date)
if c1.end_date is not None and c2.end_date is not None:
end_date = min(c1.end_date, c2.end_date)
else:
end_date = get_non_none(c1.end_date, c2.end_date)
if end_date and start_date and end_date < start_date:
return None
if c1.videos is not None and c2.videos is not None:
videos = c1.videos & c2.videos
if not videos:
return None
else:
videos = get_non_none(c1.videos, c2.videos)
if c1.channel == c2.channel:
channel = c1.channel
elif c1.channel is not None and c2.channel is not None:
return None
else:
channel = get_non_none(c1.channel, c2.channel)
if c1.show == c2.show:
show = c1.show
elif c1.show is not None and c2.show is not None:
return None
else:
show = get_non_none(c1.show, c2.show)
if c1.hours is not None and c2.hours is not None:
hours = c1.hours & c2.hours
if not hours:
return None
else:
hours = get_non_none(c1.hours, c2.hours)
if c1.days_of_week is not None and c2.days_of_week is not None:
days_of_week = c1.days_of_week & c2.days_of_week
if not days_of_week:
return None
else:
days_of_week = get_non_none(c1.days_of_week, c2.days_of_week)
return SearchContext(start_date, end_date, videos, channel, show, hours,
days_of_week)
# Execution order preference (lower is higher)
SEARCH_KEY_EXEC_PRIORITY = {
SearchKey.video: 0,
SearchKey.channel: 0,
SearchKey.show: 0,
SearchKey.hour: 0,
SearchKey.day_of_week: 0,
'or': 1,
'and': 2,
SearchKey.text: 3,
SearchKey.face_name: 4,
SearchKey.face_tag: 5
}
def milliseconds(s: float) -> int:
return int(s * 1000)
def get_entire_video_ms_interval(video: Video) -> List[Interval]:
return [(0, int(video.num_frames / video.fps * 1000))]
def assert_param_not_set(
param: str, countable: str, suggested_var: Optional[str] = None
) -> None:
if param in request.args:
mesg = '"{}" cannot be used when counting "{}".'.format(
param, countable)
if suggested_var:
mesg += ' Try counting "{}" instead.'.format(suggested_var)
raise InvalidUsage(mesg)
def get_aggregate_fn(default_agg_by: str) -> AggregateFn:
agg = request.args.get(SearchParam.aggregate, None, type=str)
e = Aggregate[agg] if agg else default_agg_by
if e == Aggregate.day:
return lambda d: d
elif e == Aggregate.month:
return lambda d: datetime(d.year, d.month, 1)
elif e == Aggregate.week:
return lambda d: d - timedelta(days=d.isoweekday() - 1)
elif e == Aggregate.year:
return lambda d: datetime(d.year, 1, 1)
raise UnreachableCode()
def get_python_iset_from_filter(
vdc: VideoDataContext,
video_filter: Optional[VideoFilterFn]
) -> PythonISetDataGenerator:
for v in vdc.video_dict.values(): # sorted iterator
if video_filter is not None and video_filter(v):
yield PythonISetData(v, True)
def get_python_iset_from_rust_iset(
vdc: VideoDataContext,
isetmap: MmapIntervalSetMapping,
video_filter: Optional[VideoFilterFn]
) -> PythonISetDataGenerator:
for video_id in isetmap.get_ids():
video = vdc.video_by_id.get(video_id)
if video is not None and (
video_filter is None or video_filter(video)
):
intervals = isetmap.get_intervals(video_id, True)
if intervals:
yield PythonISetData(video, False, intervals=intervals)
MAX_TRANSCRIPT_SEARCH_COST = 0.005
def get_caption_intervals(
cdc: CaptionDataContext,
vdc: VideoDataContext,
text_str: str,
context: SearchContext
) -> PythonISetDataGenerator:
missing_videos = 0
matched_videos = 0
filtered_videos = 0
text_window = context.text_window
video_filter = get_video_filter(context)
documents = None
if context.videos is not None:
documents = []
for video_id in context.videos:
video = vdc.video_by_id[video_id]
if video_filter is None or video_filter(video):
document = cdc.document_by_name.get(video.name)
if document is not None:
documents.append(document)
if len(documents) == 0:
return iter(())
query = None
try:
query = Query(text_str.upper())
except Exception as e:
raise InvalidCaptionSearch(text_str)
if documents is None:
if query.estimate_cost(cdc.lexicon) > MAX_TRANSCRIPT_SEARCH_COST:
raise QueryTooExpensive(
'The text query is too expensive to compute. '
'"{}" contains too many common words/phrases.'.format(text_str))
results = []
for raw_result in query.execute(
cdc.lexicon, cdc.index, documents=documents,
ignore_word_not_found=True, case_insensitive=True
):
document = cdc.documents[raw_result.id]
video = vdc.video_dict.get(document.name)
if video is None:
missing_videos += 1
continue
else:
matched_videos += 1
if video_filter is not None and not video_filter(video):
filtered_videos += 1
continue
postings = raw_result.postings
if text_window > 0:
postings = PostingUtil.deoverlap(PostingUtil.to_fixed_length(
postings, text_window,
video.num_frames / video.fps))
results.append(PythonISetData(
video, False,
[(int(p.start * 1000), int(p.end * 1000)) for p in postings]))
results.sort(key=lambda x: x.video.id)
return iter(results)
def search_result_to_python_iset(
vdc: VideoDataContext,
result: SearchResult
) -> PythonISetDataGenerator:
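    # Normalize any of the three result representations (video set filter,
    # Python interval generator, Rust interval-set mapping) into a generator
    # of PythonISetData.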
if result.type == SearchResultType.video_set:
video_filter = get_video_filter(result.context)
return get_python_iset_from_filter(
vdc, video_filter)
elif result.type == SearchResultType.rust_iset:
video_filter = get_video_filter(result.context)
return get_python_iset_from_rust_iset(
vdc, result.data, video_filter)
elif result.type == SearchResultType.python_iset:
return result.data
raise UnreachableCode()
def and_python_isets(
r1: SearchResult,
r2: SearchResult
) -> PythonISetDataGenerator:
prev = None
for curr in heapq.merge(
((d.video.id, d) for d in r1.data),
((d.video.id, d) for d in r2.data)
):
if prev is None:
prev = curr
elif prev[0] == curr[0]:
if prev[1].is_entire_video:
yield curr[1]
elif curr[1].is_entire_video:
yield prev[1]
else:
intervals = list(merge_close_intervals(
intersect_sorted_intervals(
prev[1].intervals, curr[1].intervals)))
if len(intervals) > 0:
yield PythonISetData(curr[1].video, False, intervals)
prev = None
else:
assert curr[0] > prev[0], '{} < {}'.format(curr[0], prev[0])
prev = curr
def or_python_iset_with_filter(
vdc: VideoDataContext,
video_filter: VideoFilterFn,
search_result: SearchResult
) -> PythonISetDataGenerator:
assert video_filter is not None
assert search_result.type == SearchResultType.python_iset
all_videos_iter = iter(vdc.video_dict.values())
# Match videos from search_result to all videos
for curr in search_result.data:
curr_video = curr.video
all_videos_head = next(all_videos_iter)
while all_videos_head and all_videos_head.id < curr_video.id:
if video_filter(all_videos_head):
                yield PythonISetData(all_videos_head, True)
all_videos_head = next(all_videos_iter)
assert all_videos_head.id == curr_video.id
if video_filter(curr_video):
yield PythonISetData(curr_video, True)
else:
yield curr
# Finish up all_videos_iter
for video in all_videos_iter:
if video_filter(video):
yield PythonISetData(video, True)
def or_python_isets(
r1: SearchResult,
r2: SearchResult
) -> PythonISetDataGenerator:
assert r1.type == SearchResultType.python_iset
assert r2.type == SearchResultType.python_iset
prev = None
for curr in heapq.merge(
((s.video.id, s) for s in r1.data),
((s.video.id, s) for s in r2.data)
):
if prev is None:
prev = curr
elif curr[0] == prev[0]:
if curr[1].is_entire_video:
yield curr[1]
elif prev[1].is_entire_video:
yield prev[1]
else:
yield PythonISetData(
curr[1].video, False,
deoverlap_intervals(
heapq.merge(curr[1].intervals, prev[1].intervals),
100))
prev = None
else:
assert curr[0] > prev[0], '{} < {}'.format(curr[0], prev[0])
yield prev[1]
prev = curr
if prev is not None:
yield prev[1]
def or_python_iset_with_rust_iset(
vdc: VideoDataContext,
python_result: SearchResult,
rust_result: SearchResult
) -> PythonISetDataGenerator:
assert python_result.type == SearchResultType.python_iset
assert rust_result.type == SearchResultType.rust_iset
try:
python_result_head = next(python_result.data)
except StopIteration:
python_result_head = None
video_filter = get_video_filter(rust_result.context)
for video_id in rust_result.data.get_ids():
video = vdc.video_by_id.get(video_id)
if not video:
continue
while (
python_result_head is not None
and python_result_head.video.id < video_id
):
yield python_result_head
try:
python_result_head = next(python_result.data)
except StopIteration:
python_result_head = None
assert (python_result_head is None
or python_result_head.video.id >= video_id)
if (
python_result_head is None
or python_result_head.video.id != video_id
):
if video_filter is None or video_filter(video):
intervals = rust_result.data.get_intervals(video_id, True)
if intervals:
yield PythonISetData(video, False, intervals=intervals)
else:
assert python_result_head.video.id == video_id
if (
python_result_head.is_entire_video
or (video_filter is not None and not video_filter(video))
):
yield python_result_head
else:
yield PythonISetData(
video, False,
deoverlap_intervals(
heapq.merge(
python_result_head.intervals,
rust_result.data.get_intervals(video_id, True)
),
100))
try:
python_result_head = next(python_result.data)
except StopIteration:
python_result_head = None
# yield any remaining results
if python_result_head is not None:
yield python_result_head
for x in python_result.data:
yield x
def or_rust_isets(
vdc: VideoDataContext,
r1: SearchResult,
r2: SearchResult
) -> PythonISetDataGenerator:
assert r1.type == SearchResultType.rust_iset
assert r2.type == SearchResultType.rust_iset
r1_filter = get_video_filter(r1.context)
r2_filter = get_video_filter(r2.context)
r1_ids = set(r1.data.get_ids())
r2_ids = set(r2.data.get_ids())
for video_id in sorted(r1_ids | r2_ids):
video = vdc.video_by_id.get(video_id)
if video is None:
continue
r1_intervals = (
r1.data.get_intervals(video_id, True)
if (
video.id in r1_ids
and (r1_filter is None or r1_filter(video))
) else None)
r2_intervals = (
r2.data.get_intervals(video_id, True)
if (
video.id in r2_ids
and (r2_filter is None or r2_filter(video))
) else None)
if r1_intervals and r2_intervals:
yield PythonISetData(
video, False,
deoverlap_intervals(
heapq.merge(r1_intervals, r2_intervals), 100))
elif r1_intervals:
yield PythonISetData(video, False, r1_intervals)
elif r2_intervals:
yield PythonISetData(video, False, r2_intervals)
def join_intervals_with_commercials(
vdc: VideoDataContext,
video: Video,
intervals: List[Interval],
is_commercial: Ternary
) -> List[Interval]:
if is_commercial != Ternary.both:
if is_commercial == Ternary.true:
intervals = intersect_isetmap(
video, vdc.commercial_isetmap, intervals)
else:
intervals = minus_isetmap(
video, vdc.commercial_isetmap, intervals)
return intervals
def get_global_tags(tag: ParsedTags) -> Set[str]:
return {t for t in tag.tags if t in GLOBAL_TAGS}
def either_tag_or_none(a: str, b: str, s: set) -> Optional[str]:
has_a = a in s
has_b = b in s
if has_a and has_b:
raise InvalidUsage(
'Cannot use {} and {} tags simultaneously. Try using "all".'.format(a, b))
elif has_a:
return a
elif has_b:
return b
return None
def people_to_ilistmaps(
video_data_context: VideoDataContext,
people: Iterable[str]
) -> List[MmapIntervalListMapping]:
ilistmaps = []
for person in people:
person_intervals = video_data_context.all_person_intervals.get(
person, None)
if person_intervals is None:
raise PersonNotInDatabase(person)
ilistmaps.append(person_intervals.ilistmap)
return ilistmaps
def interpret_global_tags(
global_tags: Set[str]
) -> Tuple[bool, Optional[str], Optional[str]]:
gender_tag = either_tag_or_none(GlobalTags.male, GlobalTags.female, global_tags)
host_tag = either_tag_or_none(GlobalTags.host, GlobalTags.non_host, global_tags)
is_all = GlobalTags.all in global_tags and gender_tag is None and host_tag is None
return is_all, gender_tag, host_tag
def person_tags_to_people(
video_data_context: VideoDataContext,
tags: Iterable[str]
) -> Optional[Set[str]]:
selected_names = None
for tag in tags:
if tag not in GLOBAL_TAGS:
people_with_tag = \
video_data_context.all_person_tags.tag_name_to_names(tag)
if not people_with_tag:
raise TagNotInDatabase(tag)
if selected_names is None:
selected_names = set(people_with_tag)
else:
selected_names = selected_names.intersection(people_with_tag)
return selected_names
def person_tags_to_ilistmaps(
video_data_context: VideoDataContext,
tags: Iterable[str]
) -> List[MmapIntervalListMapping]:
non_global_tags = [t for t in tags if t not in GLOBAL_TAGS]
if len(non_global_tags) == 1:
ilistmap = video_data_context.cached_tag_intervals.get(
non_global_tags[0], None)
if ilistmap:
return [ilistmap]
ilistmaps = []
if len(non_global_tags) > 0:
people = person_tags_to_people(video_data_context, non_global_tags)
assert people is not None
ilistmaps.extend(people_to_ilistmaps(video_data_context, people))
return ilistmaps
def get_face_time_filter_mask(
gender_tag: Optional[str],
host_tag: Optional[str]
) -> Tuple[int, int]:
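    # Build a (payload_mask, payload_value) pair for filtering face intervals
    # by payload bits; per derive_data.py, bit 0 encodes binary gender (male)
    # and bit 2 encodes whether the face is a host.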
payload_mask = 0
payload_value = 0
if gender_tag:
gender_tag = gender_tag.strip().lower()
if gender_tag:
payload_mask |= 0b1
if gender_tag == GlobalTags.male:
payload_value |= 0b1
elif gender_tag == GlobalTags.female:
pass
else:
raise UnreachableCode()
if host_tag:
host_tag = host_tag.strip().lower()
if host_tag:
payload_mask |= 0b100
if host_tag == GlobalTags.host:
payload_value |= 0b100
elif host_tag == GlobalTags.non_host:
pass
else:
raise UnreachableCode()
return payload_mask, payload_value
def get_video_filter(context: SearchContext) -> Optional[VideoFilterFn]:
if (
context.videos is not None
or context.start_date is not None
or context.end_date is not None
or context.channel is not None
or context.show is not None
or context.hours is not None
or context.days_of_week is not None
):
def video_filter(video: Video) -> bool:
if (
(context.videos is not None and video.id not in context.videos)
or (context.show is not None and video.show != context.show)
or (context.days_of_week is not None
and video.dayofweek not in context.days_of_week)
or (context.channel is not None
and video.channel != context.channel)
or (context.start_date is not None
and video.date < context.start_date)
or (context.end_date is not None
and video.date > context.end_date)
):
return False
if context.hours:
video_start = video.hour
video_end = (
video.hour + round(video.num_frames / video.fps / 3600))
for h in range(video_start, video_end + 1):
if h in context.hours:
break
else:
return False
return True
return video_filter
return None
def get_face_tag_intervals(
vdc: VideoDataContext,
tag_str: str
) -> MmapIntervalSetMapping:
all_tags = parse_tags(tag_str)
global_tags = get_global_tags(all_tags)
if len(global_tags) == len(all_tags.tags):
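        # All tags are global (all / male / female / host / nonhost): use the
        # precomputed interval sets. Otherwise (below), union the per-person
        # interval lists on the fly with a payload filter.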
is_all, gender_tag, host_tag = interpret_global_tags(global_tags)
if is_all:
isetmap = vdc.face_intervals.all_isetmap
elif gender_tag is None:
if host_tag == GlobalTags.host:
isetmap = vdc.face_intervals.host_isetmap
elif host_tag == GlobalTags.non_host:
isetmap = vdc.face_intervals.nonhost_isetmap
else:
raise UnreachableCode()
elif gender_tag == GlobalTags.male:
if host_tag is None:
isetmap = vdc.face_intervals.male_isetmap
elif host_tag == GlobalTags.host:
isetmap = vdc.face_intervals.male_host_isetmap
elif host_tag == GlobalTags.non_host:
isetmap = vdc.face_intervals.male_nonhost_isetmap
else:
raise UnreachableCode()
elif gender_tag == GlobalTags.female:
if host_tag is None:
isetmap = vdc.face_intervals.female_isetmap
elif host_tag == GlobalTags.host:
isetmap = vdc.face_intervals.female_host_isetmap
elif host_tag == GlobalTags.non_host:
isetmap = vdc.face_intervals.female_nonhost_isetmap
else:
raise UnreachableCode()
else:
ilistmaps = person_tags_to_ilistmaps(vdc, all_tags.tags)
_, gender_tag, host_tag = interpret_global_tags(global_tags)
payload_mask, payload_value = get_face_time_filter_mask(
gender_tag, host_tag)
isetmap = MmapUnionIlistsToISetMapping(
ilistmaps, payload_mask, payload_value, 3000, 100)
return isetmap
def get_face_name_intervals(
vdc: VideoDataContext,
name: str
) -> MmapIntervalSetMapping:
person_intervals = vdc.all_person_intervals.get(name, None)
if person_intervals is None:
raise PersonNotInDatabase(name)
return person_intervals.isetmap
def get_face_count_intervals(
vdc: VideoDataContext,
face_count: int
) -> MmapIntervalSetMapping:
if face_count < 0:
raise InvalidUsage(
'"{}" cannot be less than 1'.format(SearchKey.face_count))
if face_count > 0xFF:
raise InvalidUsage(
'"{}" cannot be less than {}'.format(SearchKey.face_count, 0xFF))
return MmapIListToISetMapping(
vdc.face_intervals.num_faces_ilistmap,
0xFF, face_count, 3000, 0)
def intersect_isetmap(
video: Video,
isetmap: MmapIntervalSetMapping,
intervals: Optional[List[Interval]]
) -> List[Interval]:
return (isetmap.get_intervals(video.id, True) if intervals is None
else isetmap.intersect(video.id, intervals, True))
def minus_isetmap(
video: Video,
isetmap: MmapIntervalSetMapping,
intervals: Optional[List[Interval]]
) -> List[Interval]:
return isetmap.minus(
video.id, intervals if intervals is not None
else get_entire_video_ms_interval(video), True)
def merge_close_intervals(
intervals: Iterable[Interval],
threshold: float = 0.25
) -> Generator[Interval, None, None]:
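    # Merge intervals (assumed sorted by start) whose gap is smaller than
    # `threshold`.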
curr_i = None
for i in intervals:
if curr_i is not None:
if max(curr_i[0], i[0]) - min(curr_i[1], i[1]) < threshold:
curr_i = (min(curr_i[0], i[0]), max(curr_i[1], i[1]))
else:
yield curr_i
curr_i = i
else:
curr_i = i
if curr_i is not None:
yield curr_i
def intersect_sorted_intervals(
l1: List[Interval],
l2: List[Interval]
) -> Generator[Interval, None, None]:
i, j = 0, 0
while i < len(l1) and j < len(l2):
a1, b1 = l1[i]
a2, b2 = l2[j]
max_a = max(a1, a2)
min_b = min(b1, b2)
if min_b - max_a > 0:
yield (max_a, min_b)
        # Advance whichever interval ends first; it cannot overlap anything
        # later in the other list.
        if b1 < b2:
            i += 1
        else:
            j += 1
def get_video_metadata_json(video: Video) -> JsonObject:
return {
'id': video.id,
'name': video.name,
'channel': video.channel,
'show': video.show,
'date': format_date(video.date),
'width': video.width,
'height': video.height,
'fps': video.fps,
'num_frames': video.num_frames
}
def add_search_routes(
app: Flask,
caption_data_context: CaptionDataContext,
video_data_context: VideoDataContext,
default_aggregate_by: str,
default_is_commercial: Ternary,
default_text_window: int
):
def _get_is_commercial() -> Ternary:
value = request.args.get(SearchParam.is_commercial, None, type=str)
return Ternary[value] if value else default_is_commercial
def _search_and(
children: Iterable[Any],
context: SearchContext
) -> Optional[SearchResult]:
# First pass: update the context
deferred_children = []
for c in children:
kc, vc = c
if kc == SearchKey.video:
video = video_data_context.video_dict.get(vc)
if video is None:
raise VideoNotInDatabase(vc)
if context.videos is not None and video.id not in context.videos:
return None
context = context._replace(videos={video.id})
elif kc == SearchKey.channel:
if context.channel is not None and context.channel != vc:
return None
else:
context = context._replace(channel=vc)
elif kc == SearchKey.show:
if context.show is not None and context.show != vc:
return None
else:
context = context._replace(show=vc)
elif kc == SearchKey.hour:
hours = parse_hour_set(vc)
if context.hours is not None:
hours &= context.hours
if not hours:
return None
context = context._replace(hours=hours)
elif kc == SearchKey.day_of_week:
days = parse_day_of_week_set(vc)
if context.days_of_week is not None:
days &= context.days_of_week
if not days:
return None
context = context._replace(days_of_week=days)
elif kc == SearchKey.text_window:
context = context._replace(text_window=int(vc))
else:
deferred_children.append(c)
# Second pass: execute search
if deferred_children:
deferred_children.sort(
key=lambda x: SEARCH_KEY_EXEC_PRIORITY.get(x[0], 100))
curr_result = None
for child in deferred_children:
child_result = _search_recursive(child, context)
if child_result is None:
return None
if curr_result is None:
curr_result = child_result
continue
r1 = curr_result
r2 = child_result
# Symmetric cases
if (
r2.type == SearchResultType.video_set
or (r1.type != SearchResultType.video_set
and r2.type == SearchResultType.python_iset)
):
r1, r2 = r2, r1
if r1.type == SearchResultType.video_set:
if r2.type == SearchResultType.video_set:
# Result: video_set
new_context = and_search_contexts(
r1.context, r2.context)
if new_context is None:
return None
curr_result = r2._replace(context=new_context)
elif r2.type == SearchResultType.python_iset:
# Result: python_iset
video_filter = get_video_filter(r1.context)
if video_filter:
curr_result = SearchResult(
SearchResultType.python_iset,
data=filter(lambda x: video_filter(x.video),
r2.data))
elif r2.type == SearchResultType.rust_iset:
# Result: rust_iset
new_context = and_search_contexts(
r1.context, r2.context)
if new_context is None:
return None
curr_result = r2._replace(context=new_context)
else:
raise UnreachableCode()
elif r1.type == SearchResultType.python_iset:
if r2.type == SearchResultType.python_iset:
# Result: python_iset
curr_result = SearchResult(
SearchResultType.python_iset,
data=and_python_isets(r1, r2))
elif r2.type == SearchResultType.rust_iset:
# Result: python_iset
video_filter = get_video_filter(r2.context)
new_data = [
PythonISetData(
x.video, False,
intervals=intersect_isetmap(
x.video, r2.data, x.intervals))
                            for x in r1.data
                            if video_filter is None or video_filter(x.video)]
curr_result = SearchResult(
SearchResultType.python_iset,
data=filter(lambda x: len(x.intervals) > 0,
new_data))
else:
raise UnreachableCode()
elif r1.type == SearchResultType.rust_iset:
if r2.type == SearchResultType.rust_iset:
# Result: rust_iset
curr_result = SearchResult(
SearchResultType.rust_iset,
context=and_search_contexts(
r1.context, r2.context),
data=MmapISetIntersectionMapping(
[r1.data, r2.data]))
else:
raise UnreachableCode()
else:
raise UnreachableCode()
return curr_result
else:
return SearchResult(SearchResultType.video_set, context=context)
def _search_or(
children: Iterable[Any],
context: SearchContext
) -> Optional[SearchResult]:
# First, collect the child results with type video_set
child_results = []
deferred_children = []
for c in children:
kc, vc = c
if (
kc == SearchKey.video or kc == SearchKey.channel
or kc == SearchKey.show or kc == SearchKey.hour
or kc == SearchKey.day_of_week
):
child_result = _search_recursive(c, context)
if child_result is not None:
child_results.append(child_result)
elif kc == SearchKey.text_window:
# This is a no-op
continue
else:
deferred_children.append(c)
child_video_filters = []
for c in child_results:
assert c.type == SearchResultType.video_set, c.type
child_video_filter = get_video_filter(c.context)
if child_video_filter is None:
# One of the children is "everything"
return c
child_video_filters.append(child_video_filter)
curr_result = None
if child_video_filters:
curr_result = SearchResult(
SearchResultType.python_iset,
data=get_python_iset_from_filter(
video_data_context,
lambda v: any(f(v) for f in child_video_filters)))
for c in deferred_children:
child_result = _search_recursive(c, context)
if child_result is None:
continue
if curr_result is None:
curr_result = child_result
continue
r1 = curr_result
r2 = child_result
# Symmetric cases
if (
r2.type == SearchResultType.video_set
or (r1.type != SearchResultType.video_set
and r2.type == SearchResultType.python_iset)
):
r1, r2 = r2, r1
if r1.type == SearchResultType.video_set:
r1_filter = get_video_filter(r1.context)
if r1_filter is None:
# R1 is "everything"
return r1
elif r2.type != SearchResultType.video_set:
r2_filter = get_video_filter(r2.context)
if r2_filter is None:
# R2 is "everything"
return r2
else:
# Return: python_iset
curr_result = SearchResult(
SearchResultType.python_iset,
data=get_python_iset_from_filter(
video_data_context,
lambda v: r1_filter(v) or r2_filter(v)))
elif r2.type == SearchResultType.python_iset:
# Return: python_iset
curr_result = SearchResult(
SearchResultType.python_iset,
data=or_python_iset_with_filter(
video_data_context, r1_filter, r2))
elif r2.type == SearchResultType.rust_iset:
# Return: python_iset
curr_result = SearchResult(
SearchResultType.python_iset,
data=or_python_iset_with_rust_iset(
video_data_context,
SearchResult(
SearchResultType.python_iset,
data=get_python_iset_from_filter(
video_data_context, r1_filter)
), r2))
else:
raise UnreachableCode()
elif r1.type == SearchResultType.python_iset:
if r2.type == SearchResultType.python_iset:
curr_result = SearchResult(
SearchResultType.python_iset,
data=or_python_isets(r1, r2))
elif r2.type == SearchResultType.rust_iset:
curr_result = SearchResult(
SearchResultType.python_iset,
data=or_python_iset_with_rust_iset(
video_data_context, r1, r2))
else:
raise UnreachableCode()
elif r1.type == SearchResultType.rust_iset:
if r2.type == SearchResultType.rust_iset:
# Return: python_iset
curr_result = SearchResult(
SearchResultType.python_iset,
data=or_rust_isets(video_data_context, r1, r2))
else:
raise UnreachableCode()
else:
raise UnreachableCode()
return curr_result
def _search_recursive(
query: Any,
context: SearchContext
) -> Optional[SearchResult]:
k, v = query
if k == 'all':
return SearchResult(SearchResultType.video_set, context=context)
elif k == 'or':
return _search_or(v, context)
elif k == 'and':
return _search_and(v, context)
elif k == SearchKey.face_name:
return SearchResult(
SearchResultType.rust_iset, context=context,
data=get_face_name_intervals(video_data_context, v.lower()))
elif k == SearchKey.face_tag:
return SearchResult(
SearchResultType.rust_iset, context=context,
data=get_face_tag_intervals(video_data_context, v.lower()))
elif k == SearchKey.face_count:
return SearchResult(
SearchResultType.rust_iset, context=context,
data=get_face_count_intervals(video_data_context, int(v)))
elif k == SearchKey.text:
return SearchResult(
SearchResultType.python_iset,
data=get_caption_intervals(
caption_data_context, video_data_context, v, context))
elif k == SearchKey.text_window:
            # FIXME: this doesn't make any real sense here
return None
elif k == SearchKey.video:
video = video_data_context.video_dict.get(v)
if video is None:
raise VideoNotInDatabase(v)
if context.videos is not None and video.id not in context.videos:
return None
else:
return SearchResult(
SearchResultType.video_set,
context=context._replace(videos={video.id}))
elif k == SearchKey.channel:
if context.channel is not None and context.channel != v:
return None
else:
return SearchResult(
SearchResultType.video_set,
context=context._replace(channel=v))
elif k == SearchKey.show:
if context.show is not None and context.show != v:
return None
else:
return SearchResult(
SearchResultType.video_set,
context=context._replace(show=v))
elif k == SearchKey.hour:
hours = parse_hour_set(v)
if context.hours is not None:
hours &= context.hours
if not hours:
return None
return SearchResult(
SearchResultType.video_set,
context=context._replace(hours=hours))
elif k == SearchKey.day_of_week:
days = parse_day_of_week_set(v)
if context.days_of_week is not None:
days &= context.days_of_week
if not days:
return None
return SearchResult(
SearchResultType.video_set,
context=context._replace(days_of_week=days))
raise UnreachableCode()
@app.route('/search')
def search() -> Response:
aggregate_fn = get_aggregate_fn(default_aggregate_by)
accumulator = (
DetailedDateAccumulator(aggregate_fn)
if request.args.get(
SearchParam.detailed, 'true', type=str
) == 'true' else SimpleDateAccumulator(aggregate_fn))
query_str = request.args.get(SearchParam.query, type=str)
if query_str:
query = json.loads(query_str)
else:
query = ['all', None]
start_date = parse_date(
request.args.get(SearchParam.start_date, None, type=str))
end_date = parse_date(
request.args.get(SearchParam.end_date, None, type=str))
is_commercial = _get_is_commercial()
search_result = _search_recursive(
query, SearchContext(
start_date=start_date, end_date=end_date,
text_window=default_text_window))
if search_result is not None:
def helper(video: Video, intervals: List[Interval]) -> None:
intervals = join_intervals_with_commercials(
video_data_context, video, intervals, is_commercial)
if intervals:
accumulator.add(
video.date, video.id,
sum(i[1] - i[0] for i in intervals) / 1000)
for data in search_result_to_python_iset(
video_data_context, search_result
):
if data.is_entire_video:
intervals = get_entire_video_ms_interval(data.video)
else:
assert data.intervals is not None
intervals = data.intervals
helper(data.video, intervals)
return jsonify(accumulator.get())
def _video_name_or_id(v: str) -> str:
try:
v_id = int(v)
return video_data_context.video_by_id[v_id].name
except ValueError:
return v
def _get_entire_video(video: Video) -> JsonObject:
document = caption_data_context.document_by_name.get(video.name)
return {
'metadata': get_video_metadata_json(video),
'intervals': [(0, video.num_frames)],
}
@app.route('/search-videos')
def search_videos() -> Response:
video_ids_str = request.args.get(SearchParam.video_ids, None, type=str)
if not video_ids_str:
raise InvalidUsage('must specify video ids')
video_ids = set(json.loads(video_ids_str))
if len(video_ids) > MAX_VIDEO_SEARCH_IDS:
raise QueryTooExpensive('Too many video ids specified')
query_str = request.args.get(SearchParam.query, type=str)
if query_str:
query = json.loads(query_str)
else:
query = ['all', None]
is_commercial = _get_is_commercial()
results = []
def collect(video: Video, intervals: List[Interval]) -> None:
intervals = join_intervals_with_commercials(
video_data_context, video, intervals, is_commercial)
if intervals:
results.append({
'metadata': get_video_metadata_json(video),
'intervals': list(merge_close_intervals(
(i[0] / 1000, i[1] / 1000) for i in intervals
))
})
search_result = _search_recursive(
query, SearchContext(
videos=video_ids, text_window=default_text_window))
if search_result is not None:
for data in search_result_to_python_iset(
video_data_context, search_result
):
assert data.video.id in video_ids, \
'Unexpected video {}, not in {}'.format(
data.video.id, video_ids)
if data.is_entire_video:
intervals = get_entire_video_ms_interval(data.video)
else:
assert data.intervals is not None
intervals = data.intervals
collect(data.video, intervals)
assert len(results) <= len(video_ids), \
'Expected {} results, got {}'.format(len(video_ids), len(results))
return jsonify(results)
```
#### File: jhong93/esper-tv-widget/derive_data.py
```python
import argparse
import os
import json
import heapq
import time
from collections import defaultdict
from functools import wraps
from inspect import getfullargspec
from multiprocessing import Pool
from typing import List, Tuple
from pytz import timezone
from rs_intervalset import MmapIntervalListMapping, MmapIntervalSetMapping
from rs_intervalset.writer import (
IntervalSetMappingWriter, IntervalListMappingWriter)
from app.load import load_videos
U32_MAX = 0xFFFFFFFF
# Mask for data bits that are used
PAYLOAD_DATA_MASK = 0b00000111
PAYLOAD_LEN = 1
# Minimum interval for no faces
MIN_NO_FACES_MS = 1000
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('--datadir', type=str, default='data')
parser.add_argument(
'-i', '--incremental', action='store_true',
help='Incrementally update existing derived files (skips video ids '
'with existing derived data).')
parser.add_argument(
'-t', '--tag-limit', type=int, default=250,
help='Tags exceeding this number of individuals will be precomputed.')
parser.add_argument(
'-p', '--person-limit', type=int, default=2 ** 20, # 1MB
help='Person isets will be precomputed for people ilists exceeding this size.')
return parser.parse_args()
def mkdir_if_not_exists(d: str):
os.makedirs(d, exist_ok=True)
# TODO(james): investigate why derived data are subtly different from Spark
class IntervalAccumulator:
def __init__(self, fuzz: int = 250):
self._intervals = None
self._fuzz = fuzz
def add(self, start: int, end: int) -> None:
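        # Intervals must be added in sorted order; an interval that starts
        # within `fuzz` of the previous interval's end is merged into it.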
assert start <= end
if not self._intervals:
self._intervals = [(start, end)]
else:
last_int = self._intervals[-1]
if start > last_int[1] + self._fuzz:
self._intervals.append((start, end))
elif end > last_int[1]:
assert start >= last_int[0]
assert last_int[0] <= end
self._intervals[-1] = (last_int[0], end)
def get(self):
return self._intervals
# TODO(james): not sure this is actually catching any errors
def build_error_callback(message):
def cb(e):
print(message)
raise e
return cb
def print_task_info(f):
arg_spec = getfullargspec(f)
arg_idx = arg_spec.args.index('outfile')
assert arg_idx >= 0
@wraps(f)
def _task_info(*args, **kwargs):
outfile = args[arg_idx]
print('Writing:', outfile)
start_time = time.time()
result = f(*args, **kwargs)
print('Done:', outfile, '({:0.3f}s)'.format(time.time() - start_time))
return result
return _task_info
@print_task_info
def derive_face_iset(
face_ilist_file: str,
payload_mask: int,
payload_value: int,
outfile: str,
is_incremental: bool
) -> None:
ilistmap = MmapIntervalListMapping(face_ilist_file, PAYLOAD_LEN)
video_ids = set(ilistmap.get_ids())
if is_incremental and os.path.exists(outfile):
video_ids -= get_iset_ids(outfile)
with IntervalSetMappingWriter(outfile, append=is_incremental) as writer:
for video_id in sorted(video_ids):
acc = IntervalAccumulator()
for interval in ilistmap.intersect(
video_id, [(0, U32_MAX)], payload_mask, payload_value,
False
):
acc.add(*interval)
result = acc.get()
if result:
writer.write(video_id, result)
def derive_face_isets(
workers: Pool,
face_ilist_file: str,
outdir: str,
is_incremental: bool
) -> None:
mkdir_if_not_exists(outdir)
def helper(mask: int, value: int, outfile: str) -> None:
workers.apply_async(
derive_face_iset,
(
face_ilist_file, mask, value, os.path.join(outdir, outfile),
is_incremental
),
error_callback=build_error_callback('Failed on: ' + face_ilist_file))
# There are 3 bits in the encoding
# The 1's place is binary gender. 1 if male, 0 if female. Ignore if
# the 2's place is 1.
# The 2's place is nonbinary gender. If 1, ignore the 1's place.
# This individual counted in neither male nor female aggregations
# The 4's place is 1 if the individual is a host of the show, 0 otherwise
helper(0b000, 0b000, 'all.iset.bin')
helper(0b011, 0b001, 'male.iset.bin')
helper(0b011, 0b000, 'female.iset.bin')
helper(0b100, 0b100, 'host.iset.bin')
helper(0b100, 0b000, 'nonhost.iset.bin')
helper(0b111, 0b101, 'male_host.iset.bin')
helper(0b111, 0b001, 'male_nonhost.iset.bin')
helper(0b111, 0b100, 'female_host.iset.bin')
helper(0b111, 0b000, 'female_nonhost.iset.bin')
IntervalAndPayload = Tuple[int, int, int]
def get_ilist_ids(fname):
return set(MmapIntervalListMapping(fname, PAYLOAD_LEN).get_ids())
def get_iset_ids(fname):
return set(MmapIntervalSetMapping(fname).get_ids())
@print_task_info
def derive_num_faces_ilist(
data_dir: str,
face_ilist_file: str,
outfile: str,
is_incremental: bool
) -> None:
def deoverlap(
intervals: List[IntervalAndPayload], fuzz: int = 250
) -> List[IntervalAndPayload]:
result = []
for i in intervals:
if len(result) == 0:
result.append(i)
else:
last = result[-1]
if last[2] == i[2] and i[0] - last[1] <= fuzz:
result[-1] = (min(i[0], last[0]), max(i[1], last[1]), i[2])
else:
result.append(i)
return result
ilistmap = MmapIntervalListMapping(face_ilist_file, PAYLOAD_LEN)
video_ids = set(ilistmap.get_ids())
if is_incremental and os.path.exists(outfile):
video_ids -= get_ilist_ids(outfile)
# timezone does not matter here since we only want video length
video_durations = {
v.id : int(v.num_frames / v.fps * 1000)
for v in load_videos(data_dir, timezone('UTC')).values()
}
with IntervalListMappingWriter(
outfile, PAYLOAD_LEN, append=is_incremental
) as writer:
for video_id in sorted(video_ids):
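            # Faces on screen at the same time share identical (start, end)
            # intervals, so counting consecutive duplicates gives the face
            # count; gaps longer than MIN_NO_FACES_MS become zero-count
            # intervals.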
intervals = []
curr_interval = None
curr_interval_count = None
for interval in ilistmap.get_intervals(video_id, 0, 0, False):
if not curr_interval:
if interval[0] > 0 and interval[0] > MIN_NO_FACES_MS:
intervals.append((0, interval[0], 0))
curr_interval = interval
curr_interval_count = 1
else:
assert interval >= curr_interval
if interval == curr_interval:
curr_interval_count += 1
else:
intervals.append((*curr_interval, curr_interval_count))
if interval[0] - curr_interval[1] > MIN_NO_FACES_MS:
intervals.append((curr_interval[1], interval[0], 0))
curr_interval = interval
curr_interval_count = 1
else:
if curr_interval:
intervals.append((*curr_interval, curr_interval_count))
if video_durations[video_id] - curr_interval[1] > MIN_NO_FACES_MS:
intervals.append((curr_interval[1], video_durations[video_id], 0))
else:
intervals.append((0, video_durations[video_id], 0))
writer.write(video_id, deoverlap(intervals))
@print_task_info
def derive_person_iset(
person_ilist_file: str,
outfile: str,
is_incremental: bool
) -> None:
ilistmap = MmapIntervalListMapping(person_ilist_file, PAYLOAD_LEN)
video_ids = set(ilistmap.get_ids())
if is_incremental and os.path.exists(outfile):
video_ids -= get_iset_ids(outfile)
with IntervalSetMappingWriter(outfile, append=is_incremental) as writer:
for video_id in sorted(video_ids):
acc = IntervalAccumulator()
for interval in ilistmap.intersect(
video_id, [(0, U32_MAX)],
0, 0, # Keep all faces
False
):
acc.add(*interval)
result = acc.get()
if result:
writer.write(video_id, result)
def parse_person_name(fname: str) -> str:
return os.path.splitext(os.path.splitext(fname)[0])[0]
def derive_person_isets(
workers: Pool,
person_ilist_dir: str,
outdir: str,
threshold_in_bytes: int,
is_incremental: bool
) -> None:
mkdir_if_not_exists(outdir)
skipped_count = 0
for person_file in os.listdir(person_ilist_dir):
if not person_file.endswith('.ilist.bin'):
print('Skipping:', person_file)
continue
person_name = parse_person_name(person_file)
person_path = os.path.join(person_ilist_dir, person_file)
derived_path = os.path.join(outdir, person_name + '.iset.bin')
if not os.path.exists(derived_path) and os.path.getsize(person_path) < threshold_in_bytes:
if skipped_count < 100:
print('Skipping (too small):', person_file)
skipped_count += 1
continue
workers.apply_async(
derive_person_iset,
(person_path, derived_path, is_incremental),
error_callback=build_error_callback('Failed on: ' + person_file))
if skipped_count > 0:
print('Skipped {} people (files too small).'.format(skipped_count))
@print_task_info
def derive_tag_ilist(
person_ilist_files: str,
outfile: str,
is_incremental: bool
) -> None:
ilistmaps = [MmapIntervalListMapping(f, PAYLOAD_LEN)
for f in person_ilist_files]
video_id_set = set()
for ilist in ilistmaps:
video_id_set.update(ilist.get_ids())
def deoverlap_intervals(intervals):
payload_dict = defaultdict(IntervalAccumulator)
for a, b, c in heapq.merge(*intervals):
payload_dict[c & PAYLOAD_DATA_MASK].add(a, b)
return list(heapq.merge(*[
[(a, b, payload) for a, b in acc.get()]
for payload, acc in payload_dict.items()
]))
if is_incremental and os.path.exists(outfile):
video_id_set -= get_ilist_ids(outfile)
with IntervalListMappingWriter(
outfile, PAYLOAD_LEN, append=is_incremental
) as writer:
for i in sorted(video_id_set):
intervals = []
for ilist in ilistmaps:
intervals.append(ilist.get_intervals_with_payload(i, True))
writer.write(i, deoverlap_intervals(intervals))
def derive_tag_ilists(
workers: Pool,
person_ilist_dir: str,
metadata_path: str,
outdir: str,
threshold: int,
is_incremental: bool
) -> None:
people_available = {
parse_person_name(p) for p in os.listdir(person_ilist_dir)
if p.endswith('.ilist.bin')
}
with open(metadata_path) as f:
people_to_tags = json.load(f)
people_to_tags = {
k.lower(): v for k, v in people_to_tags.items()
if k.lower() in people_available
}
tag_to_people = defaultdict(list)
for person, tags in people_to_tags.items():
for tag, _ in tags:
tag_to_people[tag].append(person)
mkdir_if_not_exists(outdir)
# Try to queue the expensive ones first
for tag, people in sorted(tag_to_people.items(), key=lambda x: -len(x[1])):
tag_path = os.path.join(outdir, tag + '.ilist.bin')
if os.path.exists(tag_path) or len(people) >= threshold:
people_ilist_files = [
os.path.join(person_ilist_dir, '{}.ilist.bin'.format(p))
for p in people]
workers.apply_async(
derive_tag_ilist,
(people_ilist_files, tag_path, is_incremental),
error_callback=build_error_callback('Failed on: ' + tag))
def main(
datadir: str,
incremental: bool,
tag_limit: int,
person_limit: int
) -> None:
outdir = os.path.join(datadir, 'derived')
mkdir_if_not_exists(outdir)
# Tasks are added from most expensive to least expensive to reduce tail
# latency and CPU underutilization
with Pool() as workers:
workers.apply_async(
derive_num_faces_ilist,
(
datadir,
os.path.join(datadir, 'faces.ilist.bin'),
os.path.join(outdir, 'num_faces.ilist.bin'),
incremental
),
error_callback=build_error_callback('Failed on: num faces ilist'))
metadata_path = os.path.join(datadir, 'people.metadata.json')
if os.path.exists(metadata_path):
derive_tag_ilists(
workers, os.path.join(datadir, 'people'),
metadata_path,
os.path.join(outdir, 'tags'),
tag_limit, incremental)
derive_face_isets(
workers, os.path.join(datadir, 'faces.ilist.bin'),
os.path.join(outdir, 'face'), incremental)
derive_person_isets(
workers, os.path.join(datadir, 'people'),
os.path.join(outdir, 'people'),
person_limit, incremental)
workers.close()
workers.join()
print('Done!')
if __name__ == '__main__':
main(**vars(get_args()))
``` |
{
"source": "jhong93/vpd",
"score": 2
} |
#### File: jhong93/vpd/detect.py
```python
import os
import argparse
import random
import math
from collections import Counter, defaultdict
from typing import NamedTuple
from tabulate import tabulate
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import numba
import torch
torch.set_num_threads(8)
from util.io import store_json, load_json, load_text
from util.proposal import BaseProposalModel, EnsembleProposalModel
from util.video import get_metadata
from action_dataset.load import load_embs, load_actions
from action_dataset.eval import get_test_prefixes
import video_dataset_paths as dataset_paths
class DataConfig(NamedTuple):
video_name_prefix: 'Optional[str]'
classes: 'List[str]'
window_before: float = 0.
window_after: float = 0.
TENNIS_CLASSES = [
'forehand_topspin', 'forehand_slice', 'backhand_topspin', 'backhand_slice',
'forehand_volley', 'backhand_volley', 'overhead', 'serve', 'unknown_swing'
]
TENNIS_WINDOW = 0.1
TENNIS_MIN_SWINGS_FEW_SHOT = 5
DATA_CONFIGS = {
'tennis': DataConfig(
video_name_prefix=None,
classes=TENNIS_CLASSES,
window_before=TENNIS_WINDOW,
window_after=TENNIS_WINDOW
),
'tennis_front': DataConfig(
video_name_prefix='front__',
classes=TENNIS_CLASSES,
window_before=TENNIS_WINDOW,
window_after=TENNIS_WINDOW
),
'tennis_back': DataConfig(
video_name_prefix='back__',
classes=TENNIS_CLASSES,
window_before=TENNIS_WINDOW,
window_after=TENNIS_WINDOW
),
'fs_jump': DataConfig(
video_name_prefix=None,
classes=['axel', 'lutz', 'flip', 'loop', 'salchow', 'toe_loop'],
),
'fx': DataConfig(video_name_prefix=None, classes=[])
}
class Label(NamedTuple):
video: str
value: str
start_frame: int
end_frame: int
fps: float
EMB_FILE_SUFFIX = '.emb.pkl'
SEQ_MODELS = ['lstm', 'gru']
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('dataset', choices=list(DATA_CONFIGS.keys()))
parser.add_argument('-k', type=int, default=1)
parser.add_argument('-o', '--out_dir', type=str)
parser.add_argument('--emb_dir', type=str)
parser.add_argument('-nt', '--n_trials', type=int, default=1)
parser.add_argument('--algorithm', type=str, choices=SEQ_MODELS,
default='gru')
parser.add_argument('-ne', '--n_examples', type=int, default=-1)
parser.add_argument('-tw', '--tennis_window', type=float)
parser.add_argument('--_all', action='store_true')
parser.add_argument('--norm', action='store_true')
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--batch_size', type=int)
return parser.parse_args()
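# Collect each example's (start_frame, end_frame) per video and merge
# overlapping or touching ranges into sorted, non-overlapping intervals.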
def get_video_intervals(examples):
result = defaultdict(list)
for l in examples:
result[l.video].append((l.start_frame, l.end_frame))
def deoverlap(intervals):
ret = []
for a, b in sorted(intervals):
if len(ret) == 0 or ret[-1][1] < a:
ret.append((a, b))
else:
ret[-1] = (ret[-1][0], b)
return tuple(ret)
return {k: deoverlap(v) for k, v in result.items()}
class ProposalModel:
MIN_TRAIN_EPOCHS = 25
NUM_TRAIN_EPOCHS = 200
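    # Builds per-frame binary labels (1 inside any labeled interval) for each
    # training video and fits an ensemble of sequence models on them.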
def __init__(self, arch_type, emb_dict, train_labels, hidden_dim,
ensemble_size, splits=5, **kwargs):
self.embs = emb_dict
train_videos = list({l.video for l in train_labels
if l.video in emb_dict})
def get_gt(video):
vx, _ = emb_dict[video]
vy = np.zeros(vx.shape[0], dtype=np.int32)
for l in train_labels:
if l.video == video:
vy[l.start_frame:l.end_frame] = 1
return vx, vy
X, y = [], []
custom_split = None
for i, v in enumerate(train_videos):
vx, vy = get_gt(v)
if len(vx.shape) == 3:
if custom_split is None:
custom_split = []
for j in range(vx.shape[1]):
X.append(vx[:, j, :])
y.append(vy)
custom_split.append(i)
else:
X.append(vx)
y.append(vy)
if custom_split is not None:
assert len(custom_split) == len(X)
if len(X) < ensemble_size:
ensemble_size = splits = len(X)
print('Too few videos for full ensemble:', ensemble_size)
kwargs.update({
'ensemble_size': ensemble_size, 'splits': splits,
'num_epochs': ProposalModel.NUM_TRAIN_EPOCHS,
'min_epochs': ProposalModel.MIN_TRAIN_EPOCHS,
'custom_split': custom_split
})
if len(X) < ensemble_size:
raise Exception('Not enough examples for ensemble!')
else:
self.model = EnsembleProposalModel(
arch_type, X, y, hidden_dim, **kwargs)
def predict(self, video):
x = self.embs[video][0]
if len(x.shape) == 3:
return self.model.predict_n(
*[x[:, i, :] for i in range(x.shape[1])])
else:
return self.model.predict(x)
LOC_TEMPORAL_IOUS = [0.1 * i for i in range(1, 10)]
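# Temporal IoU of two frame intervals [a1, a2] and [b1, b2].
# Example: calc_iou(0, 10, 5, 20) == 5 / 20 == 0.25.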
@numba.jit(nopython=True)
def calc_iou(a1, a2, b1, b2):
isect = min(a2, b2) - max(a1, b1)
return isect / (max(a2, b2) - min(a1, b1)) if isect > 0 else 0
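# is_tp is ordered by descending detection score; num_pos is the total number
# of ground-truth intervals.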
def compute_precision_recall_curve(is_tp, num_pos):
recall = []
precision = []
tp, fp = 0, 0
for p in is_tp:
if p:
tp += 1
else:
fp += 1
recall.append(tp / num_pos)
precision.append(tp / (tp + fp))
return precision, recall
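# Standard AP interpolation: sweep recall from high to low, keeping the running
# maximum precision at each recall level.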
def compute_interpolated_precision(precision, recall):
interp_recall = []
interp_precision = []
max_precision = 0
min_recall = 1
for i in range(1, len(recall) + 1):
r = recall[-i]
p = precision[-i]
if r < min_recall:
if len(interp_precision) == 0 or p > interp_precision[-1]:
interp_recall.append(min_recall)
interp_precision.append(max_precision)
max_precision = max(max_precision, p)
min_recall = min(min_recall, r)
interp_recall.append(0)
interp_precision.append(1)
interp_precision.reverse()
interp_recall.reverse()
return interp_precision, interp_recall
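# Average precision: area under the interpolated precision-recall curve.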
def compute_ap(pc, rc):
ipc, irc = compute_interpolated_precision(pc, rc)
assert irc[0] == 0, irc[0]
assert irc[-1] == 1, (irc[-1], len(irc))
area = 0
for i in range(len(irc) - 1):
dr = irc[i + 1] - irc[i]
assert dr > 0
p = ipc[i + 1]
if i > 0:
assert p < ipc[i], (p, ipc[i])
area += p * dr
assert area >= 0 and area <= 1, area
return area
def plot_proposal_dist(props):
fig = plt.figure()
plt.hist(x=[b - a for a, b in props], bins=50)
plt.xlabel('num frames')
plt.ylabel('frequency')
plt.show()
plt.close(fig)
def plot_precision_recall_curve(p, r):
fig = plt.figure()
plt.plot(r, p)
ip, ir = compute_interpolated_precision(p, r)
plt.step(ir, ip)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel('recall')
plt.ylabel('precision')
plt.show()
plt.close(fig)
def make_split(train_examples, is_tennis):
print('Making a new split!')
train_videos = list({l.video for l in train_examples})
if is_tennis:
# Split off the player
train_videos = list({
v.split('__', 1)[1] for v in train_videos})
print('Videos:', len(train_videos))
train_videos.sort()
random.Random(42).shuffle(train_videos)
if is_tennis:
train_intervals = get_video_intervals(train_examples)
def tennis_filter(t):
front_video = 'front__' + t
back_video = 'back__' + t
            # Don't sample videos with too few swings
return (
len(train_intervals.get(front_video, []))
>= TENNIS_MIN_SWINGS_FEW_SHOT
and len(train_intervals.get(back_video, []))
>= TENNIS_MIN_SWINGS_FEW_SHOT)
train_videos = list(filter(tennis_filter, train_videos))
for v in train_videos:
print(v)
return train_videos
def run_localization(dataset_name, emb_dict, train_examples, test_examples,
n_examples, n_trials, algorithm, k, hidden_dim, batch_size,
out_dir, _all=False):
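    # Train a proposal model (optionally few-shot), score every test video, and
    # report AP over a grid of activation thresholds and temporal IoUs.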
test_video_ints = get_video_intervals(test_examples)
test_video_int_count = sum(len(v) for v in test_video_ints.values())
print('Test examples (non-overlapping):', test_video_int_count)
mean_train_int_len = np.mean([
t.end_frame - t.start_frame for t in train_examples])
min_prop_len = 0.67 * math.ceil(mean_train_int_len)
max_prop_len = 1.33 * math.ceil(mean_train_int_len)
if n_examples == -1:
exp_name = 'full train'
else:
exp_name = '{} shot'.format(n_examples)
# Activation threshold ranges
thresholds = (
np.linspace(0.05, 0.5, 10) if 'tennis' in dataset_name
else np.linspace(0.1, 0.9, 9))
trial_results = []
for trial in range(n_trials):
if n_examples < 0:
exp_train_examples = train_examples
else:
few_shot_file = \
'action_dataset/{}/train.localize.{}.txt'.format(
'fs' if dataset_name.startswith('fs') else dataset_name,
trial)
print('Loading split:', few_shot_file)
train_videos = load_text(few_shot_file)
train_videos = train_videos[:n_examples]
exp_train_examples = [
l for l in train_examples
if (l.video in train_videos or
('tennis' in dataset_name and
l.video.split('__', 1)[1] in train_videos))]
kwargs = {}
if batch_size is not None:
kwargs['batch_size'] = batch_size
model = ProposalModel(algorithm, emb_dict, exp_train_examples,
hidden_dim, ensemble_size=k, **kwargs)
results = []
for video in tqdm(
set(emb_dict) if _all else
{l.video for l in test_examples if l.video in emb_dict},
desc='Running {}'.format(exp_name)
):
scores = model.predict(video)
results.append((video, scores))
if out_dir:
os.makedirs(out_dir, exist_ok=True)
out_path = os.path.join(
out_dir, '{}_trial{}_{}_pred.json'.format(
                    'train{}'.format(len(exp_train_examples)
                                     if n_examples < 0 else n_examples),
trial, algorithm))
store_json(out_path, {k: v.tolist() for k, v in results})
def calc_ap_at_threshold(act_thresh):
all_props = []
for video, scores in results:
props = BaseProposalModel.get_proposals(scores, act_thresh)
for p, score in props:
all_props.append((video, p, score))
all_props.sort(key=lambda x: -x[-1])
# plot_proposal_dist([x[1] for x in all_props])
aps_at_tiou = []
for t_iou in LOC_TEMPORAL_IOUS:
all_remaining = {}
for video, gt_ints in test_video_ints.items():
all_remaining[video] = set(gt_ints)
is_tp = []
for video, p, score in all_props:
mid = (p[1] + p[0]) // 2
if p[1] - p[0] < min_prop_len:
p = (max(0, mid - min_prop_len // 2),
mid + min_prop_len // 2)
elif p[1] - p[0] > max_prop_len:
p = (max(0, mid - max_prop_len // 2),
mid + max_prop_len // 2)
# Only the first retrieval can be correct
video_remaining = all_remaining.get(video)
if video_remaining is None:
is_tp.append(False)
else:
recalled = []
for gt in video_remaining:
if calc_iou(*p, *gt) >= t_iou:
recalled.append(gt)
# Disallow subsequent recall of these ground truth
# intervals
for gt in recalled:
video_remaining.remove(gt)
if len(video_remaining) == 0:
del all_remaining[video]
is_tp.append(len(recalled) > 0)
if len(is_tp) > 0 and any(is_tp):
pc, rc = compute_precision_recall_curve(
is_tp, test_video_int_count)
# if (
# np.isclose(t_iou, 0.5)
# and np.isclose(act_thresh, 0.2)
# ):
# plot_precision_recall_curve(pc, rc)
aps_at_tiou.append(compute_ap(pc, rc))
else:
aps_at_tiou.append(0)
return aps_at_tiou
all_aps = []
for act_thresh in thresholds:
all_aps.append(calc_ap_at_threshold(act_thresh))
headers = ['tIoU', *['AP@{:0.2f}'.format(x) for x in thresholds]]
rows = []
for i, t_iou in enumerate(LOC_TEMPORAL_IOUS):
rows.append([t_iou, *[x[i] for x in all_aps]])
print(tabulate(rows, headers=headers))
trial_results.append(np.array(all_aps))
if len(trial_results) > 1:
mean_result = trial_results[0] / n_trials
for t in trial_results[1:]:
mean_result += t / n_trials
headers = ['tIoU', *['AP@{:0.2f}'.format(x) for x in thresholds]]
rows = []
for i, t_iou in enumerate(LOC_TEMPORAL_IOUS):
rows.append(
[t_iou, *[mean_result[j, i] for j in range(len(thresholds))]])
print('\nMean across {} trials:'.format(len(trial_results)))
print(tabulate(rows, headers=headers))
def load_tennis_data(config):
def parse_video_name(v):
v = os.path.splitext(v)[0]
video_name, start, end = v.rsplit('_', 2)
return (video_name, int(start), int(end), v)
video_meta_dict = {
parse_video_name(v): get_metadata(
os.path.join(dataset_paths.TENNIS_VIDEO_DIR, v)
) for v in tqdm(os.listdir(dataset_paths.TENNIS_VIDEO_DIR),
desc='Loading video metadata')
if v.endswith('.mp4')
}
actions = load_actions('action_dataset/tennis/all.txt')
test_prefixes = get_test_prefixes('tennis')
train_labels = []
test_labels = []
for action, label in actions.items():
if label not in config.classes:
continue
base_video, player, frame = action.split(':')
frame = int(frame)
for k in video_meta_dict:
if k[0] == base_video and k[1] <= frame and k[2] >= frame:
fps = video_meta_dict[k].fps
mid_frame = frame - k[1]
start_frame = max(
0, int(mid_frame - fps * config.window_before))
end_frame = int(mid_frame + fps * config.window_after)
label = Label(
'{}__{}'.format(player, k[-1]),
'action', start_frame, end_frame, fps)
break
if base_video.startswith(test_prefixes):
test_labels.append(label)
else:
train_labels.append(label)
return train_labels, test_labels
def load_fs_data(config):
video_meta_dict = {
os.path.splitext(v)[0]: get_metadata(
os.path.join(dataset_paths.FS_VIDEO_DIR, v))
for v in tqdm(os.listdir(dataset_paths.FS_VIDEO_DIR),
desc='Loading video metadata')
if v.endswith('.mp4')
}
actions = load_actions('action_dataset/fs/all.txt')
test_prefixes = get_test_prefixes('fs')
durations = []
train_labels = []
test_labels = []
for action, label in actions.items():
if label not in config.classes:
continue
video, start_frame, end_frame = action.split(':')
start_frame = int(start_frame)
end_frame = int(end_frame)
fps = video_meta_dict[video].fps
# Dilate
mid_frame = (start_frame + end_frame) / 2
start_frame = min(
start_frame, int(mid_frame - fps * config.window_before))
end_frame = max(end_frame, int(mid_frame + fps * config.window_after))
durations.append((end_frame - start_frame) / fps)
label = Label(video, 'action', start_frame, end_frame, fps)
if video.startswith(test_prefixes):
test_labels.append(label)
else:
train_labels.append(label)
print(np.mean(durations))
return train_labels, test_labels
def load_fx_data(config):
from finegym.util import ANNOTATION_FILE
from sklearn.model_selection import train_test_split
video_meta_dict = {
os.path.splitext(v)[0]: get_metadata(
os.path.join(dataset_paths.FX_VIDEO_DIR, v))
for v in tqdm(os.listdir(dataset_paths.FX_VIDEO_DIR),
desc='Loading video metadata')
if v.endswith('.mp4')
}
all_labels = []
event_id = 2 # Female fx
annotations = load_json(ANNOTATION_FILE)
for video in annotations:
for event, event_data in annotations[video].items():
if event_data['event'] != event_id:
continue
video_name = '{}_{}'.format(video, event)
if event_data['segments'] is None:
print('{} has no segments'.format(video_name))
continue
for segment, segment_data in event_data['segments'].items():
assert segment_data['stages'] == 1
assert len(segment_data['timestamps']) == 1
start, end = segment_data['timestamps'][0]
fps = video_meta_dict[video_name].fps
start_frame = int(max(0, fps * (start - config.window_before)))
end_frame = int(fps * (end + config.window_after))
all_labels.append(Label(
video_name, 'action', start_frame, end_frame, fps))
_, test_videos = train_test_split(
list(video_meta_dict.keys()), test_size=0.25)
test_videos = set(test_videos)
train_labels = []
test_labels = []
for l in all_labels:
if l.video in test_videos:
test_labels.append(l)
else:
train_labels.append(l)
return train_labels, test_labels
def main(dataset, out_dir, n_trials, n_examples, tennis_window,
emb_dir, _all, algorithm, norm, k, hidden_dim, batch_size):
config = DATA_CONFIGS[dataset]
emb_dict = load_embs(emb_dir, norm)
def print_label_dist(labels):
print('Videos:', len({l.video for l in labels}))
for name, count in Counter(x.value for x in labels).most_common():
print(' {} : {}'.format(name, count))
if dataset.startswith('tennis'):
if tennis_window is not None:
config = config._replace(
window_before=tennis_window,
window_after=tennis_window)
train_labels, test_labels = load_tennis_data(config)
elif dataset.startswith('fs'):
train_labels, test_labels = load_fs_data(config)
else:
train_labels, test_labels = load_fx_data(config)
print('\nLoaded {} train labels'.format(len(train_labels)))
print_label_dist(train_labels)
print('\nLoaded {} test labels'.format(len(test_labels)))
print_label_dist(test_labels)
print('\nTrain / test split: {} / {}\n'.format(
len(train_labels), len(test_labels)))
run_localization(dataset, emb_dict, train_labels, test_labels,
n_examples, n_trials, algorithm, k, hidden_dim, batch_size,
out_dir, _all=_all)
if __name__ == '__main__':
main(**vars(get_args()))
```
#### File: vpd/finegym/util.py
```python
import os
import math
from typing import NamedTuple
import numpy as np
from util.io import load_pickle
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
ANNOTATION_FILE = os.path.join(
DIR_PATH, 'data', 'finegym_annotation_info_v1.1.json')
GYM99_CATEGORY_FILE = os.path.join(DIR_PATH, 'data', 'gym99_categories.txt')
GYM99_ABRV_CATEGORY_FILE = os.path.join(
DIR_PATH, 'data', 'gym99_categories_abrv.txt')
GYM99_TRAIN_FILE = os.path.join(
DIR_PATH, 'data', 'gym99_train_element_v1.1.txt')
GYM99_VAL_FILE = os.path.join(DIR_PATH, 'data', 'gym99_val_element.txt')
class Category(NamedTuple):
class_id: int
set_id: int
g530_id: int
event: str
name: str
def _parse_label(s):
return int(s.split(':', 1)[1].strip())
def load_categories(file_name):
result = {}
with open(file_name) as fp:
for line in fp:
clabel, slabel, glabel, data = line.split(';')
clabel = _parse_label(clabel)
slabel = _parse_label(slabel)
glabel = _parse_label(glabel)
event, name = data.strip()[1:].split(')', 1)
result[clabel] = Category(clabel, slabel, glabel, event, name.strip())
return result
def load_labels(file_name):
result = {}
with open(file_name) as fp:
for line in fp:
action_id, label = line.split(' ')
result[action_id] = int(label)
return result
def parse_full_action_id(s):
s, action_id = s.split('_A_')
video_id, event_id = s.split('_E_')
return video_id, 'E_' + event_id, 'A_' + action_id
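# Row-wise L2 normalization; near-zero norms are clamped to 1 to avoid
# division by zero.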
def _normalize_rows(x):
d = np.linalg.norm(x, axis=1, keepdims=True)
d[d < 1e-12] = 1
return x / d
def load_actions(annotations, labels, meta_dict, emb_dir=None, norm=False,
pre_seconds=0, min_seconds=0, max_seconds=1000,
target_fps=None, interp_skipped=False):
result = {}
for full_action_id in labels:
video_id, event_id, action_id = parse_full_action_id(full_action_id)
video_event_id = '{}_{}'.format(video_id, event_id)
video_meta = meta_dict.get(video_event_id)
if video_meta is None:
continue
timestamps = annotations[video_id][event_id]['segments'][action_id]['timestamps']
start, end = timestamps[0]
if end - start > max_seconds:
end = start + max_seconds
elif end - start < min_seconds:
end = start + min_seconds
if pre_seconds > 0:
start -= pre_seconds
start = max(start, 0)
start_frame = math.floor(start * video_meta.fps)
end_frame = math.ceil(end * video_meta.fps)
embs = []
if emb_dir is not None:
sample_incr = 1
if target_fps is not None:
sample_incr = min(1, target_fps / video_meta.fps)
sample_balance = 1
emb_path = os.path.join(emb_dir, video_event_id + '.emb.pkl')
if os.path.isfile(emb_path):
skipped_embs = []
for frame_num, emb, _ in load_pickle(emb_path):
if frame_num >= start_frame and frame_num <= end_frame:
if sample_balance >= 0:
sample_balance -= 1
if interp_skipped and len(skipped_embs) > 0:
skipped_embs.append(emb)
emb = np.mean(skipped_embs, axis=0)
skipped_embs = []
embs.append(emb)
else:
if interp_skipped:
skipped_embs.append(emb)
sample_balance += sample_incr
if len(embs) > 0:
embs = np.stack(embs)
if norm:
embs = _normalize_rows(embs)
else:
embs = None
result[full_action_id] = ((start_frame, end_frame), embs)
return result
```
#### File: vpd/models/keypoint.py
```python
from contextlib import nullcontext
from collections import Counter
import torch
import torch.nn.functional as F
import torch.cuda.amp as amp
from .util import step, batch_mulitplexer, batch_zipper
TRIPLET_LOSS = False
class _BaseModel:
def __init__(self, encoder, decoders, device):
self.encoder = encoder
self.decoders = decoders
self.device = device
# Move to device
self.encoder.to(device)
for decoder in self.decoders.values():
decoder.to(device)
def _train(self):
self.encoder.train()
for decoder in self.decoders.values():
decoder.train()
def _eval(self):
self.encoder.eval()
for decoder in self.decoders.values():
decoder.eval()
class Keypoint_EmbeddingModel(_BaseModel):
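    # One epoch over the zipped loaders: a hinge (or triplet) contrastive loss
    # on paired poses plus an optional 3D keypoint regression loss weighted by
    # weight_3d.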
def epoch(self, data_loaders, optimizer=None, scaler=None, progress_cb=None,
weight_3d=1):
self._train() if optimizer is not None else self._eval()
dataset_losses = Counter()
dataset_contra_losses = Counter()
dataset_counts = Counter()
with torch.no_grad() if optimizer is None else nullcontext():
for zipped_batch in (
batch_zipper(data_loaders) if optimizer is not None
else ((x,) for x in batch_mulitplexer(data_loaders))
):
batch_loss = 0.
batch_n = 0
for dataset_name, batch in zipped_batch:
contra_loss = 0.
with nullcontext() if scaler is None else amp.autocast():
pose1 = batch['pose1'].to(self.device)
n = pose1.shape[0]
emb1 = self.encoder(pose1.view(n, -1))
emb2 = None
if 'pose2' in batch:
pose2 = batch['pose2'].to(self.device)
emb2 = self.encoder(pose2.view(n, -1))
contra_loss += F.hinge_embedding_loss(
torch.norm(emb1 - emb2, dim=1),
torch.ones(n, dtype=torch.int32, device=self.device),
reduction='sum')
if 'pose_neg' in batch:
pose_neg = batch['pose_neg'].to(self.device)
emb_neg = self.encoder(pose_neg.view(n, -1))
if TRIPLET_LOSS:
contra_loss += torch.sum(F.triplet_margin_with_distance_loss(
emb1, emb2, emb_neg, reduction='none'
) * batch['pose_neg_is_valid'].to(self.device))
else:
contra_loss += torch.sum(F.hinge_embedding_loss(
torch.norm(emb1 - emb_neg, dim=1),
-torch.ones(n, dtype=torch.int32, device=self.device),
reduction='none'
) * batch['pose_neg_is_valid'].to(self.device))
loss = 0.
loss += contra_loss
if 'kp_features' in batch:
pose_decoder = self.decoders['3d']
true3d = batch['kp_features'].float().to(self.device)
pred3d1 = pose_decoder(
emb1, dataset_name).reshape(true3d.shape)
loss += weight_3d * F.mse_loss(
pred3d1, true3d, reduction='sum')
if emb2 is not None:
pred3d2 = pose_decoder(
emb2, dataset_name).reshape(true3d.shape)
loss += weight_3d * F.mse_loss(
pred3d2, true3d, reduction='sum')
if contra_loss > 0:
dataset_contra_losses[dataset_name] += contra_loss.item()
dataset_losses[dataset_name] += loss.item()
dataset_counts[dataset_name] += n
# Sum the losses for the dataset
batch_loss += loss
batch_n += n
# Take mean of losses before backprop
batch_loss /= batch_n
if optimizer is not None:
step(optimizer, scaler, batch_loss)
if progress_cb is not None:
progress_cb(batch_n)
# print({k: v / dataset_counts[k]
# for k, v in dataset_contra_losses.items()})
epoch_n = sum(dataset_counts.values())
return (sum(dataset_contra_losses.values()) / epoch_n,
sum(dataset_losses.values()) / epoch_n,
{k: v / dataset_counts[k] for k, v in dataset_losses.items()})
def _predict(self, pose, get_emb, decoder_target=None):
assert get_emb or decoder_target is not None, 'Nothing to predict'
if not isinstance(pose, torch.Tensor):
pose = torch.FloatTensor(pose)
pose = pose.to(self.device)
if len(pose.shape) == 2:
pose = pose.unsqueeze(0)
self.encoder.eval()
if decoder_target is not None:
decoder = self.decoders['3d']
decoder.eval()
else:
decoder = None
with torch.no_grad():
n = pose.shape[0]
emb = self.encoder(pose.view(n, -1))
if decoder is None:
return emb.cpu().numpy(), None
pred3d = decoder(emb, decoder_target)
if get_emb:
return emb.cpu().numpy(), pred3d.cpu().numpy()
else:
return None, pred3d.cpu().numpy()
def embed(self, pose):
return self._predict(pose, get_emb=True)[0]
def predict3d(self, pose, decoder_target):
return self._predict(
pose, get_emb=False, decoder_target=decoder_target)[1]
def embed_and_predict3d(self, pose, decoder_target):
return self._predict(
pose, get_emb=True, decoder_target=decoder_target)
```
#### File: vpd/models/rgb.py
```python
import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet, model
from .module import ENCODER_ARCH
def add_flow_to_model(base_model):
# modify the convolution layers
# Torch models are usually defined in a hierarchical way.
    # nn.modules.children() returns all sub-modules in a DFS manner
modules = list(base_model.modules())
first_conv_idx = list(filter(lambda x: isinstance(modules[x], nn.Conv2d),
list(range(len(modules)))))[0]
conv_layer = modules[first_conv_idx]
container = modules[first_conv_idx - 1]
# modify parameters, assume the first blob contains the convolution kernels
params = [x.clone() for x in conv_layer.parameters()]
kernel_size = params[0].size()
new_kernel_size = kernel_size[:1] + (5,) + kernel_size[2:]
new_kernels = params[0].data.mean(dim=1, keepdim=True).expand(
new_kernel_size).contiguous()
new_conv = nn.Conv2d(5, conv_layer.out_channels,
conv_layer.kernel_size, conv_layer.stride,
conv_layer.padding,
bias=True if len(params) == 2 else False)
new_conv.weight.data = new_kernels
if len(params) == 2:
        new_conv.bias.data = params[1].data # add bias if necessary
layer_name = list(container.state_dict().keys())[0][:-7]
# remove .weight suffix to get the layer name
    # replace the first convolution layer
setattr(container, layer_name, new_conv)
return base_model
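# Swap the backbone's final fully connected layer for a new linear head with
# the requested output dimension.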
def replace_last_layer(base_model, last_layer_name, out_dim):
feature_dim = getattr(base_model, last_layer_name).in_features
setattr(base_model, last_layer_name, nn.Linear(feature_dim, out_dim))
return base_model
class RGBF_EmbeddingModel(nn.Module):
"""Basic embedding model with single frame features"""
def __init__(self, model_arch, emb_dim, use_flow, device,
pretrained=False):
super().__init__()
self.device = device
self.use_flow = use_flow
self.emb_dim = emb_dim
if 'resnet' in model_arch:
backbone = ENCODER_ARCH[model_arch].pretrained_init(
pretrained=pretrained)
if use_flow:
backbone = add_flow_to_model(backbone)
self.resnet = replace_last_layer(backbone, 'fc', emb_dim)
elif 'effnet' in model_arch:
effnet_name = 'efficientnet-b{}'.format(model_arch[-1])
self.effnet = EfficientNet.from_name(
effnet_name, in_channels=5 if use_flow else 3,
num_classes=emb_dim, image_size=128)
def forward(self, x):
backbone = self.resnet if hasattr(self, 'resnet') else self.effnet
return backbone(x)
def embed(self, x):
if not isinstance(x, torch.Tensor):
x = torch.Tensor(x)
x = x.to(self.device)
if len(x.shape) == 3:
x = x.unsqueeze(0)
if self.use_flow:
assert x.shape[1] == 5, 'Wrong number of channels for RGB + flow'
else:
assert x.shape[1] == 3, 'Wrong number of channels for RGB'
self.eval()
with torch.no_grad():
return self(x).cpu().numpy()
```
#### File: jhong93/vpd/preprocess_3d_pose.py
```python
import os
import argparse
import numpy as np
from tqdm import tqdm
from util.io import store_pickle, load_pickle
from vipe_dataset.people3d import load_3dpeople_skeleton
from vipe_dataset.human36m import load_human36m_skeleton
from vipe_dataset.nba2k import load_nba2k_skeleton
from vipe_dataset.amass import load_amass_skeleton
DATASETS = ['3dpeople', 'panoptic', 'human36m', 'nba2k', 'amass']
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('data_dir')
parser.add_argument('dataset', choices=DATASETS)
parser.add_argument('-o', '--out_file', type=str)
parser.add_argument('-v', '--visualize', action='store_true')
parser.add_argument('-vf', '--visualize_frequency', type=int, default=25)
return parser.parse_args()
def process_3dpeople_data(data_dir, visualize, visualize_frequency):
i = 0
result = {}
for person in sorted(os.listdir(data_dir)):
person_dir = os.path.join(data_dir, person)
for action in tqdm(
sorted(os.listdir(person_dir)),
desc='Processing {}'.format(person)
):
action_cam_dir = os.path.join(person_dir, action, 'camera01')
frames = os.listdir(action_cam_dir)
frame_pose3d = [None] * len(frames)
for frame in frames:
frame_path = os.path.join(action_cam_dir, frame)
frame_no = int(os.path.splitext(frame)[0])
frame_pose3d[frame_no - 1] = load_3dpeople_skeleton(
frame_path, visualize and i % visualize_frequency == 0)
i += 1
result[(person, action)] = frame_pose3d
return result
def process_human36m_data(data_dir, visualize, visualize_frequency):
import cdflib
i = 0
result = {}
for person in os.listdir(data_dir):
pose_dir = os.path.join(data_dir, person, 'MyPoseFeatures', 'D3_Positions')
for action_file in tqdm(
os.listdir(pose_dir), desc='Processing {}'.format(person)
):
action = os.path.splitext(action_file)[0]
action_path = os.path.join(pose_dir, action_file)
cdf_data = cdflib.CDF(action_path)
raw_poses = cdf_data.varget('Pose').squeeze()
cdf_data.close()
frame_poses = []
for j in range(raw_poses.shape[0]):
frame_poses.append(load_human36m_skeleton(
raw_poses[j, :],
visualize and i % visualize_frequency == 0))
i += 1
result[(person, action)] = frame_poses
return result
def process_nba2k_data(data_dir, visualize, visualize_frequency):
i = 0
result = {}
for person in os.listdir(data_dir):
pose_file = os.path.join(
data_dir, person, 'release_{}_2ku.pkl'.format(person))
pose_data = load_pickle(pose_file)
person_poses = []
frames = os.listdir(os.path.join(data_dir, person, 'images', '2ku'))
frames.sort()
j3d = pose_data['j3d']
assert len(frames) == len(j3d)
for joints in tqdm(j3d, desc='Processing {}'.format(person)):
person_poses.append(load_nba2k_skeleton(
joints, visualize and i % visualize_frequency == 0))
i += 1
result[(person,)] = person_poses
return result
def process_amass_data(data_dir, visualize, visualize_frequency):
i = 0
result = {}
for seq in sorted(os.listdir(data_dir)):
seq_dir = os.path.join(data_dir, seq)
pose_file = os.path.join(data_dir, seq, 'pose.npy')
if not os.path.isfile(pose_file):
continue
pose_arr = np.load(pose_file)
frame_poses = []
frames = list({
f.split('_')[0] for f in os.listdir(seq_dir)
if f.endswith(('jpg', 'png'))
})
frames.sort()
assert len(frames) == pose_arr.shape[0], '{} has bad data'.format(seq)
for j in tqdm(
range(pose_arr.shape[0]), desc='Processing {}'.format(seq)
):
frame_poses.append(
load_amass_skeleton(
pose_arr[j, :, :],
visualize and i % visualize_frequency == 0))
i += 1
dataset, action = seq.split('_', 1)
result[(dataset, action)] = frame_poses
return result
def main(data_dir, dataset, out_file, visualize, visualize_frequency):
if dataset == '3dpeople':
pose3d = process_3dpeople_data(data_dir, visualize, visualize_frequency)
elif dataset == 'human36m':
pose3d = process_human36m_data(data_dir, visualize, visualize_frequency)
elif dataset == 'nba2k':
pose3d = process_nba2k_data(data_dir, visualize, visualize_frequency)
elif dataset == 'amass':
pose3d = process_amass_data(data_dir, visualize, visualize_frequency)
else:
raise NotImplementedError()
if out_file is not None:
store_pickle(out_file, pose3d)
print('Done!')
if __name__ == '__main__':
main(**vars(get_args()))
```
#### File: jhong93/vpd/recut_finegym_video.py
```python
import os
import math
import argparse
from tqdm import tqdm
from util.io import load_json
from util.video import get_metadata, cut_segment
from finegym.util import ANNOTATION_FILE
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('video_dir')
parser.add_argument('event')
parser.add_argument('-o', '--out_dir')
return parser.parse_args()
EVENT_TYPES = {
'female_VT': 1,
'female_FX': 2,
'female_BB': 3,
'female_UB': 4
}
def main(video_dir, event, out_dir):
annotations = load_json(ANNOTATION_FILE)
event_type_id = EVENT_TYPES[event]
if out_dir:
os.makedirs(out_dir, exist_ok=True)
for video, events in tqdm(annotations.items()):
video_path = os.path.join(video_dir, '{}.mp4'.format(video))
if not os.path.exists(video_path):
video_path = os.path.join(video_dir, '{}.mkv'.format(video))
video_meta = get_metadata(video_path)
for event_id, event_data in events.items():
timestamps = event_data['timestamps']
assert len(timestamps) == 1, 'Too many timestamps for event'
start, end = timestamps[0]
start_frame = math.floor(start * video_meta.fps)
end_frame = math.ceil(end * video_meta.fps)
if event_data['event'] == event_type_id and out_dir:
clip_out_path = os.path.join(
out_dir, '{}_{}.mp4'.format(video, event_id))
if not os.path.exists(clip_out_path):
cut_segment(video_path, video_meta, clip_out_path,
start_frame, end_frame)
else:
print('Already done:', clip_out_path)
print('Done!')
if __name__ == '__main__':
main(**vars(get_args()))
```
#### File: jhong93/vpd/train_vipe_model.py
```python
import argparse
import os
import math
import re
import itertools
from typing import NamedTuple
from tqdm import tqdm
import cv2
import numpy as np
import torch
import torch.optim as optim
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader
from util.io import store_json, load_json
from vipe_dataset import human36m, people3d, nba2k, amass
from vipe_dataset.util import render_3d_skeleton_views
from vipe_dataset.dataset_base import NUM_COCO_KEYPOINTS, NUM_COCO_BONES
from vipe_dataset.keypoint import (
    Human36MDataset, People3dDataset, NBA2kDataset,
AmassDataset, Pairwise_People3dDataset)
from models.module import FCResNetPoseDecoder, FCPoseDecoder, FCResNet
from models.keypoint import Keypoint_EmbeddingModel
import vipe_dataset_paths as dataset_paths
NUM_RENDER_SEQS = 10
DATASETS_3D = ['3dpeople', 'human36m', 'nba2k', 'amass']
DATASETS_PAIR = ['3dpeople_pair']
DATASETS = DATASETS_3D + DATASETS_PAIR
LIFT_3D_WEIGHT = 1
USE_RESNET_DECODER = False
ENCODER_DROPOUT = 0.2
DECODER_DROPOUT = 0
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, nargs='+')
parser.add_argument('--save_dir', type=str, required=True)
parser.add_argument('--checkpoint_frequency', type=int, default=25)
parser.add_argument('--render_preview_frequency', type=int, default=100)
parser.add_argument('--num_epochs', type=int, default=500)
parser.add_argument('--learning_rate', type=float, default=0.0001)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--embedding_dim', type=int, default=32)
parser.add_argument('--encoder_arch', type=int, nargs=2, default=(2, 1024),
help='Num blocks, hidden size')
parser.add_argument('--decoder_arch', type=int, nargs=2, default=(2, 512),
                        help='Num blocks, hidden size')
parser.add_argument('--embed_bones', action='store_true')
parser.add_argument('--model_select_contrast', action='store_true')
parser.add_argument('--model_select_window', type=int, default=1)
parser.add_argument('--resume', action='store_true')
parser.add_argument('--no_camera_aug', action='store_true')
return parser.parse_args()
def render_frame_sequences(model, dataset_name, dataset, count,
skeleton_decoder):
count = min(count, dataset.num_sequences)
for i in tqdm(range(count), desc='Render - {}'.format(dataset_name)):
sequence = dataset.get_sequence(i)
for data in sequence:
part_norms = data['kp_offset_norms']
# Normalize the longest part to 1
part_norms = part_norms / np.max(part_norms)
true3d = data['kp_offsets'] * part_norms[:, None]
pred3d = model.predict3d(
data['pose'], dataset_name
).reshape(true3d.shape[0], -1)[:, :3] * part_norms[:, None]
render_im = render_3d_skeleton_views(
[
skeleton_decoder(true3d),
skeleton_decoder(pred3d),
],
labels=['true', 'pred'],
title='[{}] person={}, action={}, camera={}'.format(
dataset_name, data['person'], data['action'],
data['camera'])
)
yield cv2.cvtColor(render_im, cv2.COLOR_RGB2BGR)
def save_video_preview(out_file, frames):
vo = None
for frame in frames:
if vo is None:
h, w, _ = frame.shape
vo = cv2.VideoWriter(
out_file, cv2.VideoWriter_fourcc(*'mp4v'), 10, (w, h))
vo.write(frame)
vo.release()
print('Saved video:', out_file)
class PoseDataset(NamedTuple):
name: str
train: 'Dataset'
val: 'Dataset'
has_3d: bool = False
skeleton_decoder: 'Function' = None
pose_3d_shape: 'Tuple' = None
mean_kp_offset_norms: 'List[float]' = None
def load_datasets(dataset_names, embed_bones, augment_camera):
datasets = []
if 'human36m' in dataset_names:
train_dataset, val_dataset = Human36MDataset.load_default(
dataset_paths.HUMAN36M_KEYPOINT_DIR,
dataset_paths.HUMAN36M_3D_POSE_FILE, embed_bones, augment_camera)
datasets.append(PoseDataset(
'human36m', train_dataset, val_dataset,
has_3d=True, skeleton_decoder=human36m.decode_skeleton_from_offsets,
pose_3d_shape=train_dataset[0]['kp_features'].shape,
mean_kp_offset_norms=train_dataset.mean_kp_offset_norms.tolist()))
if '3dpeople' in dataset_names:
train_dataset, val_dataset = People3dDataset.load_default(
dataset_paths.PEOPLE_3D_KEYPOINT_DIR,
dataset_paths.PEOPLE_3D_3D_POSE_FILE, embed_bones, augment_camera)
datasets.append(PoseDataset(
'3dpeople', train_dataset, val_dataset,
has_3d=True, skeleton_decoder=people3d.decode_skeleton_from_offsets,
pose_3d_shape=train_dataset[0]['kp_features'].shape,
mean_kp_offset_norms=train_dataset.mean_kp_offset_norms.tolist()))
if '3dpeople_pair' in dataset_names:
train_dataset, val_dataset = Pairwise_People3dDataset.load_default(
dataset_paths.PEOPLE_3D_KEYPOINT_DIR, 20, embed_bones)
datasets.append(PoseDataset(
'3dpeople_pair', train_dataset, val_dataset))
if 'nba2k' in dataset_names:
train_dataset, val_dataset = NBA2kDataset.load_default(
dataset_paths.NBA2K_KEYPOINT_DIR, dataset_paths.NBA2K_3D_POSE_FILE,
embed_bones)
datasets.append(PoseDataset(
'nba2k', train_dataset, val_dataset,
has_3d=True, skeleton_decoder=nba2k.decode_skeleton_from_offsets,
pose_3d_shape=train_dataset[0]['kp_features'].shape,
mean_kp_offset_norms=train_dataset.mean_kp_offset_norms.tolist()))
if 'amass' in dataset_names:
train_dataset, val_dataset = AmassDataset.load_default(
dataset_paths.AMASS_KEYPOINT_DIR, dataset_paths.AMASS_3D_POSE_FILE,
embed_bones, augment_camera)
datasets.append(PoseDataset(
'amass', train_dataset, val_dataset,
has_3d=True, skeleton_decoder=amass.decode_skeleton_from_offsets,
pose_3d_shape=train_dataset[0]['kp_features'].shape,
mean_kp_offset_norms=train_dataset.mean_kp_offset_norms.tolist()))
return datasets
def get_model_params(encoder, decoders):
params = list(encoder.parameters())
for d in decoders.values():
params.extend(d.parameters())
return params
def save_model(save_dir, name, encoder, decoders, optimizer):
torch.save(
encoder.state_dict(),
os.path.join(save_dir, '{}.encoder.pt'.format(name)))
for k, v in decoders.items():
torch.save(
v.state_dict(),
os.path.join(save_dir, '{}.decoder-{}.pt'.format(name, k)))
torch.save(
optimizer.state_dict(),
os.path.join(save_dir, '{}.optimizer.pt'.format(name)))
def load_model(save_dir, name, encoder, decoders, optimizer, device):
encoder_path = os.path.join(save_dir, '{}.encoder.pt'.format(name))
print('Loading:', encoder_path)
encoder.load_state_dict(torch.load(encoder_path, map_location=device))
for k, decoder in decoders.items():
decoder_path = os.path.join(
save_dir, '{}.decoder-{}.pt'.format(name, k))
print('Loading:', decoder_path)
decoder.load_state_dict(torch.load(decoder_path, map_location=device))
optimizer_path = os.path.join(save_dir, '{}.optimizer.pt'.format(name))
print('Loading:', optimizer_path)
optimizer.load_state_dict(torch.load(optimizer_path, map_location=device))
def get_last_checkpoint(save_dir):
last_epoch = -1
for fname in os.listdir(save_dir):
m = re.match(r'epoch(\d+).encoder.pt', fname)
if m:
epoch = int(m.group(1))
last_epoch = max(epoch, last_epoch)
return last_epoch
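# Split the per-epoch batch budget across datasets so that every DataLoader
# yields roughly the same number of batches.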
def get_train_loaders(datasets, batch_size, num_load_workers):
total = sum(len(d.train) for d in datasets)
num_batches = math.ceil(total / batch_size)
train_loaders = [
(d.name, DataLoader(
d.train, round(len(d.train) / num_batches),
shuffle=True, num_workers=num_load_workers,
)) for d in datasets
]
print('Target # train batches:', num_batches)
for dataset, loader in train_loaders:
print(' {} has {} batches'.format(dataset, len(loader)))
num_batches = len(loader)
return train_loaders
def get_moving_avg_loss(losses, n, key):
return np.mean([l[key] for l in losses[-n:]])
def main(
num_epochs, batch_size, embedding_dim, encoder_arch,
decoder_arch, embed_bones, dataset, save_dir, render_preview_frequency,
checkpoint_frequency, model_select_contrast, model_select_window,
learning_rate, resume, no_camera_aug
):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
augment_camera = not no_camera_aug
del no_camera_aug
if resume:
print('Resuming training from:', save_dir)
assert os.path.exists(save_dir)
old_config = load_json(os.path.join(save_dir, 'config.json'))
num_epochs = old_config['num_epochs']
batch_size = old_config['batch_size']
learning_rate = old_config['learning_rate']
embedding_dim = old_config['embedding_dim']
encoder_arch = old_config['encoder_arch']
decoder_arch = old_config['decoder_arch']
embed_bones = old_config['embed_bones']
augment_camera = old_config['augment_camera']
dataset = [d['name'] for d in old_config['datasets']]
else:
assert dataset is not None
if 'all' in dataset:
print('Using all datasets!', DATASETS)
dataset = DATASETS
elif '3d' in dataset:
print('Using 3d datasets!', DATASETS_3D)
dataset = DATASETS_3D
print('Device:', device)
print('Num epochs:', num_epochs)
print('Batch size:', batch_size)
print('Embedding dim:', embedding_dim)
print('Encoder arch:', encoder_arch)
print('Decoder arch:', decoder_arch)
print('Embed bones:', embed_bones)
print('Augment camera:', augment_camera)
datasets = load_datasets(dataset, embed_bones, augment_camera)
n_train_examples = sum(len(d.train) for d in datasets)
n_val_examples = sum(len(d.val) for d in datasets if d.val is not None)
for vipe_dataset in datasets:
print('Dataset:', vipe_dataset.name)
print('', 'Train sequences:', len(vipe_dataset.train))
if vipe_dataset.val is not None:
print('', 'Val sequences:', len(vipe_dataset.val))
num_load_workers = max(os.cpu_count(), 4)
train_loaders = get_train_loaders(datasets, batch_size, num_load_workers)
val_loaders = [
(d.name, DataLoader(
d.val, batch_size, shuffle=False, num_workers=num_load_workers
)) for d in datasets if d.val is not None]
encoder = FCResNet(
(NUM_COCO_KEYPOINTS + NUM_COCO_BONES
if embed_bones else NUM_COCO_KEYPOINTS) * 3,
embedding_dim, *encoder_arch, dropout=ENCODER_DROPOUT)
# Add a 3d pose decoder
pose_decoder_targets = [(d.name, math.prod(d.pose_3d_shape))
for d in datasets if d.has_3d]
if USE_RESNET_DECODER:
# A bigger decoder is not always better
decoders = {'3d': FCResNetPoseDecoder(
embedding_dim, *decoder_arch, pose_decoder_targets,
dropout=DECODER_DROPOUT)}
else:
decoders = {'3d': FCPoseDecoder(
embedding_dim, [decoder_arch[1]] * decoder_arch[0],
pose_decoder_targets, dropout=DECODER_DROPOUT)}
# Wrapper that moves the models to the device
model = Keypoint_EmbeddingModel(encoder, decoders, device)
def get_optimizer():
return optim.AdamW(get_model_params(encoder, decoders),
lr=learning_rate)
optimizer = get_optimizer()
scaler = GradScaler() if device == 'cuda' else None
# Initialize the model
if resume:
last_checkpoint = get_last_checkpoint(save_dir)
load_model(save_dir, 'epoch{:04d}'.format(last_checkpoint),
encoder, decoders, optimizer, device)
start_epoch = last_checkpoint + 1
else:
start_epoch = 1
# Save the model settings
os.makedirs(save_dir)
store_json(os.path.join(save_dir, 'config.json'), {
'datasets': [{
'name': d.name,
'3d_pose_shape': d.pose_3d_shape,
'mean_kp_offset_norms': d.mean_kp_offset_norms
} for d in datasets],
'num_epochs': num_epochs,
'learning_rate': learning_rate,
'batch_size': batch_size,
'embedding_dim': embedding_dim,
'encoder_arch': encoder_arch,
'decoder_arch': decoder_arch,
'embed_bones': embed_bones,
'augment_camera': augment_camera
})
# Initialize the loss history
loss_file = os.path.join(save_dir, 'loss.json')
if resume:
losses = [x for x in load_json(loss_file) if x['epoch'] < start_epoch]
best_val_loss = min(get_moving_avg_loss(
losses[:i], model_select_window, 'val'
) for i in range(model_select_window, len(losses)))
print('Resumed val loss:', best_val_loss)
else:
losses = []
best_val_loss = float('inf')
for epoch in range(start_epoch, num_epochs + 1):
with tqdm(
desc='Epoch {} - train'.format(epoch), total=n_train_examples
) as pbar:
train_contra_loss, train_loss, dataset_train_losses = model.epoch(
train_loaders, optimizer=optimizer, scaler=scaler,
progress_cb=lambda n: pbar.update(n), weight_3d=LIFT_3D_WEIGHT)
dataset_val_losses = []
with tqdm(
desc='Epoch {} - val'.format(epoch), total=n_val_examples
) as pbar:
val_contra_loss, val_loss, dataset_val_losses = model.epoch(
val_loaders, progress_cb=lambda n: pbar.update(n),
weight_3d=LIFT_3D_WEIGHT)
losses.append({
'epoch': epoch,
'train': train_contra_loss if model_select_contrast else train_loss,
'val': val_contra_loss if model_select_contrast else val_loss,
'dataset_train': [('contrast', train_contra_loss)]
+ list(dataset_train_losses.items()),
'dataset_val': [('contrast', val_contra_loss)]
+ list(dataset_val_losses.items())
})
def print_loss(name, total, contra, mv_avg):
print('Epoch {} - {} loss: {:0.5f}, contra: {:0.3f} [mv-avg: {:0.5f}]'.format(
epoch, name, total, contra, mv_avg))
mv_avg_val_loss = get_moving_avg_loss(losses, model_select_window, 'val')
print_loss('train', train_loss, train_contra_loss,
get_moving_avg_loss(losses, model_select_window, 'train'))
print_loss('val', val_loss, val_contra_loss, mv_avg_val_loss)
if loss_file is not None:
store_json(loss_file, losses)
if epoch % render_preview_frequency == 0 and save_dir is not None:
save_video_preview(
os.path.join(save_dir, 'epoch{:04d}.train.mp4'.format(epoch)),
itertools.chain(*[
render_frame_sequences(
model, d.name, d.train, NUM_RENDER_SEQS,
d.skeleton_decoder
) for d in datasets if d.has_3d]))
save_video_preview(
os.path.join(save_dir, 'epoch{:04d}.val.mp4'.format(epoch)),
itertools.chain(*[
render_frame_sequences(
model, d.name, d.val, NUM_RENDER_SEQS,
d.skeleton_decoder
) for d in datasets if d.has_3d and d.val is not None]))
if save_dir is not None:
if mv_avg_val_loss < best_val_loss:
print('New best epoch!')
save_model(save_dir, 'best_epoch', encoder, decoders,
optimizer)
if epoch % checkpoint_frequency == 0:
print('Saving checkpoint: {}'.format(epoch))
save_model(save_dir, 'epoch{:04d}'.format(epoch), encoder,
decoders, optimizer)
best_val_loss = min(mv_avg_val_loss, best_val_loss)
print('Done!')
if __name__ == '__main__':
main(**vars(get_args()))
```
#### File: vpd/util/eval.py
```python
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
def save_confusion_matrix(truth, pred, out_file, norm=None):
label_names = list(set(truth) | set(pred))
label_names.sort()
truth_compact = [label_names.index(x) for x in truth]
pred_compact = [label_names.index(x) for x in pred]
cm = confusion_matrix(
truth_compact, pred_compact, labels=list(range(len(label_names))),
normalize=norm)
if norm is not None:
cm *= 100
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
disp = ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=label_names)
disp.plot(ax=ax, xticks_rotation='vertical',
values_format='.1f' if norm is not None else 'd')
plt.tight_layout()
plt.savefig(out_file)
plt.close(fig)
```
#### File: vpd/util/neighbors.py
```python
from collections import Counter
import heapq
import numpy as np
from multiprocessing import Pool
from sklearn.metrics import pairwise_distances
from dtw import dtw
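# Builds a sequence distance: DTW-align the pairwise-distance matrix of two
# embedding sequences and return the normalized alignment cost; pairs the step
# pattern cannot align are treated as infinitely far apart.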
def build_dtw_distance_fn(step_pattern='symmetricP2'):
def dtw_distance(a, b):
pd = pairwise_distances(a, b).astype(np.double)
try:
align = dtw(pd, distance_only=True, step_pattern=step_pattern)
return align.normalizedDistance
except ValueError:
return float('inf')
return dtw_distance
# Hack to transfer dist function to worker before forking
_WORKER_DIST_FN = {}
_WORKER_PROCESSES = 4
def _worker_helper(dist_fn_id, x, x_train, i):
return _WORKER_DIST_FN[dist_fn_id](x, x_train), i
class KNearestNeighbors:
def __init__(self, X, y, distance_fn, k=1, use_processes=False):
self.X = X
self.y = y
self.k = k
self.distance_fn = distance_fn
self._dist_fn_id = -1
if use_processes:
self._dist_fn_id = len(_WORKER_DIST_FN)
_WORKER_DIST_FN[self._dist_fn_id] = distance_fn
self._pool = Pool(_WORKER_PROCESSES)
def predict(self, x):
return self.predict_n(x)
def predict_n(self, *xs):
if self._dist_fn_id < 0:
top_k = []
for x in xs:
for i, x_train in enumerate(self.X):
d = self.distance_fn(x, x_train)
(heapq.heappush if len(top_k) < self.k else heapq.heappushpop
)(top_k, (-d, i))
top_k = [(-d, i) for d, i in top_k]
else:
args = []
for x in xs:
for i, x_train in enumerate(self.X):
args.append((self._dist_fn_id, x, x_train, i))
results = self._pool.starmap(_worker_helper, args)
top_k = sorted(results)[:self.k]
cls_count = Counter(self.y[i] for _, i in top_k)
max_count = cls_count.most_common(1)[0][1]
best_i = None
best_cls_dist = float('inf')
for d, i in top_k:
cls_ = self.y[i]
if cls_count[cls_] == max_count and d < best_cls_dist:
best_cls_dist = d
best_i = i
return self.y[best_i], best_i
class Neighbors:
def __init__(self, X, distance_fn):
self.X = X
self.distance_fn = distance_fn
def find(self, x, k, min_len):
knn_pq = []
for i, x_train in enumerate(self.X):
if x_train is not None and x_train.shape[0] >= min_len:
d = self.distance_fn(x, x_train)
(heapq.heappush if len(knn_pq) < k else heapq.heappushpop
)(knn_pq, (-d, i))
return [(i, -nd) for nd, i in sorted(knn_pq, key=lambda x: -x[0])]
def dist(self, x, i):
return self.distance_fn(x, self.X[i])
```
#### File: vpd/util/proposal.py
```python
import random
import copy
from contextlib import nullcontext
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import KFold
from tqdm import trange
class BaseProposalModel:
class _Seq(nn.Module):
def __init__(self, cell_type, emb_dim, hidden_dim,
depth=2, dropout=0.5, input_dropout=0.2):
super().__init__()
print('Backbone:', cell_type)
print(' input dim:', emb_dim)
print(' hidden dim:', hidden_dim)
print(' dropout:', dropout)
print(' input dropout:', input_dropout)
self.hidden_dim = hidden_dim
self.cell_type = cell_type
if cell_type == 'lstm':
self.backbone = nn.LSTM(
emb_dim, hidden_dim, num_layers=depth, batch_first=True,
bidirectional=True)
elif cell_type == 'gru':
self.backbone = nn.GRU(
emb_dim, hidden_dim, num_layers=depth, batch_first=True,
bidirectional=True)
else:
raise NotImplementedError()
self.fc_out = nn.Sequential(
nn.BatchNorm1d(2 * hidden_dim),
nn.Dropout(p=dropout),
nn.Linear(2 * hidden_dim, 2 * hidden_dim),
nn.ReLU(),
nn.BatchNorm1d(2 * hidden_dim),
nn.Dropout(p=dropout),
nn.Linear(2 * hidden_dim, 2))
self.drop_in = nn.Dropout(p=input_dropout)
def forward(self, x):
x = self.drop_in(x)
output, _ = self.backbone(x)
return self.fc_out(torch.reshape(output, (-1, output.shape[-1])))
class _Dataset(Dataset):
def __init__(self, X, y, seq_len=250, n=5000):
self.X = [torch.FloatTensor(xx) for xx in X]
self.y = [torch.LongTensor(yy) for yy in y]
self.weights = [max(0, len(z) - seq_len) for z in y]
assert max(self.weights) > 0, 'All sequences are too short!'
self.seq_len = seq_len
self.n = n
def __getitem__(self, unused):
idx = random.choices(range(len(self.y)), weights=self.weights, k=1)[0]
x = self.X[idx]
y = self.y[idx]
start_frame = random.randint(0, y.shape[0] - self.seq_len - 1)
return (x[start_frame:start_frame + self.seq_len, :],
y[start_frame:start_frame + self.seq_len])
def __len__(self):
return self.n
def __init__(self, arch_type, X, y, hidden_dim, batch_size=100,
num_epochs=25, min_epochs=10, early_term_acc=1,
early_term_no_val_improvement=50,
X_val=None, y_val=None, **kwargs):
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
emb_dim = X[0].shape[-1]
model = BaseProposalModel._Seq(arch_type, emb_dim, hidden_dim, **kwargs)
optimizer = torch.optim.AdamW(model.parameters())
train_loader = DataLoader(
BaseProposalModel._Dataset(X, y), batch_size=batch_size)
# Model selection
best_model = None
best_val_err_loss = (1, float('inf'))
best_val_epoch = 0
if X_val is not None:
val_loader = DataLoader(
BaseProposalModel._Dataset(X_val, y_val),
batch_size=batch_size)
model.to(self.device)
with trange(num_epochs) as pbar:
def refresh_loss(l, a, cva=None, bva=None, bva_epoch=None):
pbar.set_description(
'Train {} (tl={:0.3f}, ta={:0.1f}{})'.format(
arch_type.upper(), l, a * 100, '' if cva is None else
', va={:0.1f}, bva={:0.1f} @{}'.format(
cva * 100, bva * 100, bva_epoch)))
pbar.refresh()
for epoch in pbar:
loss, acc = BaseProposalModel._epoch(
model, train_loader, self.device, optimizer)
if X_val is not None:
val_loss, val_acc = BaseProposalModel._epoch(
model, val_loader, self.device)
if (1 - val_acc, val_loss) <= best_val_err_loss:
best_val_epoch = epoch
best_val_err_loss = (1 - val_acc, val_loss)
best_model = copy.deepcopy(model).to('cpu')
if (
1 - best_val_err_loss[0] >= early_term_acc
and epoch > min_epochs
):
break
elif (
epoch - best_val_epoch >= early_term_no_val_improvement
and epoch > min_epochs
):
break
refresh_loss(loss, acc, val_acc,
1 - best_val_err_loss[0], best_val_epoch)
else:
refresh_loss(loss, acc)
if epoch >= min_epochs and acc > early_term_acc:
break
if best_model is None:
self.model = model
else:
self.model = best_model.to(self.device)
del model
self.model.eval()
@staticmethod
def _epoch(model, loader, device, optimizer=None):
model.eval() if optimizer is None else model.train()
epoch_loss = 0.
n_correct = 0
with torch.no_grad() if optimizer is None else nullcontext():
n = 0
nt = 0
for X, y in loader:
pred = model(X.to(device))
y = y.flatten().to(device)
loss = F.cross_entropy(pred, y)
if optimizer is not None:
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_loss += loss.cpu().item()
n_correct += torch.sum(torch.argmax(pred, 1) == y).cpu().item()
nt += y.shape[0]
n += X.shape[0]
return epoch_loss / n, n_correct / nt
def predict(self, x):
x = torch.unsqueeze(torch.Tensor(x).to(self.device), 0)
with torch.no_grad():
pred = F.softmax(self.model(x), dim=1).squeeze()
return pred[:, 1].cpu().numpy()
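    # Turn per-frame activation scores into (start, end) proposals: threshold,
    # merge gaps of at most merge_thresh frames, drop spans of min_prop_len
    # frames or fewer, and score each proposal by its mean activation.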
@staticmethod
def get_proposals(scores, activation_thresh, min_prop_len=3,
merge_thresh=1):
props = []
curr_prop = None
for i in range(len(scores)):
if scores[i] >= activation_thresh:
if curr_prop is None:
curr_prop = (i, i)
else:
curr_prop = (curr_prop[0], i)
else:
if curr_prop is not None:
props.append(curr_prop)
curr_prop = None
if curr_prop is not None:
props.append(curr_prop)
del curr_prop
merged_props = []
for p in props:
if len(merged_props) == 0:
merged_props.append(p)
else:
last_p = merged_props[-1]
if p[0] - last_p[1] <= merge_thresh:
merged_props[-1] = (last_p[0], p[1])
else:
merged_props.append(p)
def get_score(a, b):
return np.mean(scores[a:b + 1])
return [(p, get_score(*p)) for p in merged_props
if p[1] - p[0] > min_prop_len]
class EnsembleProposalModel:
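    # Trains up to ensemble_size BaseProposalModels on different K-fold splits
    # and averages their per-frame predictions at inference time.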
def __init__(self, arch_type, X, y, hidden_dim, ensemble_size=3, splits=5,
custom_split=None, **kwargs):
if ensemble_size > 1:
print('Training an ensemble of {} {}s with {} folds'.format(
ensemble_size, arch_type.upper(), splits))
else:
print('Holding out 1 / {} for validation'.format(splits))
kf = KFold(n_splits=splits, shuffle=True)
if custom_split is None:
custom_split = np.arange(len(X))
unique_idxs = list(set(custom_split))
models = []
for train, val in kf.split(unique_idxs):
train = set(train)
val = set(val)
X_train, y_train = zip(*[(X[j], y[j]) for j in range(len(X))
if custom_split[j] in train])
X_val, y_val = zip(*[(X[j], y[j]) for j in range(len(X))
if custom_split[j] in val])
models.append(BaseProposalModel(
arch_type, X_train, y_train, hidden_dim,
X_val=X_val, y_val=y_val, **kwargs))
if len(models) >= ensemble_size:
break
self.models = models
def predict(self, x):
return self.predict_n(x)
def predict_n(self, *xs):
pred = None
denom = 0
for model in self.models:
for x in xs:
tmp = model.predict(x)
if pred is None:
pred = tmp
else:
pred += tmp
denom += 1
return pred / denom
``` |
{
"source": "JhonGalarza/Jhon_Mario_Galarza_Lopez",
"score": 3
} |
#### File: Jhon_Mario_Galarza_Lopez/CAMBIO MONEDAS/CambiosMonedas.py
```python
import pandas as pd
from matplotlib import pyplot as plt
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import Notebook
import Util
from datetime import *
# List of images for the toolbar buttons
iconos = ["./iconos/grafica.png", \
"./iconos/datos.png"
]
textosBotones = ["Gráfica", \
"Datos"
]
df = None
def obtenerMonedas():
global df
df = pd.read_csv("Cambios Monedas.csv")
    # Pull the values from the "Moneda" column
monedas = df["Moneda"].tolist()
return list(dict.fromkeys(monedas))
def graficar():
global df, monedas, paneles
    # Check that a currency has been selected
    if cmbMoneda.current()>=0:
        # Sort the data by date
        df.sort_values(by="Fecha", ascending=False).head()
        # Filter the data by the selected currency
        cambios = df[df["Moneda"]==monedas[cmbMoneda.current()]]
        # Data for the Y axis
        y = cambios["Cambio"]
        # Data for the X axis
        fechas = cambios["Fecha"]
        # Parse the dates
        x = [datetime.strptime(f, "%d/%m/%Y").date() for f in fechas]
        # Create the chart
        plt.clf() # clear the previous chart
        plt.title("Cambio "+monedas[cmbMoneda.current()])
        plt.ylabel("Cambio")
        plt.xlabel("Fecha")
        # Plot the series
        plt.plot(x, y)
        # Export the chart to an image
        plt.savefig("graficaMonedas.png")
        # Show the chart image
        # Remove the widgets from a container
#list = paneles[0].grid_slaves()
#for l in list:
# l.destroy()
lblGrafica=Label(paneles[0])
imgGrafica=PhotoImage(file = "graficaMonedas.png")
lblGrafica.config(image=imgGrafica)
lblGrafica.image=imgGrafica
lblGrafica.place(x=0, y=0)
        # Resize the window to match the chart image dimensions
v.minsize(imgGrafica.width(), imgGrafica.height()+100)
def mostrarDatos():
global df, monedas, paneles
    # Check that a currency has been selected
    if cmbMoneda.current()>=0:
        # Filter the data by the selected currency
        cambios = df[df["Moneda"]==monedas[cmbMoneda.current()]]
        # Show the average exchange rate
        Util.agregarEtiqueta(paneles[1], "Promedio:", 0, 0)
        Util.agregarEtiqueta(paneles[1], float(cambios["Cambio"].mean()), 0, 1)
        #print(cambios.std()) # standard deviation
        #print(cambios.max()) # maximum
        #print(cambios.min()) # minimum
v = Tk()
v.title("Cambios de Moneda")
v.geometry("400x200")
botones = Util.agregarBarra(v, iconos, textosBotones) # Add a toolbar
botones[0].configure(command=graficar)
botones[1].configure(command=mostrarDatos)
frm = Frame(v)
frm.pack(side=TOP, fill=X)
Util.agregarEtiqueta(frm, "Moneda:", 0, 0)
monedas=obtenerMonedas()
cmbMoneda=Util.agregarLista(frm, monedas, 0, 1)
nb = Notebook(v)
nb.pack(fill=BOTH, expand=YES)
encabezados=["Gráfica", "Datos"]
paneles=[]
for e in encabezados:
frm = Frame(v)
paneles.append(frm)
nb.add(frm, text=e)
```
#### File: Jhon_Mario_Galarza_Lopez/CAMBIO MONEDAS/Util.py
```python
from tkinter import *
from tkinter.ttk import *
#Import the regular-expressions library
import re
#Import text fonts
from tkinter import font
def mostrar(txt, valor, soloLectura=True):
    if soloLectura:
        #Unlock the text box
        txt.configure(state=NORMAL)
    #Clear the text box
    txt.delete(0, END)
    #Assign the value
    txt.insert(0, str(valor))
    if soloLectura:
        #Lock the text box again
        txt.configure(state=DISABLED)
def agregarImagen(ventana, archivo, fila, columna, expandir=1):
    #Load the image
    img=PhotoImage(file = archivo)
    #Show the image in a label
lbl=Label(ventana)
lbl.config(image=img)
lbl.image=img
lbl.grid(row=fila, column=columna, columnspan=expandir)
return lbl
def agregarEtiqueta(ventana, texto, fila, columna, expandir=1):
Label(ventana, text=texto).grid(row=fila, column=columna, columnspan=expandir)
def agregarTexto(ventana, ancho, fila, columna, expandir=1, habilitado=True):
txt=Entry(ventana, width=ancho)
txt.grid(row=fila, column=columna, columnspan=expandir)
if habilitado:
txt.configure(state=NORMAL)
else:
txt.configure(state=DISABLED)
return txt
def agregarLista(ventana, opciones, fila, columna, expandir=1):
cmb=Combobox(ventana)
cmb.grid(row=fila, column=columna, columnspan=expandir)
cmb["values"]=opciones
return cmb
def esReal(texto):
return True if re.match("^[-]?[0-9]+[.]?[0-9]*$", texto) else False
def esEntero(texto):
return True if re.match("^[-]?[0-9]+$", texto) else False
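# Quick illustration of the validators above (values chosen for this sketch):
#   esReal("3.14")  -> True     esReal("abc")    -> False
#   esEntero("-42") -> True     esEntero("3.14") -> False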
def crearToolTip(objetoTkinter, texto):
toolTip = ToolTip(objetoTkinter)
    #Define the events that show/hide the tooltip
def enter(event):
toolTip.mostrar(texto)
def leave(event):
toolTip.ocultar()
objetoTkinter.bind("<Enter>", enter)
objetoTkinter.bind("<Leave>", leave)
def agregarBarra(ventana, imagenes, textosTooltip=None):
frmBarra = Frame(ventana)
frmBarra.pack(side=TOP, fill=X)
botones = []
i = 0
for imagen in imagenes:
        #Load the icon image
img=PhotoImage(file = imagen)
btn = Button(frmBarra, image=img)
btn.image = img
btn.pack(side=LEFT, padx=2, pady=2)
if textosTooltip:
crearToolTip(btn, textosTooltip[i])
i +=1
botones.append(btn)
frmBarra.pack(side=TOP, fill=X)
return botones
def mostrarTabla(ventana, encabezados, datos, tabla=None):
tabla = VistaTabla(ventana, encabezados, datos, tabla)
return tabla.obtenerTabla()
#************************************************************
class VistaTabla(object):
    #Uses ttk.Treeview as a data grid
    #Constructor
    def __init__(varClase, ventana, encabezados, datos, arbol=None):
        varClase.arbol = arbol #widget used to display the table
        varClase.crear(ventana, encabezados, datos)
        varClase.configurar(encabezados, datos)
    #Method that creates the display widgets
    def crear(varClase, ventana, encabezados, datos):
        #Create the container
        frm = Frame(ventana)
        frm.pack(fill=BOTH, expand=True)
        #Create the Treeview widget with 2 scroll bars
        if varClase.arbol == None:
            varClase.arbol = Treeview(columns=encabezados, show="headings")
        vsb = Scrollbar(orient="vertical", command=varClase.arbol.yview)
        hsb = Scrollbar(orient="horizontal", command=varClase.arbol.xview)
        varClase.arbol.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)
        varClase.arbol.grid(column=0, row=0, sticky=N+S+E+W, in_=frm)
        vsb.grid(column=1, row=0, sticky=N+S, in_=frm)
        hsb.grid(column=0, row=1, sticky=E+W, in_=frm)
    #Method that defines the structure of the data to display
    def configurar(varClase, encabezados, datosTabla):
        #Loop over the headers
        for encabezado in encabezados:
            #Assign the header text and the sort command
            varClase.arbol.heading(encabezado, text=encabezado.title(),
                command=lambda c=encabezado: varClase.ordenar(varClase.arbol, c, 0))
            #Fit the column width to the header text
            varClase.arbol.column(encabezado, width=font.Font().measure(encabezado.title()))
        #Clear the tree
        varClase.arbol.delete(*varClase.arbol.get_children())
        #Loop over the data rows
        for fila in datosTabla:
            varClase.arbol.insert("", "end", values=fila)
            #Widen the column if necessary
            for i, dato in enumerate(fila):
                anchoColumna = font.Font().measure(dato)
                if varClase.arbol.column(encabezados[i],width=None)<anchoColumna:
                    varClase.arbol.column(encabezados[i], width=anchoColumna)
    #Method that returns the Treeview widget
    def obtenerTabla(varClase):
        return varClase.arbol
    #Method that sorts the data when a column header is clicked
    def ordenar(varClase, arbol, encabezado, descendente):
        #Get the values to sort
        datos = [(arbol.set(nodo, encabezado), nodo) \
            for nodo in arbol.get_children("")]
        #Sort the data
        datos.sort(reverse=descendente)
        for i, fila in enumerate(datos):
            arbol.move(fila[1], "", i)
        #Swap the header command so the next click sorts in the opposite order
        arbol.heading(encabezado, command=lambda encabezado=encabezado: varClase.ordenar(arbol, encabezado, \
            int(not descendente)))
#************************************************************
class ToolTip(object):
def __init__(varClase, objetoTkinter):
varClase.objetoTkinter = objetoTkinter
varClase.objetoTooltip = None
varClase.id = None
varClase.x = varClase.y = 0
def mostrar(varClase, texto):
        #Show the text as a tooltip
varClase.texto = texto
if varClase.objetoTooltip or not varClase.texto:
return
x, y, cx, cy = varClase.objetoTkinter.bbox("insert")
x = x + varClase.objetoTkinter.winfo_rootx() + 27
y = y + cy + varClase.objetoTkinter.winfo_rooty() +27
varClase.objetoTooltip = tw = Toplevel(varClase.objetoTkinter)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
            # For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except TclError:
pass
lblTooltip = Label(tw, text=varClase.texto, justify=LEFT,
background="#ffffe0", relief=SOLID, borderwidth=1,
font=("tahoma", "8", "normal"))
lblTooltip.pack(ipadx=1)
def ocultar(varClase):
tp = varClase.objetoTooltip
varClase.objetoTooltip = None
if tp:
tp.destroy()
``` |
{
"source": "JhonGalarza/SOLID",
"score": 4
} |
#### File: JhonGalarza/SOLID/CodigoSOLID.py
```python
ANIMAL = int(input("Which animal's characteristics do you want to see? 1.Leon 2.Ballena 3.Tucan? "))
# Base class: behaviour shared by every animal
class Animal:
    def acciones_comun(self):
        return "Eats"
    def sentido_vista(self):
        return "Can see"
# Habitat mixins: each one adds only the behaviour of its own environment
class Animal_Tierra:
    def acciones_Tierra(self):
        return "Walks on four legs"
class Animal_Agua:
    def acciones_Agua(self):
        return "Swims under water"
class Animal_Aire:
    def acciones_Aire(self):
        return "Flies"
# Concrete animals combine the base class with the matching habitat mixin
class Leon(Animal, Animal_Tierra):
    def llamar(self):
        return [self.acciones_comun(), self.sentido_vista(), self.acciones_Tierra()]
class Ballena(Animal, Animal_Agua):
    def llamar(self):
        return [self.acciones_comun(), self.sentido_vista(), self.acciones_Agua()]
class Tucan(Animal, Animal_Aire):
    def llamar(self):
        return [self.acciones_comun(), self.sentido_vista(), self.acciones_Aire()]
if ANIMAL == 1:
    print(Leon().llamar())
elif ANIMAL == 2:
    print(Ballena().llamar())
elif ANIMAL == 3:
    print(Tucan().llamar())
``` |
{
"source": "Jhon-G/Diplom",
"score": 2
} |
#### File: Jhon-G/Diplom/utils.py
```python
import vk
import settings
from telegram import ReplyKeyboardMarkup
def session_api():
session = vk.Session(access_token=settings.VK_TOKEN)
api = vk.API(session, v=5.126)
return api
def main_keyboard():
return ReplyKeyboardMarkup([
['Mutabor'],
['Random']
])
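# Illustrative sketch only: a hypothetical python-telegram-bot handler showing
# how main_keyboard() and session_api() could be wired together. The handler
# name, the owner id, and the wall.get call are assumptions for illustration,
# not taken from this repository.
def mutabor_handler(update, context):
    api = session_api()
    # wall.get is a standard VK API method; the owner id below is a placeholder
    posts = api.wall.get(owner_id=-1, count=1)
    update.message.reply_text(str(posts['count']), reply_markup=main_keyboard())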
``` |
{
"source": "Jhongesell/PyFerret",
"score": 3
} |
#### File: PyFerret/pviewmod/cmndhelperpq.py
```python
import sys
try:
import sip
except ImportError:
import PyQt4.sip as sip
try:
sip.setapi('QVariant', 2)
except Exception:
pass
# First try to import PyQt5, then try PyQt4 if that fails
try:
import PyQt5
QT_VERSION = 5
except ImportError:
import PyQt4
QT_VERSION = 4
# Now that the PyQt version is determined, import the parts
# allowing any import errors to propagate out
if QT_VERSION == 5:
from PyQt5.QtCore import Qt, QPointF, QSizeF
from PyQt5.QtGui import QBrush, QColor, QFont, QPainterPath, QPen
else:
from PyQt4.QtCore import Qt, QPointF, QSizeF
from PyQt4.QtGui import QBrush, QColor, QFont, QPainterPath, QPen
class SidesRectF(object):
'''
Trivial helper class for defining a rectangle with floating point
values for the left-x, top-y, right-x, and bottom-y edges.
'''
def __init__(self, left, top, right, bottom):
'''
Create a SidesRectF with the given left, top, right,
and bottom as float values.
'''
super(SidesRectF, self).__init__()
self.__left = float(left)
self.__top = float(top)
self.__right = float(right)
self.__bottom = float(bottom)
def left(self):
'''
Return the left value as a float.
'''
return self.__left
def setLeft(self, val):
'''
Set the SidesRectF left as a float value of the argument.
'''
self.__left = float(val)
def top(self):
'''
Return the top value as a float.
'''
return self.__top
def setTop(self, val):
'''
Set the SidesRectF top as a float value of the argument.
'''
self.__top = float(val)
def right(self):
'''
Return the right value as a float.
'''
return self.__right
def setRight(self, val):
'''
Set the SidesRectF right as a float value of the argument.
'''
self.__right = float(val)
def bottom(self):
'''
Return the bottom value as a float.
'''
return self.__bottom
def setBottom(self, val):
'''
Set the SidesRectF bottom as a float value of the argument.
'''
self.__bottom = float(val)
class SymbolPath(object):
'''
Trivial helper class for defining a symbol
'''
def __init__(self, painterpath, isfilled):
'''
Create a SymbolPath representing a symbol.
Arguments:
painterpath: the QPainterPath representing this symbol
isfilled: if True, the symbol should be drawn with a
solid brush; if False, the symbol should be
drawn with a solid pen
'''
super(SymbolPath, self).__init__()
self.__painterpath = painterpath
self.__isfilled = isfilled
if isfilled:
try:
self.__painterpath = painterpath.simplified()
except:
pass
def painterPath(self):
'''
Return the QPainterPath for this symbol
'''
return self.__painterpath
def isFilled(self):
'''
Return True if the symbol should be drawn with a solid brush;
return False if the symbol should be drawn with a solid pen.
'''
return self.__isfilled
class CmndHelperPQ(object):
'''
Helper class of static methods for dealing with commands
sent to a PyQt piped viewer.
'''
def __init__(self, viewer):
'''
Creates a cmndpipe command helper. The widget viewer
is only used for determining the default font and for
translation of error messages.
'''
super(CmndHelperPQ, self).__init__()
self.__viewer = viewer
self.__symbolpaths = { }
def getFontFromCmnd(self, fontinfo):
'''
Returns a QFont based on the information in the dictionary
fontinfo.
Recognized keys in the font dictionary are:
"family": font family name (string)
"size": text size in points (1/72 inches)
"italic": italicize? (False/True)
"bold": make bold? (False/True)
"underline": underline? (False/True)
'''
try:
myfont = QFont(fontinfo["family"])
except KeyError:
myfont = self.__viewer.font()
try:
myfont.setPointSizeF(fontinfo["size"])
except KeyError:
pass
try:
myfont.setItalic(fontinfo["italic"])
except KeyError:
pass
try:
myfont.setBold(fontinfo["bold"])
except KeyError:
pass
try:
myfont.setUnderline(fontinfo["underline"])
except KeyError:
pass
return myfont
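    # Example fontinfo dictionary (values are illustrative only):
    #   { "family":"Helvetica", "size":12.0, "italic":False, "bold":True }
    # Any key that is missing simply falls back to the viewer's default font.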
def getBrushFromCmnd(self, brushinfo):
'''
Returns a QBrush based on the information in the dictionary
brushinfo. A ValueError is raised if the value for the
"style" key, if given, is not recognized.
Recognized keys in the fill dictionary are:
"color": color name or 24-bit RGB integer value
(eg, 0xFF0088)
"alpha": alpha value from 0 (transparent) to 255 (opaque)
"style": brush style name ("solid", "dense1" to "dense7",
"none", "hor", "ver", "cross",
"bdiag", "fdiag", "diagcross")
'''
try:
mycolor = self.getColorFromCmnd(brushinfo)
mybrush = QBrush(mycolor)
except KeyError:
mybrush = QBrush()
try:
mystyle = brushinfo["style"]
if mystyle == "solid":
mystyle = Qt.SolidPattern
elif mystyle == "dense1":
mystyle = Qt.Dense1Pattern
elif mystyle == "dense2":
mystyle = Qt.Dense2Pattern
elif mystyle == "dense3":
mystyle = Qt.Dense3Pattern
elif mystyle == "dense4":
mystyle = Qt.Dense4Pattern
elif mystyle == "dense5":
mystyle = Qt.Dense5Pattern
elif mystyle == "dense6":
mystyle = Qt.Dense6Pattern
elif mystyle == "dense7":
mystyle = Qt.Dense7Pattern
elif mystyle == "none":
mystyle = Qt.NoBrush
elif mystyle == "hor":
mystyle = Qt.HorPattern
elif mystyle == "ver":
mystyle = Qt.VerPattern
elif mystyle == "cross":
mystyle = Qt.CrossPattern
elif mystyle == "bdiag":
mystyle = Qt.BDiagPattern
elif mystyle == "fdiag":
mystyle = Qt.FDiagPattern
elif mystyle == "diagcross":
mystyle = Qt.DiagCrossPattern
else:
raise ValueError("Unknown brush style '%s'" % str(mystyle))
mybrush.setStyle(mystyle)
except KeyError:
pass
return mybrush
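    # Example brushinfo dictionary (values are illustrative only):
    #   { "color":0x00FF88, "alpha":192, "style":"cross" }
    # Omitting "color" yields a default QBrush; omitting "style" leaves its style unchanged.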
def getPenFromCmnd(self, peninfo):
'''
Returns a QPen based on the information in the dictionary
peninfo. A ValueError is raised if the value for the
"style", "capstyle", or "joinstyle" key, if given, is not
recognized.
Recognized keys in the outline dictionary are:
"color": color name or 24-bit RGB integer value
(eg, 0xFF0088)
"alpha": alpha value from 0 (transparent) to 255 (opaque)
"width": pen width in points (1/72 inches); possibly
further scaled by the width scaling factor
"style": pen style name ("solid", "dash", "dot", "dashdot",
"dashdotdot")
"capstyle": pen cap style name ("square", "flat", "round")
"joinstyle": pen join style name ("bevel", "miter", "round")
'''
try:
mycolor = self.getColorFromCmnd(peninfo)
mypen = QPen(mycolor)
except KeyError:
mypen = QPen()
try:
penwidth = float(peninfo["width"])
penwidth *= self.__viewer.widthScalingFactor()
mypen.setWidthF(penwidth)
except KeyError:
pass
try:
mystyle = peninfo["style"]
if mystyle == "solid":
mystyle = Qt.SolidLine
elif mystyle == "dash":
mystyle = Qt.DashLine
elif mystyle == "dot":
mystyle = Qt.DotLine
elif mystyle == "dashdot":
mystyle = Qt.DashDotLine
elif mystyle == "dashdotdot":
mystyle = Qt.DashDotDotLine
else:
raise ValueError("Unknown pen style '%s'" % str(mystyle))
mypen.setStyle(mystyle)
except KeyError:
pass
try:
mystyle = peninfo["capstyle"]
if mystyle == "square":
mystyle = Qt.SquareCap
elif mystyle == "flat":
mystyle = Qt.FlatCap
elif mystyle == "round":
mystyle = Qt.RoundCap
else:
raise ValueError("Unknown pen cap style '%s'" % str(mystyle))
mypen.setCapStyle(mystyle)
except KeyError:
pass
try:
mystyle = peninfo["joinstyle"]
if mystyle == "bevel":
mystyle = Qt.BevelJoin
elif mystyle == "miter":
mystyle = Qt.MiterJoin
elif mystyle == "round":
mystyle = Qt.RoundJoin
else:
raise ValueError("Unknown pen join style '%s'" % str(mystyle))
mypen.setJoinStyle(mystyle)
except KeyError:
pass
return mypen
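    # Example peninfo dictionary (values are illustrative only):
    #   { "color":"black", "width":2.0, "style":"dash",
    #     "capstyle":"round", "joinstyle":"round" }
    # "width" is in points and is multiplied by the viewer's width scaling factor.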
def getSymbolFromCmnd(self, symbolinfo):
'''
Returns the SymbolPath for the symbol described in symbolinfo,
which can either be a string or a dictionary.
If symbolinfo is a string, it should be the name of a symbol that
has already been defined, either as a pre-defined symbol or from
a previous symbol definition.
Current pre-defined symbol names are ones involving circles:
'dot': very small filled circle
'dotex': very small filled circle and outer lines of an ex mark
'dotplus': very small filled circle and outer lines of a plus mark
'circle': unfilled circle
'circfill': normal-sized filled circle
'circex': small unfilled circle and outer lines of an ex mark
'circplus': small unfilled circle and outer lines of a plus mark
If symbolinfo is a dictionary, the following key/value pairs are
recognized:
'name' : (string) symbol name (required)
'pts' : (sequence of pairs of floats) vertex coordinates
'fill' : (bool) color-fill symbol?
If 'pts' is given, the value is coordinates that define the symbol
as multiline subpaths in a [-50,50] square for typical size. The
location of the point this symbol represents will be at the center
of the square. A coordinate outside [-100,100] will terminate the
current subpath, and the next valid coordinate will start a new subpath.
This definition will replace an existing symbol with the given name.
If 'pts' is not given, the symbol must already be defined, either as
a pre-defined symbol (see above) or from a previous symbol definition.
Raises:
TypeError - if symbolinfo is neither a string nor a dictionary
KeyError - if symbolinfo is a dictionary and
the key 'name' is not given
ValueError - if there are problems generating the symbol
'''
# get the information about the symbol
if isinstance(symbolinfo, str):
symbol = symbolinfo
pts = None
fill = False
elif isinstance(symbolinfo, dict):
symbol = symbolinfo['name']
pts = symbolinfo.get('pts', None)
fill = symbolinfo.get('fill', False)
else:
raise TypeError('symbolinfo must either be a dictionary or a string')
if pts is None:
# no path given; check if already defined
sympath = self.__symbolpaths.get(symbol)
if sympath is not None:
return sympath
# symbol not defined - if well known, create a SymbolPath for it
if symbol == 'dot':
path = QPainterPath()
path.addEllipse(-10.0, -10.0, 20.0, 20.0)
sympath = SymbolPath(path, True)
elif symbol == 'dotplus':
path = QPainterPath()
path.addEllipse(-10.0, -10.0, 20.0, 20.0)
# filled path, so need to draw "lines" as rectangles
path.addRect( -4.0, -50.0, 8.0, 24.0)
path.addRect( -4.0, 26.0, 8.0, 24.0)
path.addRect(-50.0, -4.0, 24.0, 8.0)
path.addRect( 26.0, -4.0, 24.0, 8.0)
sympath = SymbolPath(path, True)
elif symbol == 'dotex':
path = QPainterPath()
path.addEllipse(-10.0, -10.0, 20.0, 20.0)
# filled path, so need to draw "lines" as rectangles
path.moveTo(-38.18, -32.53)
path.lineTo(-32.53, -38.18)
path.lineTo(-15.56, -21.21)
path.lineTo(-21.21, -15.56)
# moveTo adds an implicit closeSubpath in QPainterPath
path.moveTo(-38.18, 32.53)
path.lineTo(-32.53, 38.18)
path.lineTo(-15.56, 21.21)
path.lineTo(-21.21, 15.56)
# moveTo adds an implicit closeSubpath in QPainterPath
path.moveTo( 38.18, -32.53)
path.lineTo( 32.53, -38.18)
path.lineTo( 15.56, -21.21)
path.lineTo( 21.21, -15.56)
# moveTo adds an implicit closeSubpath in QPainterPath
path.moveTo( 38.18, 32.53)
path.lineTo( 32.53, 38.18)
path.lineTo( 15.56, 21.21)
path.lineTo( 21.21, 15.56)
# Qt closes the subpath automatically
sympath = SymbolPath(path, True)
elif symbol == 'circle':
path = QPainterPath()
path.addEllipse(-35.0, -35.0, 70.0, 70.0)
sympath = SymbolPath(path, False)
elif symbol == 'circfill':
path = QPainterPath()
path.addEllipse(-39.0, -39.0, 78.0, 78.0)
sympath = SymbolPath(path, True)
elif symbol == 'circplus':
path = QPainterPath()
path.addEllipse(-20.0, -20.0, 40.0, 40.0)
# not a filled path, so just draw the lines
path.moveTo( 0.0, -50.0)
path.lineTo( 0.0, -20.0)
path.moveTo( 0.0, 50.0)
path.lineTo( 0.0, 20.0)
path.moveTo(-50.0, 0.0)
path.lineTo(-20.0, 0.0)
path.moveTo( 50.0, 0.0)
path.lineTo( 20.0, 0.0)
sympath = SymbolPath(path, False)
elif symbol == 'circex':
path = QPainterPath()
path.addEllipse(-20.0, -20.0, 40.0, 40.0)
# not a filled path, so just draw the lines
path.moveTo(-35.35, -35.35)
path.lineTo(-14.15, -14.15)
path.moveTo(-35.35, 35.35)
path.lineTo(-14.15, 14.15)
path.moveTo( 35.35, -35.35)
path.lineTo( 14.15, -14.15)
path.moveTo( 35.35, 35.35)
path.lineTo( 14.15, 14.15)
sympath = SymbolPath(path, False)
else:
raise ValueError("Unknown symbol '%s'" % str(symbol))
else:
# define (or redefine) a symbol from the given path
try:
coords = [ [ float(val) for val in coord ] for coord in pts ]
if not coords:
raise ValueError
for crd in coords:
if len(crd) != 2:
raise ValueError
except Exception:
raise ValueError('pts, if given, must be a sequence of pairs of numbers')
path = QPainterPath()
somethingdrawn = False
newstart = True
for (xval, yval) in coords:
# flip so positive y is up
yval *= -1.0
if (xval < -100.0) or (xval > 100.0) or (yval < -100.0) or (yval > 100.0):
# end the current subpath
newstart = True
elif newstart:
# start a new subpath; moveTo adds an implicit closeSubpath in QPainterPath
path.moveTo(xval, yval)
newstart = False
else:
# continue the current subpath
path.lineTo(xval, yval)
somethingdrawn = True
if not somethingdrawn:
del path
raise ValueError('symbol definition does not contain any drawn lines')
# Qt closes the (sub)path automatically
sympath = SymbolPath(path, fill)
# save and return the SymbolPath
self.__symbolpaths[symbol] = sympath
return sympath
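    # Example symbolinfo dictionary defining a filled triangle (illustrative only):
    #   { "name":"tri", "fill":True,
    #     "pts":( (0.0, 50.0), (50.0, -50.0), (-50.0, -50.0) ) }
    # Vertices live in a [-50,50] square; positive y in "pts" is drawn upward.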
def getSizeFromCmnd(self, sizeinfo):
'''
Returns a QSizeF based on the information in the dictionary
sizeinfo. Recognized keys are "width" and "height", and
correspond to those float values in the QSizeF. Values not
given in sizeinfo are assigned as zero in the returned QSizeF.
'''
myrect = QSizeF(0.0, 0.0)
try:
myrect.setWidth(float(sizeinfo["width"]))
except KeyError:
pass
try:
myrect.setHeight(float(sizeinfo["height"]))
except KeyError:
pass
return myrect
def getSidesFromCmnd(self, rectinfo):
'''
        Returns a SidesRectF based on the information in the dictionary
        rectinfo. Recognized keys are "left", "top", "right", and "bottom",
        and correspond to those float values in the SidesRectF. Default
        values: "left": 0.0, "top": 0.0, "right":1.0, "bottom":1.0
'''
myrect = SidesRectF(left=0.0, top=0.0, right=1.0, bottom=1.0)
try:
myrect.setLeft(float(rectinfo["left"]))
except KeyError:
pass
try:
myrect.setTop(float(rectinfo["top"]))
except KeyError:
pass
try:
myrect.setRight(float(rectinfo["right"]))
except KeyError:
pass
try:
myrect.setBottom(float(rectinfo["bottom"]))
except KeyError:
pass
return myrect
def getColorFromCmnd(self, colorinfo):
'''
Returns a QColor based on the information in the dictionary
colorinfo. Raises a KeyError if the "color" key is not given.
Recognized keys are:
"color": color name or 24-bit RGB integer value
(eg, 0xFF0088)
"alpha": alpha value from 0 (transparent) to 255 (opaque)
if viewer.ignoreAlpha True, this value is ignored
'''
colordata = colorinfo["color"]
mycolor = QColor(colordata)
if not mycolor.isValid():
raise ValueError("Invalid color '%s'" % str(colordata))
if not self.__viewer.ignoreAlpha():
try:
mycolor.setAlpha(int(colorinfo["alpha"]))
except KeyError:
pass
return mycolor
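    # Example colorinfo dictionary (values are illustrative only):
    #   { "color":0xFF0088, "alpha":128 }
    # "color" may also be a name such as "white"; "alpha" is ignored when the
    # viewer was told to ignore alpha values.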
def computeARGB32PreMultInt(self, color):
'''
Returns the Format_ARGB32_Premultiplied integer value
of the given QColor.
'''
(redint, greenint, blueint, alphaint) = color.getRgb()
if self.__viewer.ignoreAlpha():
alphaint = 255
elif (alphaint < 255):
# Scale the RGB values by the alpha value
alphafactor = alphaint / 255.0
redint = int( redint * alphafactor + 0.5 )
if redint > alphaint:
redint = alphaint
greenint = int( greenint * alphafactor + 0.5 )
if greenint > alphaint:
greenint = alphaint
blueint = int( blueint * alphafactor + 0.5 )
if blueint > alphaint:
blueint = alphaint
fillint = ((alphaint * 256 + redint) * 256 + \
greenint) * 256 + blueint
return fillint
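    # Worked example (illustrative): QColor(255, 0, 0, 128) scales red down to
    # 128, giving the premultiplied integer 0x80800000
    # (A=0x80, R=0x80, G=0x00, B=0x00).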
```
#### File: PyFerret/pviewmod/pipedimagerpq.py
```python
from __future__ import print_function
import sys
import os
import time
import signal
try:
import sip
except ImportError:
import PyQt4.sip as sip
try:
sip.setapi('QVariant', 2)
except Exception:
pass
# First try to import PyQt5, then try PyQt4 if that fails
try:
import PyQt5
QT_VERSION = 5
except ImportError:
import PyQt4
QT_VERSION = 4
# Now that the PyQt version is determined, import the parts
# allowing any import errors to propagate out
if QT_VERSION == 5:
from PyQt5.QtCore import Qt, QPointF, QRectF, QSize, QTimer
from PyQt5.QtGui import QBrush, QColor, QImage, QPainter, \
QPalette, QPen, QPixmap, QPolygonF
from PyQt5.QtWidgets import QAction, QApplication, QDialog, \
QFileDialog, QLabel, QMainWindow, \
QMessageBox, QPushButton, QScrollArea
else:
from PyQt4.QtCore import Qt, QPointF, QRectF, QSize, QTimer
from PyQt4.QtGui import QAction, QApplication, QBrush, QColor, QDialog, \
QFileDialog, QImage, QLabel, QMainWindow, \
QMessageBox, QPainter, QPalette, QPen, QPixmap, \
QPolygonF, QPushButton, QScrollArea
import multiprocessing
from pipedviewer import WINDOW_CLOSED_MESSAGE
from pipedviewer.cmndhelperpq import CmndHelperPQ
from pipedviewer.scaledialogpq import ScaleDialogPQ
class PipedImagerPQ(QMainWindow):
'''
A PyQt graphics viewer that receives images and commands through
a pipe.
A command is a dictionary with string keys. For example,
{ "action":"save",
"filename":"ferret.png",
"fileformat":"png" }
The command { "action":"exit" } will shutdown the viewer.
'''
def __init__(self, cmndpipe, rspdpipe):
'''
Create a PyQt viewer which reads commands from the Pipe
cmndpipe and writes responses back to rspdpipe.
'''
super(PipedImagerPQ, self).__init__()
self.__cmndpipe = cmndpipe
self.__rspdpipe = rspdpipe
# ignore Ctrl-C
signal.signal(signal.SIGINT, signal.SIG_IGN)
# unmodified image for creating the scene
self.__sceneimage = None
# bytearray of data for the above image
self.__scenedata = None
# flag set if in the process of reading image data from commands
self.__loadingimage = False
# width and height of the unmodified scene image
# when the image is defined
# initialize the width and height to values that will create
# a viewer (mainWindow) of the right size
self.__scenewidth = int(10.8 * self.physicalDpiX())
self.__sceneheight = int(8.8 * self.physicalDpiY())
# by default pay attention to any alpha channel values in colors
self.__noalpha = False
# initial default color for the background (opaque white)
self.__lastclearcolor = QColor(0xFFFFFF)
self.__lastclearcolor.setAlpha(0xFF)
# scaling factor for creating the displayed scene
self.__scalefactor = 1.0
# automatically adjust the scaling factor to fit the window frame?
self.__autoscale = True
# minimum label width and height (for minimum scaling factor)
# and minimum image width and height (for error checking)
self.__minsize = 128
# create the label, that will serve as the canvas, in a scrolled area
self.__scrollarea = QScrollArea(self)
self.__label = QLabel(self.__scrollarea)
# set the initial label size and other values for the scrolled area
self.__label.setMinimumSize(self.__scenewidth, self.__sceneheight)
self.__label.resize(self.__scenewidth, self.__sceneheight)
# setup the scrolled area
self.__scrollarea.setWidget(self.__label)
self.__scrollarea.setBackgroundRole(QPalette.Dark)
self.setCentralWidget(self.__scrollarea)
# default file name and format for saving the image
self.__lastfilename = "ferret.png"
self.__lastformat = "png"
# command helper object
self.__helper = CmndHelperPQ(self)
# create the menubar
self.__scaleact = QAction(self.tr("&Scale"), self,
shortcut=self.tr("Ctrl+S"),
statusTip=self.tr("Scale the image (canvas and image change size)"),
triggered=self.inquireSceneScale)
self.__saveact = QAction(self.tr("Save &As..."), self,
shortcut=self.tr("Ctrl+A"),
statusTip=self.tr("Save the image to file"),
triggered=self.inquireSaveFilename)
self.__redrawact = QAction(self.tr("&Redraw"), self,
shortcut=self.tr("Ctrl+R"),
statusTip=self.tr("Clear and redraw the image"),
triggered=self.redrawScene)
self.__aboutact = QAction(self.tr("&About"), self,
statusTip=self.tr("Show information about this viewer"),
triggered=self.aboutMsg)
self.__aboutqtact = QAction(self.tr("About &Qt"), self,
statusTip=self.tr("Show information about the Qt library"),
triggered=self.aboutQtMsg)
self.createMenus()
# set the initial size of the viewer
self.__framedelta = 4
mwwidth = self.__scenewidth + self.__framedelta
mwheight = self.__sceneheight + self.__framedelta \
+ self.menuBar().height() \
+ self.statusBar().height()
self.resize(mwwidth, mwheight)
# check the command queue any time there are no window events to deal with
self.__timer = QTimer(self)
self.__timer.timeout.connect(self.checkCommandPipe)
self.__timer.setInterval(0)
self.__timer.start()
def createMenus(self):
'''
Create the menu items for the viewer
using the previously created actions.
'''
menuBar = self.menuBar()
sceneMenu = menuBar.addMenu(menuBar.tr("&Image"))
sceneMenu.addAction(self.__scaleact)
sceneMenu.addAction(self.__saveact)
sceneMenu.addAction(self.__redrawact)
helpMenu = menuBar.addMenu(menuBar.tr("&Help"))
helpMenu.addAction(self.__aboutact)
helpMenu.addAction(self.__aboutqtact)
def resizeEvent(self, event):
'''
Monitor resizing in case auto-scaling of the image is selected.
'''
if self.__autoscale:
if self.autoScaleScene():
# continue with the window resize
event.accept()
else:
# another resize coming in, so ignore this one
event.ignore()
else:
# continue with the window resize
event.accept()
def closeEvent(self, event):
'''
Clean up and send the WINDOW_CLOSED_MESSAGE on the response pipe
before closing the window.
'''
self.__timer.stop()
self.__cmndpipe.close()
try:
try:
self.__rspdpipe.send(WINDOW_CLOSED_MESSAGE)
finally:
self.__rspdpipe.close()
except Exception:
pass
event.accept()
def exitViewer(self):
'''
Close and exit the viewer.
'''
self.close()
def aboutMsg(self):
QMessageBox.about(self, self.tr("About PipedImagerPQ"),
self.tr("\n" \
"PipedImagerPQ is a graphics viewer application that receives its " \
"displayed image and commands primarily from another application " \
"through a pipe. A limited number of commands are provided by the " \
"viewer itself to allow saving and some manipulation of the " \
"displayed image. The controlling application, however, may be " \
"unaware of these modifications made to the image. " \
"\n\n" \
"PipedImagerPQ was developed by the Thermal Modeling and Analysis " \
"Project (TMAP) of the National Oceanographic and Atmospheric " \
"Administration's (NOAA) Pacific Marine Environmental Lab (PMEL). "))
def aboutQtMsg(self):
QMessageBox.aboutQt(self, self.tr("About Qt"))
def ignoreAlpha(self):
'''
Return whether the alpha channel in colors should always be ignored.
'''
return self.__noalpha
def updateScene(self):
'''
Clear the displayed scene using self.__lastclearcolor,
then draw the scaled current image.
'''
# get the scaled scene size
labelwidth = int(self.__scalefactor * self.__scenewidth + 0.5)
labelheight = int(self.__scalefactor * self.__sceneheight + 0.5)
# Create the new pixmap for the label to display
newpixmap = QPixmap(labelwidth, labelheight)
newpixmap.fill(self.__lastclearcolor)
if self.__sceneimage != None:
# Draw the scaled image to the pixmap
mypainter = QPainter(newpixmap)
trgrect = QRectF(0.0, 0.0, float(labelwidth),
float(labelheight))
srcrect = QRectF(0.0, 0.0, float(self.__scenewidth),
float(self.__sceneheight))
mypainter.drawImage(trgrect, self.__sceneimage, srcrect, Qt.AutoColor)
mypainter.end()
# Assign the new pixmap to the label
self.__label.setPixmap(newpixmap)
# set the label size and values
# so the scrollarea knows of the new size
self.__label.setMinimumSize(labelwidth, labelheight)
self.__label.resize(labelwidth, labelheight)
# update the label from the new pixmap
self.__label.update()
def clearScene(self, bkgcolor=None):
'''
Deletes the scene image and fills the label with bkgcolor.
If bkgcolor is None or an invalid color, the color used is
the one used from the last clearScene or redrawScene call
with a valid color (or opaque white if a color has never
been specified).
'''
# get the color to use for clearing (the background color)
if bkgcolor:
if bkgcolor.isValid():
self.__lastclearcolor = bkgcolor
# Remove the image and its bytearray
self.__sceneimage = None
self.__scenedata = None
# Update the scene label using the current clearing color and image
self.updateScene()
def redrawScene(self, bkgcolor=None):
'''
Clear and redraw the displayed scene.
'''
# get the background color
if bkgcolor:
if bkgcolor.isValid():
self.__lastclearcolor = bkgcolor
# Update the scene label using the current clearing color and image
QApplication.setOverrideCursor(Qt.WaitCursor)
self.statusBar().showMessage( self.tr("Redrawing image") )
try:
self.updateScene()
finally:
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
def resizeScene(self, width, height):
'''
Resize the scene to the given width and height in units of pixels.
        If the size changes, this deletes the current image and clears the
displayed scene.
'''
newwidth = int(width + 0.5)
if newwidth < self.__minsize:
newwidth = self.__minsize
newheight = int(height + 0.5)
if newheight < self.__minsize:
newheight = self.__minsize
if (newwidth != self.__scenewidth) or (newheight != self.__sceneheight):
# set the new size for the empty scene
self.__scenewidth = newwidth
self.__sceneheight = newheight
# If auto-scaling, set scaling factor to 1.0 and resize the window
if self.__autoscale:
self.__scalefactor = 1.0
barheights = self.menuBar().height() + self.statusBar().height()
self.resize(newwidth + self.__framedelta,
newheight + self.__framedelta + barheights)
# clear the scene with the last clearing color
self.clearScene(None)
def loadNewSceneImage(self, imageinfo):
'''
Create a new scene image from the information given in this
and subsequent dictionaries imageinfo. The image is created
from multiple calls to this function since there is a limit
on the size of a single object passed through a pipe.
The first imageinfo dictionary given when creating an image
must define the following key and value pairs:
"width": width of the image in pixels
"height": height of the image in pixels
"stride": number of bytes in one line of the image
in the bytearray
The scene image data is initialized to all zero (transparent)
at this time.
This initialization call must be followed by (multiple) calls
to this method with imageinfo dictionaries defining the key
and value pairs:
"blocknum": data block number (1, 2, ... numblocks)
"numblocks": total number of image data blocks
"startindex": index in the bytearray of image data
where this block of image data starts
"blockdata": this block of data as a bytearray
On receipt of the last block of data (blocknum == numblocks)
the scene image will be created and the scene will be updated.
Raises:
KeyError - if one of the above keys is not given
ValueError - if a value for a key is not valid
'''
if not self.__loadingimage:
# prepare for a new image data from subsequent calls
# get dimensions of the new image
myimgwidth = int( imageinfo["width"] )
myimgheight = int( imageinfo["height"] )
myimgstride = int( imageinfo["stride"] )
if (myimgwidth < self.__minsize) or (myimgheight < self.__minsize):
raise ValueError("image width and height cannot be less than %s" % str(self.__minsize))
# Newer PyQt versions allow separate specification of the stride
if myimgstride != 4 * myimgwidth:
raise ValueError("image stride is not four times the image width")
# create the bytearray to contain the new scene data
# automatically initialized to zero
self.__scenedata = bytearray(myimgstride * myimgheight)
self.__scenewidth = myimgwidth
self.__sceneheight = myimgheight
# set the flag for subsequent calls to this method
self.__loadingimage = True
# change the cursor to warn the user this may take some time
QApplication.setOverrideCursor(Qt.WaitCursor)
# put up an appropriate status message
self.statusBar().showMessage( self.tr("Loading new image") )
return
# loading an image; add the next block of data
myblocknum = int( imageinfo["blocknum"] )
mynumblocks = int( imageinfo["numblocks"] )
mystartindex = int( imageinfo["startindex"] )
myblockdata = imageinfo["blockdata"]
if (myblocknum < 1) or (myblocknum > mynumblocks):
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
raise ValueError("invalid image data block number or number of blocks")
if (mystartindex < 0) or (mystartindex >= len(self.__scenedata)):
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
raise ValueError("invalid start index for an image data block")
myblocksize = len(myblockdata)
myendindex = mystartindex + myblocksize
if (myblocksize < 1) or (myendindex > len(self.__scenedata)):
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
raise ValueError("invalid length of an image data block")
# update the status message to show progress
self.statusBar().showMessage( self.tr("Loading new image (block %s of %s)" % \
(str(myblocknum),str(mynumblocks))) )
# assign the data
self.__scenedata[mystartindex:myendindex] = myblockdata
# if this is the last block of data, create and display the scene image
if myblocknum == mynumblocks:
self.__loadingimage = False
self.statusBar().showMessage( self.tr("Creating new image") )
try:
self.__sceneimage = QImage(self.__scenedata,
self.__scenewidth,
self.__sceneheight,
QImage.Format_ARGB32_Premultiplied)
self.statusBar().showMessage( self.tr("Drawing new image") )
# update the displayed scene in the label
self.updateScene()
finally:
# clear the status message
self.statusBar().clearMessage()
# restore the cursor back to normal
QApplication.restoreOverrideCursor()
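    # Example command sequence (illustrative) for a 500 x 500 ARGB32 image,
    # whose stride is 4 * 500 = 2000 bytes:
    #   { "action":"newImage", "width":500, "height":500, "stride":2000 }
    #   { "action":"newImage", "blocknum":1, "numblocks":N,
    #     "startindex":0, "blockdata":<bytearray> }
    #   ... blocks 2 through N ...
    # The scene image is built and drawn once blocknum == numblocks.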
def inquireSceneScale(self):
'''
Prompt the user for the desired scaling factor for the scene.
'''
labelwidth = int(self.__scenewidth * self.__scalefactor + 0.5)
labelheight = int(self.__sceneheight * self.__scalefactor + 0.5)
scaledlg = ScaleDialogPQ(self.__scalefactor, labelwidth, labelheight,
self.__minsize, self.__minsize, self.__autoscale, self)
if scaledlg.exec_():
(newscale, autoscale, okay) = scaledlg.getValues()
if okay:
if autoscale:
self.__autoscale = True
self.autoScaleScene()
else:
self.__autoscale = False
self.scaleScene(newscale, False)
def autoScaleScene(self):
'''
Selects a scaling factor that maximizes the scene within the window
frame without requiring scroll bars. Intended to be called when
        the window size is changed by the user and auto-scaling is turned on.
Returns:
True if scaling of this scene is done (no window resize)
            False if a window resize command was issued
'''
barheights = self.menuBar().height() + self.statusBar().height()
# get the size for the central widget
cwheight = self.height() - barheights - self.__framedelta
heightsf = float(cwheight) / float(self.__sceneheight)
cwwidth = self.width() - self.__framedelta
widthsf = float(cwwidth) / float(self.__scenewidth)
if heightsf < widthsf:
factor = heightsf
else:
factor = widthsf
newcwheight = int(factor * self.__sceneheight + 0.5)
newcwwidth = int(factor * self.__scenewidth + 0.5)
# if the window does not have the correct aspect ratio, resize it so
# it will; this will generate another call to this method. Otherwise,
# scale the scene and be done.
if self.isMaximized() or \
( (abs(cwheight - newcwheight) <= self.__framedelta) and \
(abs(cwwidth - newcwwidth) <= self.__framedelta) ):
self.scaleScene(factor, False)
return True
else:
self.resize(newcwwidth + self.__framedelta,
newcwheight + self.__framedelta + barheights)
return False
def scaleScene(self, factor, resizewin):
'''
Scales both the horizontal and vertical directions by factor.
Scaling factors are not accumulative. So if the scene was
already scaled, that scaling is "removed" before this scaling
factor is applied. If resizewin is True, the main window is
resized to accommodate this new scaled scene size.
If factor is zero, just switch to auto-scaling at the current
window size. If factor is negative, rescale using the absolute
value (possibly resizing the window) then switch to auto-scaling.
'''
fltfactor = float(factor)
if fltfactor != 0.0:
if resizewin:
# from command - turn off autoscaling for the following
# then turn back on if appropriate
self.__autoscale = False
newfactor = abs(fltfactor)
newlabwidth = int(newfactor * self.__scenewidth + 0.5)
newlabheight = int(newfactor * self.__sceneheight + 0.5)
if (newlabwidth < self.__minsize) or (newlabheight < self.__minsize):
# Set to minimum size
if self.__scenewidth <= self.__sceneheight:
newfactor = float(self.__minsize) / float(self.__scenewidth)
else:
newfactor = float(self.__minsize) / float(self.__sceneheight)
newlabwidth = int(newfactor * self.__scenewidth + 0.5)
newlabheight = int(newfactor * self.__sceneheight + 0.5)
oldlabwidth = int(self.__scalefactor * self.__scenewidth + 0.5)
oldlabheight = int(self.__scalefactor * self.__sceneheight + 0.5)
if (newlabwidth != oldlabwidth) or (newlabheight != oldlabheight):
# Set the new scaling factor
self.__scalefactor = newfactor
# Update the scene label using the current clearing color and image
QApplication.setOverrideCursor(Qt.WaitCursor)
self.statusBar().showMessage( self.tr("Scaling image") )
try:
self.updateScene()
finally:
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
if resizewin:
# resize the main window (if possible)
barheights = self.menuBar().height() + self.statusBar().height()
mwheight = newlabheight + barheights + self.__framedelta
mwwidth = newlabwidth + self.__framedelta
# Do not exceed the available real estate on the screen.
# If autoscaling is in effect, the resize will trigger
# any required adjustments.
scrnrect = QApplication.desktop().availableGeometry()
if mwwidth > 0.95 * scrnrect.width():
mwwidth = int(0.9 * scrnrect.width() + 0.5)
if mwheight > 0.95 * scrnrect.height():
mwheight = int(0.9 * scrnrect.height() + 0.5)
self.resize(mwwidth, mwheight)
if fltfactor <= 0.0:
# From command - turn on autoscaling
self.__autoscale = True
self.autoScaleScene();
def inquireSaveFilename(self):
'''
Prompt the user for the name of the file into which to save the scene.
The file format will be determined from the filename extension.
'''
formattypes = [ ( "png",
"PNG - Portable Networks Graphics (*.png)" ),
( "jpeg",
"JPEG - Joint Photographic Experts Group (*.jpeg *.jpg *.jpe)" ),
( "tiff",
"TIFF - Tagged Image File Format (*.tiff *.tif)" ),
( "bmp",
"BMP - Windows Bitmap (*.bmp)" ),
( "ppm",
"PPM - Portable Pixmap (*.ppm)" ),
( "xpm",
"XPM - X11 Pixmap (*.xpm)" ),
( "xbm",
"XBM - X11 Bitmap (*.xbm)" ), ]
filters = ";;".join( [ t[1] for t in formattypes ] )
if QT_VERSION == 5:
# getSaveFileName; tr returns Python unicode strings in PyQt5/Python3
(fileName, fileFilter) = QFileDialog.getSaveFileName(self,
self.tr("Save the current image as "), self.tr(self.__lastfilename), self.tr(filters))
else:
# getSaveFileNameAndFilter; tr returns QStrings in PyQt4
(fileName, fileFilter) = QFileDialog.getSaveFileNameAndFilter(self,
self.tr("Save the current image as "), self.tr(self.__lastfilename), self.tr(filters))
if fileName:
for (fmt, fmtQName) in formattypes:
if self.tr(fmtQName) == fileFilter:
fileFormat = fmt
break
else:
raise RuntimeError("Unexpected file format name '%s'" % fileFilter)
self.saveSceneToFile(fileName, fileFormat, None, None)
self.__lastfilename = fileName
self.__lastformat = fileFormat
def saveSceneToFile(self, filename, imageformat, transparent, rastsize):
'''
Save the current scene to the named file.
If imageformat is empty or None, the format is guessed from
the filename extension.
If transparent is False, the entire scene is initialized
to the last clearing color.
If given, rastsize is the pixels size of the saved image.
If rastsize is not given, the saved image will be saved
at the current scaled image size.
'''
# This could be called when there is no image present.
# If this is the case, ignore the call.
if ( self.__sceneimage == None ):
return
if not imageformat:
# Guess the image format from the filename extension
# This is only done to silently change gif to png
fileext = ( os.path.splitext(filename)[1] ).lower()
if fileext == '.gif':
myformat = 'gif'
else:
# let QImage figure out the format
myformat = None
else:
myformat = imageformat.lower()
if myformat == 'gif':
# Silently convert gif filename and format to png
myformat = 'png'
myfilename = os.path.splitext(filename)[0] + ".png"
else:
myfilename = filename
        # set the cursor and status message to indicate a save is happening
QApplication.setOverrideCursor(Qt.WaitCursor)
self.statusBar().showMessage( self.tr("Saving image") )
try:
if rastsize:
imagewidth = int(rastsize.width() + 0.5)
imageheight = int(rastsize.height() + 0.5)
else:
imagewidth = int(self.__scenewidth * self.__scalefactor + 0.5)
imageheight = int(self.__sceneheight * self.__scalefactor + 0.5)
myimage = QImage( QSize(imagewidth, imageheight),
QImage.Format_ARGB32_Premultiplied )
# Initialize the image
if not transparent:
# Clear the image with self.__lastclearcolor
fillint = self.__helper.computeARGB32PreMultInt(self.__lastclearcolor)
else:
fillint = 0
myimage.fill(fillint)
# draw the scaled scene to this QImage
mypainter = QPainter(myimage)
trgrect = QRectF(0.0, 0.0, float(imagewidth),
float(imageheight))
srcrect = QRectF(0.0, 0.0, float(self.__scenewidth),
float(self.__sceneheight))
mypainter.drawImage(trgrect, self.__sceneimage, srcrect, Qt.AutoColor)
mypainter.end()
# save the image to file
if not myimage.save(myfilename, myformat):
raise ValueError("Unable to save the plot as " + myfilename)
finally:
self.statusBar().clearMessage()
QApplication.restoreOverrideCursor()
def checkCommandPipe(self):
'''
Get and perform commands waiting in the pipe.
Stop when no more commands or if more than 50
milliseconds have passed.
'''
try:
starttime = time.clock()
# Wait up to 2 milliseconds waiting for a command.
# This prevents unchecked spinning when there is
# nothing to do (Qt immediately calling this method
# again only for this method to immediately return).
while self.__cmndpipe.poll(0.002):
cmnd = self.__cmndpipe.recv()
self.processCommand(cmnd)
# Continue to try to process commands until
# more than 50 milliseconds have passed.
# This reduces Qt overhead when there are lots
# of commands waiting in the queue.
if (time.clock() - starttime) > 0.050:
break
except EOFError:
# Assume PyFerret has shut down
self.exitViewer()
except Exception:
# Some problem, but presumably still functional
(exctype, excval) = sys.exc_info()[:2]
try:
if excval:
self.__rspdpipe.send("**ERROR %s: %s" % (str(exctype), str(excval)))
else:
self.__rspdpipe.send("**ERROR %s" % str(exctype))
except Exception:
pass
def processCommand(self, cmnd):
'''
Examine the action of cmnd and call the appropriate
method to deal with this command. Raises a KeyError
if the "action" key is missing.
'''
try:
cmndact = cmnd["action"]
except KeyError:
raise ValueError("Unknown command '%s'" % str(cmnd))
if cmndact == "clear":
try:
bkgcolor = self.__helper.getColorFromCmnd(cmnd)
except KeyError:
bkgcolor = None
self.clearScene(bkgcolor)
elif cmndact == "exit":
self.exitViewer()
elif cmndact == "hide":
self.showMinimized()
elif cmndact == "screenInfo":
scrnrect = QApplication.desktop().availableGeometry()
info = ( self.physicalDpiX(), self.physicalDpiY(),
scrnrect.width(), scrnrect.height() )
self.__rspdpipe.send(info)
elif cmndact == "redraw":
try:
bkgcolor = self.__helper.getColorFromCmnd(cmnd)
except KeyError:
bkgcolor = None
self.redrawScene(bkgcolor)
elif cmndact == "rescale":
self.scaleScene(float(cmnd["factor"]), True)
elif cmndact == "resize":
mysize = self.__helper.getSizeFromCmnd(cmnd)
self.resizeScene(mysize.width(), mysize.height())
elif cmndact == "newImage":
self.loadNewSceneImage(cmnd)
elif cmndact == "save":
filename = cmnd["filename"]
fileformat = cmnd.get("fileformat", None)
try:
bkgcolor = self.__helper.getColorFromCmnd(cmnd)
except KeyError:
bkgcolor = None
rastsize = self.__helper.getSizeFromCmnd(cmnd["rastsize"])
self.saveSceneToFile(filename, fileformat, bkgcolor, rastsize)
elif cmndact == "setTitle":
self.setWindowTitle(cmnd["title"])
elif cmndact == "imgname":
myvalue = cmnd.get("name", None)
if myvalue:
self.__lastfilename = myvalue
myvalue = cmnd.get("format", None)
if myvalue:
self.__lastformat = myvalue.lower()
elif cmndact == "show":
if not self.isVisible():
self.show()
elif cmndact == "noalpha":
# ignore any alpha channel values in colors
self.__noalpha = True
else:
raise ValueError("Unknown command action %s" % str(cmndact))
class PipedImagerPQProcess(multiprocessing.Process):
'''
A Process specifically tailored for creating a PipedImagerPQ.
'''
def __init__(self, cmndpipe, rspdpipe):
'''
Create a Process that will produce a PipedImagerPQ
attached to the given Pipes when run.
'''
super(PipedImagerPQProcess,self).__init__(group=None, target=None, name='PipedImagerPQ')
self.__cmndpipe = cmndpipe
self.__rspdpipe = rspdpipe
self.__app = None
self.__viewer = None
def run(self):
'''
Create a PipedImagerPQ that is attached
to the Pipe of this instance.
'''
self.__app = QApplication(["PipedImagerPQ"])
self.__viewer = PipedImagerPQ(self.__cmndpipe, self.__rspdpipe)
myresult = self.__app.exec_()
sys.exit(myresult)
#
# The following are for testing this module
#
class _CommandSubmitterPQ(QDialog):
'''
Testing dialog for controlling the addition of commands to a pipe.
Used for testing PipedImagerPQ in the same process as the viewer.
'''
def __init__(self, parent, cmndpipe, rspdpipe, cmndlist):
'''
Create a QDialog with a single QPushButton for controlling
the submission of commands from cmndlist to cmndpipe.
'''
super(_CommandSubmitterPQ,self).__init__(parent)
self.__cmndlist = cmndlist
self.__cmndpipe = cmndpipe
self.__rspdpipe = rspdpipe
self.__nextcmnd = 0
self.__button = QPushButton("Submit next command", self)
self.__button.pressed.connect(self.submitNextCommand)
self.show()
def submitNextCommand(self):
'''
Submit the next command from the command list to the command pipe,
or shutdown if there are no more commands to submit.
'''
try:
cmndstr = str(self.__cmndlist[self.__nextcmnd])
if len(cmndstr) > 188:
cmndstr = cmndstr[:188] + '...'
print("Command: %s" % cmndstr)
self.__cmndpipe.send(self.__cmndlist[self.__nextcmnd])
self.__nextcmnd += 1
while self.__rspdpipe.poll(0.1):
print("Response: %s" % str(self.__rspdpipe.recv()))
except IndexError:
self.__rspdpipe.close()
self.__cmndpipe.close()
self.close()
def _test_pipedimagerpq():
# vertices of a pentagon (roughly) centered in a 1000 x 1000 square
pentagonpts = ( (504.5, 100.0), (100.0, 393.9),
(254.5, 869.4), (754.5, 869.4),
(909.0, 393.9), )
linepts = ( (350, 50),
(200, 150),
(400, 250),
(300, 350),
(150, 250),
(100, 450) )
# start PyQt
testapp = QApplication(["PipedImagerPQ"])
# create the list of commands to submit
drawcmnds = []
drawcmnds.append( { "action":"setTitle", "title":"Tester" } )
drawcmnds.append( { "action":"show" } )
drawcmnds.append( { "action":"clear", "color":"black"} )
drawcmnds.append( { "action":"screenInfo"} )
# create the image to be displayed
testimage = QImage(500, 500, QImage.Format_ARGB32_Premultiplied)
# initialize a black background
testimage.fill(0xFF000000)
# draw some things in the image
testpainter = QPainter(testimage)
testpainter.setBrush( QBrush(QColor(0, 255, 0, 128), Qt.SolidPattern) )
testpainter.setPen( QPen(QBrush(QColor(255, 0, 0, 255), Qt.SolidPattern),
5.0, Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin) )
testpainter.drawRect( QRectF(5.0, 255.0, 240.0, 240.0) )
testpainter.setBrush( QBrush(QColor(0, 0, 255, 255), Qt.SolidPattern) )
testpainter.setPen( QPen(QBrush(QColor(0, 0, 0, 255), Qt.SolidPattern),
5.0, Qt.DashLine, Qt.RoundCap, Qt.RoundJoin) )
testpainter.drawPolygon( QPolygonF(
[ QPointF(.25 * ptx, .25 * pty + 250) for (ptx, pty) in pentagonpts ] ) )
testpainter.setBrush( Qt.NoBrush )
testpainter.setPen( QPen(QBrush(QColor(255, 255, 255, 255), Qt.SolidPattern),
3.0, Qt.DashLine, Qt.RoundCap, Qt.RoundJoin) )
testpainter.drawPolyline( QPolygonF(
[ QPointF(pts, pty) for (pts, pty) in linepts ] ) )
testpainter.end()
# add the image command
testimgwidth = testimage.width()
testimgheight = testimage.height()
testimgstride = testimage.bytesPerLine()
# not a good way to get the pixel data
testimgdata = bytearray(testimgheight * testimgstride)
k = 0
for pty in range(testimgheight):
for ptx in range(testimgwidth):
pixval = testimage.pixel(ptx, pty)
(aval, rgbval) = divmod(pixval, 256 * 256 * 256)
(rval, gbval) = divmod(rgbval, 256 * 256)
(gval, bval) = divmod(gbval, 256)
testimgdata[k] = bval
k += 1
testimgdata[k] = gval
k += 1
testimgdata[k] = rval
k += 1
testimgdata[k] = aval
k += 1
testblocksize = 4000
testnumblocks = (testimgheight * testimgstride + testblocksize - 1) // testblocksize
drawcmnds.append( { "action":"newImage",
"width":testimgwidth,
"height":testimgheight,
"stride":testimgstride } )
for k in range(testnumblocks):
if k < (testnumblocks - 1):
blkdata = testimgdata[k*testblocksize:(k+1)*testblocksize]
else:
blkdata = testimgdata[k*testblocksize:]
drawcmnds.append( { "action":"newImage",
"blocknum":k+1,
"numblocks":testnumblocks,
"startindex":k*testblocksize,
"blockdata":blkdata } )
# finish the command list
drawcmnds.append( { "action":"show" } )
drawcmnds.append( { "action":"exit" } )
# create a PipedImagerPQ in this process
(cmndrecvpipe, cmndsendpipe) = multiprocessing.Pipe(False)
(rspdrecvpipe, rspdsendpipe) = multiprocessing.Pipe(False)
testviewer = PipedImagerPQ(cmndrecvpipe, rspdsendpipe)
# create a command submitter dialog
tester = _CommandSubmitterPQ(testviewer, cmndsendpipe,
rspdrecvpipe, drawcmnds)
tester.show()
# let it all run
testresult = testapp.exec_()
if testresult != 0:
sys.exit(testresult)
if __name__ == "__main__":
_test_pipedimagerpq()
```
#### File: PyFerret/pviewmod/scaledialogpq.py
```python
from __future__ import print_function
import sys
try:
import sip
except ImportError:
import PyQt4.sip as sip
try:
sip.setapi('QVariant', 2)
except Exception:
pass
# First try to import PyQt5, then try PyQt4 if that fails
try:
import PyQt5
QT_VERSION = 5
except ImportError:
import PyQt4
QT_VERSION = 4
# Now that the PyQt version is determined, import the parts
# allowing any import errors to propagate out
if QT_VERSION == 5:
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QButtonGroup, QDialog, \
QDialogButtonBox, QGridLayout, QGroupBox, \
QLabel, QLineEdit, QMessageBox, QRadioButton
else:
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QApplication, QButtonGroup, QDialog, \
QDialogButtonBox, QGridLayout, QGroupBox, \
QLabel, QLineEdit, QMessageBox, QRadioButton
class ScaleDialogPQ(QDialog):
'''
Dialog for obtaining scaling information from the user.
Validates that the resulting width and height values
are not smaller than the specified minimums.
'''
def __init__(self, scale, width, height,
minwidth, minheight, autoscale, parent=None):
'''
Creates a scaling dialog, with scale as the current
scaling value which gives a pixmap of size width and
height. The minimum acceptable width and heights are
given by minwidth and minheight. Values are assumed to
be in units of pixels. The value of autoscale sets the
        default value of "Scale image to fit window frame".
'''
super(ScaleDialogPQ, self).__init__(parent)
self.__scale = float(scale)
self.__pixwidth = float(width)
self.__inchwidth = float(width) / float(self.physicalDpiX())
self.__pixheight = float(height)
self.__inchheight = float(height) / float(self.physicalDpiY())
self.__minpixwidth = int(minwidth)
self.__minpixheight = int(minheight)
self.__autoscale = bool(autoscale)
self.FLTSTR_FORMAT = "%#.3f"
self.setWindowTitle(self.tr("Image Size Scaling"))
# auto-scaling option at the top
autoscalelabel = QLabel(self.tr("Scale image to fit window frame?"),
self)
autoscalelabel.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.__autoyesbtn = QRadioButton(self.tr("&Yes"), self)
self.__autonobtn = QRadioButton(self.tr("&No"), self)
autoscalebtngrp = QButtonGroup(self)
autoscalebtngrp.addButton(self.__autoyesbtn)
autoscalebtngrp.addButton(self.__autonobtn)
# put the manual scaling settings into their own box
self.__grpbox = QGroupBox(self.tr("Fixed scaling"), self)
# create the widgets going inside this group box
messagelabel = QLabel(
self.tr("Scaling factor (both horiz. and vert.) for the image"),
self.__grpbox)
messagelabel.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
scalelabel = QLabel(self.tr("&Scale: "), self.__grpbox)
self.__scaleedit = QLineEdit(self.FLTSTR_FORMAT % self.__scale,
self.__grpbox)
scalelabel.setBuddy(self.__scaleedit)
widthbegin = QLabel(self.tr("Width: "), self.__grpbox)
self.__pixwidthlabel = QLabel(str(int(self.__pixwidth + 0.5)),
self.__grpbox)
widthmiddle = QLabel(self.tr("pixels, or"), self.__grpbox)
self.__inchwidthlabel = QLabel(self.FLTSTR_FORMAT % self.__inchwidth,
self.__grpbox)
widthend = QLabel(self.tr("inches on the screen"), self.__grpbox)
minwidthlabel = QLabel(self.tr("(must not be less than %d pixels)" % \
self.__minpixwidth), self.__grpbox)
heightbegin = QLabel(self.tr("Height:"), self.__grpbox)
self.__pixheightlabel = QLabel(str(int(self.__pixheight + 0.5)),
self.__grpbox)
heightmiddle = QLabel(self.tr("pixels, or"), self.__grpbox)
self.__inchheightlabel = QLabel(self.FLTSTR_FORMAT % self.__inchheight,
self.__grpbox)
heightend = QLabel(self.tr("inches on the screen"), self.__grpbox)
minheightlabel = QLabel(self.tr("(must not be less than %d pixels)" % \
self.__minpixheight), self.__grpbox)
# layout the widgets in this group box
layout = QGridLayout()
layout.addWidget(messagelabel, 0, 0, 1, 5)
layout.addWidget(scalelabel, 1, 0, 1, 1)
layout.addWidget(self.__scaleedit, 1, 1, 1, 4)
layout.addWidget(widthbegin, 2, 0, 1, 1)
layout.addWidget(self.__pixwidthlabel, 2, 1, 1, 1)
layout.addWidget(widthmiddle, 2, 2, 1, 1)
layout.addWidget(self.__inchwidthlabel, 2, 3, 1, 1)
layout.addWidget(widthend, 2, 4, 1, 1)
layout.addWidget(minwidthlabel, 3, 1, 1, 4)
layout.addWidget(heightbegin, 4, 0, 1, 1)
layout.addWidget(self.__pixheightlabel, 4, 1, 1, 1)
layout.addWidget(heightmiddle, 4, 2, 1, 1)
layout.addWidget(self.__inchheightlabel, 4, 3, 1, 1)
layout.addWidget(heightend, 4, 4, 1, 1)
layout.addWidget(minheightlabel, 5, 1, 1, 4)
# assign this layout to the group box
self.__grpbox.setLayout(layout)
# layout the widgets in the dialog (outside the group box)
layout = QGridLayout()
layout.addWidget(autoscalelabel, 0, 0, 1, 1)
layout.addWidget(self.__autoyesbtn, 0, 1, 1, 1)
layout.addWidget(self.__autonobtn, 0, 2, 1, 1)
layout.addWidget(self.__grpbox, 1, 0, 1, 3)
buttonbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel |
QDialogButtonBox.Reset,
Qt.Horizontal, self)
layout.addWidget(buttonbox, 2, 0, 1, 3)
self.setLayout(layout)
# The OK button is not the default here in Qt4.2
okbutton = buttonbox.button(QDialogButtonBox.Ok)
okbutton.setDefault(True)
resetbutton = buttonbox.button(QDialogButtonBox.Reset)
self.__autoyesclicked = self.__autoyesbtn.clicked
self.__autoyesclicked.connect(self.setAutoScale)
self.__autonoclicked = self.__autonobtn.clicked
self.__autonoclicked.connect(self.unsetAutoScale)
self.__scaletextchanged = self.__scaleedit.textChanged
self.__scaletextchanged.connect(self.updateValues)
self.__buttonboxaccepted = buttonbox.accepted
self.__buttonboxaccepted.connect(self.checkValues)
self.__buttonboxrejected = buttonbox.rejected
self.__buttonboxrejected.connect(self.reject)
self.__resetbuttonclicked = resetbutton.clicked
self.__resetbuttonclicked.connect(self.resetValues)
# initialize the state from autoscale
if self.__autoscale:
self.__autoyesbtn.setChecked(True)
self.setAutoScale(True)
else:
self.__autonobtn.setChecked(True)
self.unsetAutoScale(True)
def setAutoScale(self, checked):
if checked:
self.__grpbox.setEnabled(False)
def unsetAutoScale(self, checked):
if checked:
self.__grpbox.setEnabled(True)
self.__scaleedit.setFocus()
self.__scaleedit.selectAll()
def updateValues(self, newstring):
try:
newscale = float(newstring)
if (newscale < 0.0001) or (newscale > 10000.0):
raise OverflowError()
newval = self.__pixwidth * newscale / self.__scale
self.__pixwidthlabel.setText(str(int(newval + 0.5)))
newval = self.__inchwidth * newscale / self.__scale
self.__inchwidthlabel.setText(self.FLTSTR_FORMAT % newval)
newval = self.__pixheight * newscale / self.__scale
self.__pixheightlabel.setText(str(int(newval + 0.5)))
newval = self.__inchheight * newscale / self.__scale
self.__inchheightlabel.setText(self.FLTSTR_FORMAT % newval)
except Exception:
pass
def checkValues(self):
okay = self.getValues()[2]
if okay:
self.accept()
else:
QMessageBox.warning(self, self.tr("Invalid value"),
self.tr("Scale value is not valid"))
def getValues(self):
if self.__autoyesbtn.isChecked():
return (0.0, True, True)
try:
newscale = float(self.__scaleedit.text())
if (newscale < 0.0001) or (newscale > 10000.0):
raise OverflowError()
newwidth = self.__pixwidth * newscale / self.__scale
newwidth = int(newwidth + 0.5)
newheight = self.__pixheight * newscale / self.__scale
newheight = int(newheight + 0.5)
if (newwidth < self.__minpixwidth) or (newheight < self.__minpixheight):
raise OverflowError()
except Exception:
return (0.0, False, False)
return (newscale, False, True)
def resetValues(self):
self.__scaleedit.setText(self.FLTSTR_FORMAT % self.__scale)
self.__pixwidthlabel.setText(str(int(self.__pixwidth + 0.5)))
self.__inchwidthlabel.setText(self.FLTSTR_FORMAT % self.__inchwidth)
self.__pixheightlabel.setText(str(int(self.__pixheight + 0.5)))
self.__inchheightlabel.setText(self.FLTSTR_FORMAT % self.__inchheight)
if self.__autoscale:
self.__autoyesbtn.setChecked(True)
self.setAutoScale(True)
else:
self.__autonobtn.setChecked(True)
self.unsetAutoScale(True)
def _test_scaledialogpq():
app = QApplication(["tester"])
resizedialog = ScaleDialogPQ(1.0, 500, 300, 75, 50, False)
retval = resizedialog.exec_()
print("retval = %d" % retval)
if retval == QDialog.Accepted:
rettuple = resizedialog.getValues()
print("getValues returned: %s" % str(rettuple))
if __name__ == "__main__":
_test_scaledialogpq()
``` |
{
"source": "JHongKong/GitDaily",
"score": 4
} |
#### File: Algorithm/Python/Overwrite.py
```python
import sys
def myprint(*args,**kargs):
"""
    Emulate the built-in print function.
    Emulate most of the 3.X print function for use in 2.X (and 3.X)
    Call signature: myprint(*args, sep=' ', end='\n', file=sys.stdout)
"""
sep = kargs.get('sep',' ')
end = kargs.get('end','\n')
file = kargs.get('file',sys.stdout)
output = ''
first = True
for arg in args:
output += ('' if first else sep) + str(arg)
first = False
file.write(output+end)
def myprint0(*args,**kargs):
"""
    A more careful variant: the simpler version above is already good enough
Use 2.X/3.X keyword args deletion with defaults
"""
    sep = kargs.pop('sep', ' ')  # dict.pop() removes and returns the passed-in key; leftovers are checked below
end = kargs.pop('end','\n')
file = kargs.get('file',sys.stdout)
    if kargs: raise TypeError('extra keywords: %s' % kargs)  # raise if any unexpected keyword arguments remain
output = ''
first = True
for arg in args:
output += ('' if first else sep) + str(arg)
first = False
file.write(output+end)
myprint("fanfamsf","dsakda",sep='...')
myprint0("fanfamsf","dsakda",sep='...')
def mymap(func,*seqs):
res = []
for args in zip(*seqs):
res.append(func(*args))
return res
print(mymap(abs,[-2,-1,0,1,2]))
print(mymap(pow,[1,2,3],[2,3,4,5]))
def myzip(*seqs,pad=None):
seqs = [list(S) for S in seqs]
res = []
    while any(seqs):  # with all instead of any this behaves like the ordinary zip()
#res.append(tuple(S.pop(0) for S in seqs))
res.append(tuple((S.pop(0) if S else pad)for S in seqs))
return res
S1,S2 = 'abc','xyz123'
print(myzip(S1,S2))
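# A quick check of the pad behaviour described above (this extra call is added purely for illustration):
print(myzip(S1, S2, pad='*'))  # [('a', 'x'), ('b', 'y'), ('c', 'z'), ('*', '1'), ('*', '2'), ('*', '3')]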
``` |
{
"source": "jhonifreitas/auto-service",
"score": 2
} |
#### File: auth/tests/test_serializers.py
```python
from model_mommy import mommy
from rest_framework.serializers import ValidationError
from django.test import TestCase
from django.contrib.auth.models import User
from gojob.api.v1.auth.serializers import LoginSerializer
class LoginSerializerValidTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='test', password='<PASSWORD>')
self.profile = mommy.make('custom_profile.Profile', user=self.user)
self.serializer = LoginSerializer(data={'username': self.user.username, 'password': '<PASSWORD>'})
def test_serializer_is_valid(self):
self.assertTrue(self.serializer.is_valid())
def test_serializer_get_token(self):
self.serializer.is_valid()
self.assertTrue(self.serializer.get_token())
class LoginSerializerInvalidTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='test', password='<PASSWORD>')
self.serializer = LoginSerializer(data={})
def test_serializer_not_is_valid(self):
self.assertFalse(self.serializer.is_valid())
def test_validate_password_invalid(self):
with self.assertRaises(ValidationError):
self.serializer.validate({'username': self.user.username, 'password': '<PASSWORD>'})
def test_validate_username_invalid(self):
with self.assertRaises(ValidationError):
self.serializer.validate({'username': 'invalid-username'})
```
#### File: v1/auth/views.py
```python
from rest_framework import status
from rest_framework import viewsets
from rest_framework.response import Response
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth.tokens import default_token_generator
from gojob.api.v1.auth.serializers import LoginSerializer
class LoginViewSet(viewsets.ViewSet):
serializer_class = LoginSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
return Response(serializer.get_data(request), status=status.HTTP_200_OK)
return Response({'errors': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
class PasswordResetViewSet(viewsets.ViewSet):
def post(self, request):
if User.objects.filter(email=request.data.get('email')).exists():
opts = {
'use_https': request.is_secure(),
'token_generator': default_token_generator,
'from_email': None,
'email_template_name': 'registration/password_reset_email.html',
'subject_template_name': 'registration/password_reset_subject.txt',
'request': request,
'html_email_template_name': None,
'extra_email_context': None,
}
form = PasswordResetForm(request.data)
if form.is_valid():
form.save(**opts)
return Response({'ok': 'E-mail enviado com sucesso!'}, status=status.HTTP_200_OK)
return Response({'error': 'E-mail inválido!'}, status=status.HTTP_400_BAD_REQUEST)
```
#### File: v1/customer/__init__.py
```python
import base64
from itsdangerous import BadSignature, SignatureExpired, TimedJSONWebSignatureSerializer
from rest_framework.permissions import BasePermission
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.authentication import get_authorization_header, BasicAuthentication
from django.urls import resolve
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User, AnonymousUser
class TokenAuthenticate(BasicAuthentication):
"""
    Custom auth method to authenticate the user through the token
"""
ALLOWED_PATHS = [
'login',
]
def allowed_path(self, request):
"""
        If the anonymous user is trying to access a valid url
"""
return resolve(request.path).url_name in self.ALLOWED_PATHS
def verify_token(self, token):
try:
return TimedJSONWebSignatureSerializer(settings.SECRET_KEY, expires_in=settings.EXPIRES_IN).loads(token)
except (BadSignature, SignatureExpired):
raise AuthenticationFailed('Bad credentials.')
def authenticate(self, request, simple=False):
auth = get_authorization_header(request).split()
if not auth and self.allowed_path(request):
return self.authenticate_credentials(anonymous=True)
if not auth or auth[0].lower() != b'basic':
raise AuthenticationFailed('Bad Credentials.')
try:
auth_parts = base64.b64decode(auth[1]).decode('utf-8').partition(':')
except (IndexError, TypeError, base64.binascii.Error):
raise AuthenticationFailed('Bad Credentials.')
token, password = auth_parts[0], auth_parts[2]
payload = self.verify_token(token)
return self.authenticate_credentials(payload, password, request=request)
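    # Note (inferred from the parsing in authenticate() above, not stated in the source): clients are
    # expected to send "Authorization: Basic base64(<token>:<password>)", where <token> is an
    # itsdangerous TimedJSONWebSignatureSerializer payload carrying the username/password that
    # authenticate_credentials() passes to Django's authenticate().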
def authenticate_credentials(self, payload=None, password=None, anonymous=False, request=None):
"""
Authenticate the userid and password against username and password.
"""
if anonymous:
return (AnonymousUser(), None)
credentials = {
'username': payload['username'],
'password': payload['password']
}
user = authenticate(**credentials)
if (user is None) or (user and not user.is_active):
raise AuthenticationFailed('Bad Credentials.')
return (user, None)
class IsUserAuthenticated(BasePermission):
def has_permission(self, request, view):
if isinstance(request.user, User) or isinstance(request.user, AnonymousUser):
return True
return False
```
#### File: gojob/core/utils.py
```python
class CPF(object):
INVALID_CPFS = ['00000000000', '11111111111', '22222222222', '33333333333', '44444444444',
'55555555555', '66666666666', '77777777777', '88888888888', '99999999999']
def __init__(self, cpf):
self.cpf = cpf
def validate_size(self):
cpf = self.cleaning()
if bool(cpf and (len(cpf) > 11 or len(cpf) < 11)):
return False
return True
def validate(self):
cpf = self.cleaning()
if self.validate_size() and cpf not in self.INVALID_CPFS:
digit_1 = 0
digit_2 = 0
i = 0
while i < 10:
digit_1 = (digit_1 + (int(cpf[i]) * (11-i-1))) % 11 if i < 9 else digit_1
digit_2 = (digit_2 + (int(cpf[i]) * (11-i))) % 11
i += 1
return ((int(cpf[9]) == (11 - digit_1 if digit_1 > 1 else 0)) and
(int(cpf[10]) == (11 - digit_2 if digit_2 > 1 else 0)))
return False
def cleaning(self):
return self.cpf.replace('.', '').replace('-', '') if self.cpf else ''
def format(self):
return '%s.%s.%s-%s' % (self.cpf[0:3], self.cpf[3:6], self.cpf[6:9], self.cpf[9:11]) if self.cpf else ''
class ZipCode(object):
def __init__(self, zip_code):
"""
Class to interact with zip_code brazilian numbers
"""
self.zip_code = zip_code
def format(self):
return '%s-%s' % (self.zip_code[0:5], self.zip_code[5:8]) if self.zip_code else ''
def cleaning(self):
return self.zip_code.replace('-', '') if self.zip_code else ''
class Phone(object):
def __init__(self, phone):
self.phone = phone
def cleaning(self):
if self.phone:
phone = self.phone.replace('(', '')
phone = phone.replace(')', '')
phone = phone.replace('-', '')
phone = phone.replace(' ', '')
phone = phone.replace('.', '')
phone = phone.replace('+', '')
return phone
return ''
def format(self):
if self.phone:
if len(self.phone) == 8:
return '%s-%s' % (self.phone[0:4], self.phone[4:8])
if len(self.phone) == 9:
return '%s%s-%s' % (self.phone[0:1], self.phone[1:5], self.phone[5:9])
if len(self.phone) == 10:
return '(%s) %s-%s' % (self.phone[0:2], self.phone[2:6], self.phone[6:10])
if len(self.phone) == 11:
return '(%s) %s%s-%s' % (self.phone[0:2], self.phone[2:3], self.phone[3:7], self.phone[7:11])
if len(self.phone) == 13:
return '+%s (%s) %s %s-%s' % (
self.phone[0:2],
self.phone[2:4],
self.phone[4:5],
self.phone[5:9],
self.phone[9:13]
)
return ''
class CNPJ(object):
def __init__(self, cnpj):
"""
Class to interact with cnpj brazilian numbers
"""
self.cnpj = cnpj
def calculating_digit(self, result):
result = result % 11
if result < 2:
digit = 0
else:
digit = 11 - result
return str(digit)
def calculating_first_digit(self):
one_validation_list = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
result = 0
pos = 0
for number in self.cnpj:
try:
one_validation_list[pos]
except IndexError:
break
result += int(number) * int(one_validation_list[pos])
pos += 1
return self.calculating_digit(result)
def calculating_second_digit(self):
two_validation_list = [6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
result = 0
pos = 0
for number in self.cnpj:
try:
two_validation_list[pos]
except IndexError:
break
result += int(number) * int(two_validation_list[pos])
pos += 1
return self.calculating_digit(result)
def validate(self):
"""
Method to validate brazilian cnpjs
"""
self.cnpj = self.cleaning()
if len(self.cnpj) != 14:
return False
checkers = self.cnpj[-2:]
digit_one = self.calculating_first_digit()
digit_two = self.calculating_second_digit()
return bool(checkers == digit_one + digit_two)
def cleaning(self):
if self.cnpj:
return self.cnpj.replace('-', '').replace('.', '').replace('/', '')
return ''
def format(self):
"""
Method to format cnpj numbers.
"""
if self.cnpj:
return '%s.%s.%s/%s-%s' % (self.cnpj[0:2], self.cnpj[2:5], self.cnpj[5:8], self.cnpj[8:12],
self.cnpj[12:14])
return ''
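# Illustrative usage of the helpers above (the numbers are made-up examples, not from the project):
#   CPF('11144477735').format()    -> '111.444.777-35'
#   Phone('11987654321').format()  -> '(11) 98765-4321'
#   ZipCode('01310100').format()   -> '01310-100'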
```
#### File: gojob/core/views_base.py
```python
from django.views import View
from django.contrib import messages
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.views.generic.edit import CreateView, UpdateView, DeleteView
class BaseView(PermissionRequiredMixin, SuccessMessageMixin, View):
raise_exception = True
permission_required = []
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
class BaseListView(BaseView, ListView):
paginate_by = 10
class BaseCreateView(BaseView, CreateView):
pass
class BaseUpdateView(BaseView, UpdateView):
slug_field = 'uuid'
slug_url_kwarg = 'uuid'
class BaseDeleteView(BaseView, DeleteView):
slug_field = 'uuid'
slug_url_kwarg = 'uuid'
def delete(self, request, *args, **kwargs):
messages.success(request, self.success_message)
return super(BaseDeleteView, self).delete(request, *args, **kwargs)
class BaseDetailView(BaseView, DetailView):
slug_field = 'uuid'
slug_url_kwarg = 'uuid'
``` |
{
"source": "JhonJYJ/Marvel",
"score": 4
} |
#### File: arcade/examples/decorator_drawing_example.py
```python
import arcade
import random
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
def draw_background(window):
"""
This function draws the background. Specifically, the sky and ground.
"""
# Draw the sky in the top two-thirds
arcade.draw_rectangle_filled(SCREEN_WIDTH / 2, SCREEN_HEIGHT * 2 / 3,
SCREEN_WIDTH - 1, SCREEN_HEIGHT * 2 / 3,
arcade.color.SKY_BLUE)
# Draw the ground in the bottom third
arcade.draw_rectangle_filled(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 6,
SCREEN_WIDTH - 1, SCREEN_HEIGHT / 3,
arcade.color.DARK_SPRING_GREEN)
def draw_bird(x, y):
"""
Draw a bird using a couple arcs.
"""
arcade.draw_arc_outline(x, y, 20, 20, arcade.color.BLACK, 0, 90)
arcade.draw_arc_outline(x + 40, y, 20, 20, arcade.color.BLACK, 90, 180)
def draw_pine_tree(center_x, center_y):
"""
This function draws a pine tree at the specified location.
Args:
:center_x: x position of the tree center.
:center_y: y position of the tree trunk center.
"""
# Draw the trunk center_x
arcade.draw_rectangle_filled(center_x, center_y, 20, 40, arcade.color.DARK_BROWN)
tree_bottom_y = center_y + 20
# Draw the triangle on top of the trunk
point_list = ((center_x - 40, tree_bottom_y),
(center_x, tree_bottom_y + 100),
(center_x + 40, tree_bottom_y))
arcade.draw_polygon_filled(point_list, arcade.color.DARK_GREEN)
def draw_birds(window):  # Draw each bird stored on the window's bird list.
for bird in window.bird_list:
# Draw the bird.
draw_bird(bird[0], bird[1])
def draw_trees(window):
# Draw the top row of trees
for x in range(45, SCREEN_WIDTH, 90):
draw_pine_tree(x, SCREEN_HEIGHT / 3)
# Draw the bottom row of trees
for x in range(65, SCREEN_WIDTH, 90):
draw_pine_tree(x, (SCREEN_HEIGHT / 3) - 120)
@arcade.decorator.setup
def create_birds(window):
"""
    This, and any function with the arcade.decorator.setup decorator,
is run automatically on start-up.
"""
window.bird_list = []
for bird_count in range(10):
x = random.randrange(SCREEN_WIDTH)
        y = random.randrange(SCREEN_HEIGHT // 2, SCREEN_HEIGHT)  # randrange needs integer bounds
window.bird_list.append([x, y])
@arcade.decorator.update
def animate_birds(window, delta_time):
"""
This is run every 1/60 of a second or so. Do not draw anything
in this function.
"""
change_y = 0.3
for bird in window.bird_list:
bird[0] += change_y
if bird[0] > SCREEN_WIDTH + 20:
bird[0] = -20
@arcade.decorator.draw
def draw(window):
"""
    This is called every time we need to update our screen. About 60
times per second.
Just draw things in this function, don't update where they are.
"""
# Call our drawing functions.
draw_background(window)
draw_birds(window)
draw_trees(window)
if __name__ == "__main__":
arcade.decorator.run(SCREEN_WIDTH, SCREEN_HEIGHT, title="Drawing With Decorators")
```
#### File: arcade/examples/stress_test_draw_simple.py
```python
import random
import arcade
import os
import timeit
# --- Constants ---
SPRITE_SCALING_COIN = 0.09
COIN_COUNT = 50000
SCREEN_WIDTH = 1200
SCREEN_HEIGHT = 700
class MyGame(arcade.Window):
""" Our custom Window Class"""
def __init__(self):
""" Initializer """
# Call the parent class initializer
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, "Sprite Example")
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Variables that will hold sprite lists
self.all_sprites_list = None
self.coin_list = None
# Set up the player info
self.player_sprite = None
self.score = 0
self.processing_time = 0
self.draw_time = 0
# Don't show the mouse cursor
self.set_mouse_visible(False)
arcade.set_background_color(arcade.color.AMAZON)
def setup(self):
""" Set up the game and initialize the variables. """
# Sprite lists
self.all_sprites_list = arcade.SpriteList()
self.coin_list = arcade.SpriteList()
# Create the coins
for i in range(COIN_COUNT):
# Create the coin instance
# Coin image from kenney.nl
coin = arcade.Sprite("images/coin_01.png", SPRITE_SCALING_COIN)
# Position the coin
coin.center_x = random.randrange(SCREEN_WIDTH)
coin.center_y = random.randrange(SCREEN_HEIGHT)
# Add the coin to the lists
self.all_sprites_list.append(coin)
self.coin_list.append(coin)
def on_draw(self):
""" Draw everything """
# Start timing how long this takes
draw_start_time = timeit.default_timer()
arcade.start_render()
self.all_sprites_list.draw()
# Display timings
output = f"Processing time: {self.processing_time:.3f}"
arcade.draw_text(output, 20, SCREEN_HEIGHT - 20, arcade.color.BLACK, 16)
output = f"Drawing time: {self.draw_time:.3f}"
arcade.draw_text(output, 20, SCREEN_HEIGHT - 40, arcade.color.BLACK, 16)
self.draw_time = timeit.default_timer() - draw_start_time
def main():
""" Main method """
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
```
#### File: Marvel/doc/generate_example_thumbnails.py
```python
import os, sys
def main():
if not os.path.exists('examples/thumbs'):
os.makedirs('examples/thumbs')
generate_thumbnails()
else:
print('Thumbnails already exist, skipping generation')
def generate_thumbnails():
print('Generating thumbnails')
if sys.platform == 'linux':
command = 'mogrify'
else:
command = 'magick mogrify'
os.chdir('examples')
os.system(command + ' -resize 200x158 -extent 200x158 -background transparent -path thumbs *.png')
# Do we want animated thumbnails?
# os.system(command + ' -resize 200x158 -extent 200x158 -background transparent -path thumbs *.gif')
if __name__ == '__main__':
main()
```
#### File: Marvel/tests/__init__.py
```python
from doctest import DocTestSuite
from unittest import TestSuite
from unittest import TextTestRunner
import arcade
def load_tests(loader=None, tests=None, pattern=None):
suite = TestSuite()
suite.addTests(DocTestSuite('arcade.draw_commands'))
suite.addTests(DocTestSuite('arcade.buffered_draw_commands'))
suite.addTests(DocTestSuite('arcade.window_commands'))
suite.addTests(DocTestSuite('arcade.geometry'))
suite.addTests(DocTestSuite('arcade.sprite'))
suite.addTests(DocTestSuite('arcade.sprite_list'))
suite.addTests(DocTestSuite('arcade.application'))
suite.addTests(DocTestSuite('arcade.sound'))
suite.addTests(DocTestSuite('arcade.physics_engines'))
suite.addTests(DocTestSuite('arcade.decorator_support'))
return suite
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(load_tests())
``` |
{
"source": "jhonktorresp/snake-machine-learning",
"score": 3
} |
#### File: snake-machine-learning/V1/snake_visual.py
```python
import pygame,time,math
from snake_logic import snake
from pygame.locals import *
import numpy
import pandas
from joblib import dump, load
#import csv
limit_barrier = 30
width = 500
height = 500
pixel_w = width/limit_barrier
pixel_h = height/limit_barrier
white=(255,255,255)
black=(0,0,0)
green=(0,255,0)
def erase_board(display):
pygame.draw.rect(display,black,(pixel_w,pixel_h,pixel_w*(limit_barrier-2),pixel_h*(limit_barrier-2)))
def draw_path_board(display,explored):
for x in range(0,limit_barrier):
for y in range(0,limit_barrier):
if(explored[x][y]):
pygame.draw.rect(display,green,(x*pixel_w,y*pixel_h,pixel_w,pixel_h))
def draw_pixel(display,x,y,explored):
erase_board(display)
draw_path_board(display,explored)
pygame.draw.rect(display,white,(x*pixel_w,y*pixel_h,pixel_w,pixel_h))
def init_snake():
pygame.init()
display=pygame.display.set_mode((width,height),0,32)
display.fill(black)
#Draw barrier
for i in range(limit_barrier):
pygame.draw.rect(display,white,(i*pixel_w,0,pixel_w,pixel_h))
pygame.draw.rect(display,white,(0,i*pixel_h,pixel_w,pixel_h))
pygame.draw.rect(display,white,((limit_barrier-1)*pixel_w,i*pixel_h,pixel_w,pixel_h))
pygame.draw.rect(display,white,(i*pixel_w,(limit_barrier-1)*pixel_h,pixel_w,pixel_h))
return display
def init_game(random):
if(not random):
display=init_snake()
xposition = math.floor(limit_barrier/2)
yposition = xposition
#xposition = 1
#yposition = 10
velocity_p = 40000
velocity = 0.1/velocity_p
eat_points = 0
#survive_points = eat_points/1000
survive_points = 0
explore_points = 0
penalty_explore = 0
#penalty_end_game = survive_points * limit_barrier * limit_barrier * 2
penalty_end_game = -1
if(random):
predictor=None
else:
predictor= load('snake_model.joblib')
mySnake = snake(xposition,yposition,limit_barrier,eat_points,explore_points,survive_points,penalty_explore,penalty_end_game,[],random,predictor)
#print(mySnake.score)
while mySnake.life==1:
#time.sleep(velocity)
mySnake.snake_random_movement()
#mySnake.where_is()
if(not random):
erase_board(display)
draw_pixel(display,mySnake.head[0],mySnake.head[1],mySnake.already_explored)
#Events
for event in pygame.event.get():
if event.type==QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#print(mySnake.score)
return mySnake.historial[-3:]
#pd = pandas.DataFrame(mySnake.historial)
#pd.to_csv("snake_data"+str(x)+".csv")
def generate_random_test():
n_Test = 100000
historical_data = []
for x in range(n_Test):
historical_data=historical_data+init_game(True)
print(x)
pd = pandas.DataFrame(historical_data)
pd.to_csv("snake_data.csv")
#with open("snake_data.csv",'w') as f:
# for sublist in historical_data:
# for item in sublist:
# f.write(str(item) + ',')
# f.write('\n')
def test_snake_machine():
for x in range(100):
a = init_game(False)
import sys
def main():
if(sys.argv[1]=="1"):
generate_random_test()
else:
test_snake_machine()
main()
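# Usage sketch (inferred from the argv switch in main(), not part of the original file):
#   python snake_visual.py 1   -> play random games and dump snake_data.csv for training
#   python snake_visual.py <anything else>  -> load snake_model.joblib and watch the model-driven snake play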
``` |
{
"source": "JhonLiuljs/tensorflow_demo",
"score": 3
} |
#### File: tensorflow_demo/1.Cnn_Captcha/main.py
```python
from gen_captcha import gen_captcha_text_and_image
from constants import IMAGE_HEIGHT
from constants import IMAGE_WIDTH
from constants import MAX_CAPTCHA
from constants import CHAR_SET_LEN
import numpy as np
import tensorflow as tf
import sys
import matplotlib.pyplot as plt
"""
CNNs perform best when the image dimensions are multiples of 2; if your image size is not, pad the edges with dummy pixels.
np.pad(image, ((2, 3), (2, 2)), 'constant', constant_values=(255,))  # pad 2 rows on top, 3 on the bottom, 2 columns on the left and 2 on the right
"""
##################################
# Define the variable space up front: declare placeholders shaped according to the images
X = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT * IMAGE_WIDTH])
Y = tf.placeholder(tf.float32, [None, MAX_CAPTCHA * CHAR_SET_LEN])
keep_prob = tf.placeholder(tf.float32) # dropout
# Convert the color image to grayscale (color adds little for captcha recognition)
def convert2gray(img):
if len(img.shape) > 2:
gray = np.mean(img, -1)
        # The conversion above is faster; the standard conversion is:
# r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]
# gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
else:
return img
"""
# The vector (size MAX_CAPTCHA*CHAR_SET_LEN) is 0/1 encoded; each block of 63 entries encodes one character, preserving both position and character
vec = text2vec("F5Sd")
text = vec2text(vec)
print(text) # F5Sd
vec = text2vec("SFd5")
text = vec2text(vec)
print(text) # SFd5
"""
# Convert the captcha text into a long one-hot vector
def text2vec(text):
text_len = len(text)
if text_len > MAX_CAPTCHA:
raise ValueError('验证码最长4个字符')
vector = np.zeros(MAX_CAPTCHA * CHAR_SET_LEN)
def char2pos(c):
if c == '_':
k = 62
return k
k = ord(c) - 48
if k > 9:
k = ord(c) - 55
if k > 35:
k = ord(c) - 61
if k > 61:
raise ValueError('No Map')
return k
for i, c in enumerate(text):
idx = i * CHAR_SET_LEN + char2pos(c)
vector[idx] = 1
return vector
# Convert a vector back into text
def vec2text(vec):
char_pos = vec.nonzero()[0]
text = []
for i, c in enumerate(char_pos):
char_at_pos = i # c/63
char_idx = c % CHAR_SET_LEN
if char_idx < 10:
char_code = char_idx + ord('0')
elif char_idx < 36:
char_code = char_idx - 10 + ord('A')
elif char_idx < 62:
char_code = char_idx - 36 + ord('a')
elif char_idx == 62:
char_code = ord('_')
else:
raise ValueError('error')
text.append(chr(char_code))
return "".join(text)
# Fetch a set of captcha samples and build one training batch
def get_next_batch(batch_size=128):
batch_x = np.zeros([batch_size, IMAGE_HEIGHT * IMAGE_WIDTH])
batch_y = np.zeros([batch_size, MAX_CAPTCHA * CHAR_SET_LEN])
    # Sometimes the generated image is not of shape (60, 160, 3)
def wrap_gen_captcha_text_and_image():
""" 获取一张图,判断其是否符合(60,160,3)的规格"""
while True:
text, image = gen_captcha_text_and_image()
            if image.shape == (60, 160, 3):  # must match the image width/height defined at the top
return text, image
for i in range(batch_size):
text, image = wrap_gen_captcha_text_and_image()
image = convert2gray(image)
        # Flatten the image array; the text label goes into the matching row of the two 2-D arrays
        batch_x[i, :] = image.flatten() / 255  # (image.flatten()-128)/128 would give zero mean
batch_y[i, :] = text2vec(text)
    # Return the training batch
return batch_x, batch_y
# Convolution layer with relu, max_pool and dropout
def conn_layer(w_alpha=0.01, b_alpha=0.1, _keep_prob=0.7, input=None, last_size=None, cur_size=None):
    # Draw random values from a normal distribution
w_c1 = tf.Variable(w_alpha * tf.random_normal([3, 3, last_size, cur_size]))
b_c1 = tf.Variable(b_alpha * tf.random_normal([cur_size]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(input, w_c1, strides=[1, 1, 1, 1], padding='SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = tf.nn.dropout(conv1, keep_prob=_keep_prob)
return conv1
# Reshape the output of the conv layers for the fully connected layer
def _get_conn_last_size(input):
shape = input.get_shape().as_list()
dim = 1
for d in shape[1:]:
dim *= d
input = tf.reshape(input, [-1, dim])
return input, dim
# Fully connected layer
def _fc_layer(w_alpha=0.01, b_alpha=0.1, input=None, last_size=None, cur_size=None):
w_d = tf.Variable(w_alpha * tf.random_normal([last_size, cur_size]))
b_d = tf.Variable(b_alpha * tf.random_normal([cur_size]))
fc = tf.nn.bias_add(tf.matmul(input, w_d), b_d)
return fc
# Define the CNN and build the forward pass
def crack_captcha_cnn():
    # Reshape the placeholder into the image layout
x = tf.reshape(X, shape=[-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])
conv1 = conn_layer(input=x, last_size=1, cur_size=32)
conv2 = conn_layer(input=conv1, last_size=32, cur_size=64)
conn3 = conn_layer(input=conv2, last_size=64, cur_size=64)
input, dim = _get_conn_last_size(conn3)
fc_layer1 = _fc_layer(input=input, last_size=dim, cur_size=1024)
fc_layer1 = tf.nn.relu(fc_layer1)
fc_layer1 = tf.nn.dropout(fc_layer1, keep_prob)
fc_out = _fc_layer(input=fc_layer1, last_size=1024, cur_size=MAX_CAPTCHA * CHAR_SET_LEN)
return fc_out
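# Shape walk-through for the network above (assuming the 60x160 inputs used in this project):
# three 2x2 max-pools reduce 60x160 -> 30x80 -> 15x40 -> 8x20, so the flattened tensor has
# 8*20*64 = 10240 values, feeding the 1024-unit fully connected layer and finally the
# MAX_CAPTCHA * CHAR_SET_LEN logits.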
# Backpropagation: loss, optimizer and accuracy
def back_propagation():
output = crack_captcha_cnn()
    # Sigmoid cross-entropy loss over the multi-hot label vector
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))
    # How does sigmoid differ from softmax in the final classification layer?
    # Optimizer: to speed up training, learning_rate should start large and decay gradually
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
predict = tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN])
max_idx_p = tf.arg_max(predict, 2)
max_idx_l = tf.arg_max(tf.reshape(Y, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)
accuracy = tf.reduce_mean(tf.cast(tf.equal(max_idx_p, max_idx_l), tf.float32))
return loss, optimizer, accuracy
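# On the question in the comment above: sigmoid_cross_entropy treats each of the
# MAX_CAPTCHA * CHAR_SET_LEN outputs as an independent binary label, which matches the
# multi-hot target vector built by text2vec; a single softmax would force exactly one
# active class over the whole vector, so per-character softmax (or sigmoid, as here) is needed.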
# Train the model from scratch (first run)
def train_first():
loss, optimizer, accuracy = back_propagation()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.all_variables())
step = 0
while 1:
batch_x, batch_y = get_next_batch(64)
_, loss_ = sess.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})
            # Evaluate accuracy every 100 steps
if step % 100 == 0:
batch_x_test, batch_y_test = get_next_batch(100)
acc = sess.run(accuracy, feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1.})
print(step, acc, loss_)
                if acc > 0.80:  # save the model once accuracy exceeds 0.80 (adjust as needed)
saver.save(sess, './models/crack_capcha.model', global_step=step)
break
step += 1
# Load an existing model and continue training
def train_continue(step):
loss, optimizer, accuracy = back_propagation()
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
with tf.Session() as sess:
if step is None:
print(tf.train.latest_checkpoint('./models'))
saver.restore(sess, tf.train.latest_checkpoint('./models'))
else:
path = './models/crack_capcha.model-' + str(step)
saver.restore(sess, path)
# 36300 36300 0.9325 0.0147698
while 1:
batch_x, batch_y = get_next_batch(100)
_, loss_ = sess.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})
if step % 100 == 0:
batch_x_test, batch_y_test = get_next_batch(100)
acc = sess.run(accuracy, feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1.})
print(step, acc, loss_)
if acc >= 0.925:
saver.save(sess, './models/crack_capcha.model', global_step=step)
if acc >= 0.95:
saver.save(sess, './models/crack_capcha.model', global_step=step)
break
step += 1
# Evaluate the trained model on a single image
def crack_captcha(captcha_image, step):
output = crack_captcha_cnn()
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
with tf.Session() as sess:
if step is None:
print(tf.train.latest_checkpoint('./models'))
saver.restore(sess, tf.train.latest_checkpoint('./models'))
else:
path = './models/crack_capcha.model-' + str(step)
saver.restore(sess, path)
predict = tf.argmax(tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)
text_list = sess.run(predict, feed_dict={X: [captcha_image], keep_prob: 1})
texts = text_list[0].tolist()
vector = np.zeros(MAX_CAPTCHA * CHAR_SET_LEN)
i = 0
for n in texts:
vector[i * CHAR_SET_LEN + n] = 1
i += 1
return vec2text(vector)
'''
# Define the CNN
def crack_captcha_cnn(w_alpha=0.01, b_alpha=0.1):
    # Reshape the placeholder into the image layout
x = tf.reshape(X, shape=[-1, IMAGE_HEIGHT, IMAGE_WIDTH, 1])
# w_c1_alpha = np.sqrt(2.0/(IMAGE_HEIGHT*IMAGE_WIDTH)) #
# w_c2_alpha = np.sqrt(2.0/(3*3*32))
# w_c3_alpha = np.sqrt(2.0/(3*3*64))
# w_d1_alpha = np.sqrt(2.0/(8*32*64))
# out_alpha = np.sqrt(2.0/1024)
# 3 conv layer
    w_c1 = tf.Variable(w_alpha * tf.random_normal([3, 3, 1, 32]))  # draw random values from a normal distribution
b_c1 = tf.Variable(b_alpha * tf.random_normal([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides=[1, 1, 1, 1], padding='SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = tf.nn.dropout(conv1, keep_prob)
w_c2 = tf.Variable(w_alpha * tf.random_normal([3, 3, 32, 64]))
b_c2 = tf.Variable(b_alpha * tf.random_normal([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides=[1, 1, 1, 1], padding='SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.dropout(conv2, keep_prob)
w_c3 = tf.Variable(w_alpha * tf.random_normal([3, 3, 64, 64]))
b_c3 = tf.Variable(b_alpha * tf.random_normal([64]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, w_c3, strides=[1, 1, 1, 1], padding='SAME'), b_c3))
conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv3 = tf.nn.dropout(conv3, keep_prob)
# Fully connected layer
w_d = tf.Variable(w_alpha * tf.random_normal([8 * 20 * 64, 1024]))
b_d = tf.Variable(b_alpha * tf.random_normal([1024]))
dense = tf.reshape(conv3, [-1, w_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
dense = tf.nn.dropout(dense, keep_prob)
w_out = tf.Variable(w_alpha * tf.random_normal([1024, MAX_CAPTCHA * CHAR_SET_LEN]))
b_out = tf.Variable(b_alpha * tf.random_normal([MAX_CAPTCHA * CHAR_SET_LEN]))
out = tf.add(tf.matmul(dense, w_out), b_out)
# out = tf.nn.softmax(out)
return out
# Training
def train_crack_captcha_cnn():
output = crack_captcha_cnn()
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(output, Y))
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=Y))
    # How does sigmoid differ from softmax in the final classification layer?
    # Optimizer: to speed up training, learning_rate should start large and decay gradually
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
predict = tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN])
max_idx_p = tf.argmax(predict, 2)
max_idx_l = tf.argmax(tf.reshape(Y, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)
correct_pred = tf.equal(max_idx_p, max_idx_l)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
step = 0
while True:
batch_x, batch_y = get_next_batch(64)
_, loss_ = sess.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})
print(step, loss_)
            # Evaluate accuracy every 100 steps
if step % 100 == 0:
batch_x_test, batch_y_test = get_next_batch(100)
acc = sess.run(accuracy, feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1.})
print(step, acc)
                # Once accuracy reaches the target, save the model and finish training
if acc >= 0.95:
saver.save(sess, "./crack_capcha.model", global_step=step)
break
step += 1
'''
if __name__ == '__main__':
print("验证码文本最长字符数", MAX_CAPTCHA) # 验证码最长4字符; 我全部固定为4,可以不固定. 如果验证码长度小于4,用'_'补齐
# 训练和测试开关
train = True
if train:
# train_continue(36300)
train_first()
else:
m_text, m_image = gen_captcha_text_and_image()
f = plt.figure()
ax = f.add_subplot(111)
ax.text(0.1, 0.9, m_text, ha='center', va='center', transform=ax.transAxes)
plt.imshow(m_image)
plt.show()
        image = convert2gray(m_image)  # convert the freshly generated color image to grayscale
        image = image.flatten() / 255  # flatten the image to one dimension
        predict_text = crack_captcha(image, None)  # load the model and run recognition
print("正确: {} 预测: {}".format(m_text, predict_text))
sys.exit()
``` |
{
"source": "jhonmac666/agent-operator",
"score": 3
} |
#### File: agent-operator/hack/customize_crds.py
```python
import yaml
from os import listdir
from os.path import isfile, join
crd_directory = "./config/crd/bases/"
def recursively_delete(d, key_of_interest):
for key, value in list(d.items()):
if key in key_of_interest:
print("found and deleted: " + key)
del d[key]
if type(value) is dict:
recursively_delete(value, key_of_interest)
crds = [f for f in listdir(crd_directory) if isfile(join(crd_directory, f))]
for yaml_file in crds:
with open(crd_directory + yaml_file, 'r') as crd:
search_string = {"x-kubernetes-int-or-string", "x-kubernetes-list-type", "anyOf", "pattern"}
file = yaml.full_load(crd)
recursively_delete(file, search_string)
# del file['status']
with open(crd_directory + yaml_file, 'w') as new_crd:
yaml.dump(file, new_crd)
``` |
{
"source": "JhonnathaAndrade/thermosolver",
"score": 3
} |
#### File: thermosolver/thermosolver/database.py
```python
class BdD(object):
"""docstring for BdD"""
def __init__(self):
super(BdD, self).__init__()
def get_dados(componente):
import csv
from collections import namedtuple
# Name;Formula;MW;Tc;Pc;Vc;Rho_c;Zc;w
        # allocating space for the lists
import os
rel_path = "DadosCriticos.csv"
folder = os.path.join(os.path.dirname(__file__), 'critical_data')
file = os.path.join(folder, 'Yaws Collection.csv')
Especie = namedtuple('Componente','CASRN Name Tc Pc Vc w')
saida = None
with open(file, 'r') as csvfile:
csvreader = csv.reader(csvfile,delimiter='\t',quoting=csv.QUOTE_NONNUMERIC)
headers = next(csvreader)
a = []
for row in csvreader:
if componente in (row[0],row[1]):
saida = Especie(*row)
break
if not saida:
raise ValueError('Especie não encontrada no banco de dados')
return saida
if __name__ == '__main__':
_,_,Tc,Pc,Vc,w = BdD.get_dados('hexamethyldisilazane')
print(Tc)
``` |
{
"source": "jhonnattan123/fastapi_crud_example",
"score": 3
} |
#### File: api/actions/storage.py
```python
import uuid
from fastapi import HTTPException
from starlette.requests import Request
from api.models.usuario import Usuario
from api.models.respuesta_listar import Respuesta_Listar
def add(item, request: Request):
""" Agrega un nuevo item a la lista.
Si el modelo posee el metodo u_key se usara su respuesta
como llave unica.
:param item: Item a agregar
:param request: Request
"""
try:
item_id = uuid.uuid4()
setattr(item, "ID", item_id)
nombre_modelo = item.__class__.__name__
if nombre_modelo not in request.app.memory:
request.app.memory[nombre_modelo] = {}
if nombre_modelo not in request.app.key_memory:
request.app.key_memory[nombre_modelo] = {}
if hasattr(item, "u_key"):
llave = item.u_key()
valor_llave = getattr(item, llave)
if nombre_modelo in request.app.key_memory and valor_llave in request.app.key_memory[nombre_modelo]:
raise HTTPException(
status_code=404,
detail="Llave {} esta duplicada".format(valor_llave)
)
request.app.key_memory[nombre_modelo][valor_llave] = str(item_id)
request.app.memory[nombre_modelo][str(item_id)] = item
return str(item_id)
except Exception as e:
if type(e) != HTTPException:
raise Exception("Error al agregar el item: {}".format(str(e)))
raise e
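# Illustrative sketch of how add() is meant to be called (the object values are hypothetical):
#   usuario = Usuario(nombre="Ana", apellido="Diaz", email="ana@example.com", fecha_nacimiento="1990-01-01")
#   new_id = add(usuario, request)
#   # stores the object under request.app.memory["Usuario"][new_id] and, because
#   # Usuario.u_key() returns "email", also indexes request.app.key_memory["Usuario"]["ana@example.com"]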
def update( item_id, item, request: Request ):
""" Actualiza un item en la lista
:param item_id: ID del item a actualizar
:param item: Item a actualizar
:param request: Request
"""
try:
setattr(item, "ID", item_id)
nombre_modelo = item.__class__.__name__
item_original = get_by_id(item.__class__,item_id, request)
if not item_original:
raise HTTPException(status_code=404, detail="Item not found")
if hasattr(item, "u_key"):
llave = item.u_key()
IN_llave = getattr(item, llave)
llave_original = getattr(item_original, llave)
if IN_llave != llave_original and IN_llave not in request.app.key_memory[nombre_modelo]:
del request.app.key_memory[nombre_modelo][str(llave_original)]
request.app.key_memory[nombre_modelo][IN_llave] = str(item_id)
print(item)
request.app.memory[nombre_modelo][str(item_id)] = item
return str(item_id)
except Exception as e:
if type(e) != HTTPException:
raise Exception("Error al actualizar el item: {}".format(str(e)))
raise e
def get_all(modelo, pagina=1, cantidad=10, request=Request, order_by="ID", sort="asc"):
""" Retorna todos los items de la lista
:param modelo: Clase del modelo
:param pagina: Pagina a retornar
:param cantidad: Cantidad de items a retornar
:param request: Request
:param order_by: Campo por el cual se ordenara
:param sort: Orden ascendente o descendente
"""
try:
nombre_modelo = modelo.__name__
if nombre_modelo not in request.app.key_memory:
return Respuesta_Listar().__dict__
items = []
for item_id in request.app.memory[nombre_modelo]:
items.append(request.app.memory[nombre_modelo][item_id])
if not order_by:
order_by = "ID"
if order_by in list(Usuario.__fields__.keys()) and sort == "desc":
            items.sort(key=lambda x: getattr(x, order_by), reverse=True)  # items are model instances, so sort with getattr
if not pagina:
pagina = 1
if not cantidad:
cantidad = 10
return {
"data": items[(pagina-1)*cantidad:pagina*cantidad],
"total_items": len(items),
"total_paginas": len(items)//cantidad + 1
}
except Exception as e:
if type(e) != HTTPException:
raise Exception(f"Error al obtener todos los items:",e)
raise e
def get_by_id( modelo, item_id, request: Request ):
""" Retorna un item por su ID
:param item_id: ID del item a retornar
:param request: Request
"""
try:
nombre_modelo = modelo.__name__
if nombre_modelo not in request.app.key_memory:
raise HTTPException(
status_code=404,
detail="Item not found"
)
if str(item_id) not in request.app.memory[nombre_modelo]:
raise HTTPException(
status_code=404,
detail="Item not found"
)
item = request.app.memory[nombre_modelo][str(item_id)]
return item
except Exception as e:
if type(e) != HTTPException:
raise Exception(f"Error al obtener el item: {str(e)}")
raise e
def delete( modelo, item_id, request:Request ):
""" Elimina un item de la lista
:param item_id: ID del item a eliminar
:param request: Request
"""
try:
nombre_modelo = modelo.__name__
if nombre_modelo not in request.app.key_memory:
raise HTTPException(
status_code=404,
detail="Item not found"
)
if str(item_id) not in request.app.memory[nombre_modelo]:
raise HTTPException(
status_code=404,
detail="Item not found"
)
item = request.app.memory[nombre_modelo][str(item_id)]
if hasattr(item, "u_key"):
llave = item.u_key()
valor_llave = getattr(item, llave)
del request.app.key_memory[nombre_modelo][valor_llave]
del request.app.memory[nombre_modelo][str(item_id)]
except Exception as e:
if type(e) != HTTPException:
raise Exception(f"Error al eliminar el item: {str(e)}")
raise e
```
#### File: api/models/usuario.py
```python
from uuid import UUID
from datetime import date
from typing import Optional, Union
from pydantic import BaseModel, Field
class Usuario(BaseModel):
ID: Optional[UUID] = Field(
title="ID del usuario",
description="ID del usuario",
example="3fa85f64-5717-4562-b3fc-2c963f66afa6"
)
nombre: str = Field(
...,
title="Nombre del usuario",
description="Nombre del usuario",
min_length=3,
max_length=50,
example="Juan"
)
apellido: str = Field(
...,
title="Apellido del usuario",
description="Apellido del usuario",
min_length=3,
max_length=50,
example="Perez"
)
email: str = Field(
...,
title="Email del usuario",
description="Email del usuario",
min_length=3,
max_length=50,
example="<EMAIL>"
)
fecha_nacimiento: Union[str,date] = Field(
...,
title="Fecha de nacimiento del usuario",
description="Fecha de nacimiento del usuario",
example="2020-01-01"
)
def u_key(self):
""" Entrega la llave unica para el modelo Usuario
"""
return "email"
```
#### File: api/services/usuarios_services.py
```python
import datetime
from uuid import UUID
from api.actions import storage
from fastapi import HTTPException
from api.models.usuario import Usuario
from starlette.requests import Request
from api.dependencies import validar_email, validar_formato_fecha,validar_edad
FORMATO_FECHA = "%Y-%m-%d"
EDAD_MINIMA = 18
EDAD_MAXIMA = 100
class Usuarios_Services:
""" Sección de servicios para el manejo de la logica de negocio
Attributes:
FORMATO_FECHA (str): Formato de fecha para validar
EDAD_MINIMA (int): Edad minima para validar
EDAD_MAXIMA (int): Edad maxima para validar
"""
def agregar_usuario(self, usuario: Usuario, request: Request) -> dict:
""" Agrega un usuario a la base de datos.
:param usuario: Usuario a agregar
:param request: Request de FastAPI
"""
try:
if not validar_email(getattr(usuario, "email")):
raise HTTPException(
status_code=400,
detail="El email no es válido"
)
fecha_nacimiento = usuario.fecha_nacimiento
if not validar_formato_fecha(fecha_nacimiento, FORMATO_FECHA):
raise HTTPException(
status_code=400,
detail="El formato de la fecha de nacimiento no es válida"
)
usuario.fecha_nacimiento = datetime.datetime.strptime(fecha_nacimiento, FORMATO_FECHA)
if not validar_edad(usuario.fecha_nacimiento, EDAD_MINIMA, EDAD_MAXIMA):
raise HTTPException(
status_code=400,
detail="La edad no es válida"
)
usuario_id = storage.add(usuario, request)
return { "ID": usuario_id }
except Exception as e:
print("Error al agregar usuario: {}".format(str(e)))
raise e
def editar_usuario(self, usuario_id: UUID, usuario: Usuario, request: Request) -> dict:
""" Edita un usuario de la base de datos.
:param usuario_id: ID del usuario a editar
:param usuario: Usuario a editar
:param request: Request de FastAPI
"""
try:
if not validar_email(getattr(usuario, "email")):
raise HTTPException(
status_code=400,
detail="El email no es válido"
)
fecha_nacimiento = usuario.fecha_nacimiento
if not validar_formato_fecha(fecha_nacimiento, FORMATO_FECHA):
raise HTTPException(
status_code=400,
detail="El formato de la fecha de nacimiento no es válida"
)
usuario.fecha_nacimiento = datetime.datetime.strptime(fecha_nacimiento, FORMATO_FECHA)
if not validar_edad(usuario.fecha_nacimiento, EDAD_MINIMA, EDAD_MAXIMA):
raise HTTPException(
status_code=400,
detail="La edad no es válida"
)
storage.update(usuario_id, usuario, request)
return { "ID": usuario_id }
except Exception as e:
print("Error al editar usuario: {}".format(str(e)))
raise e
def eliminar_usuario(self, usuario_id: UUID, request: Request) -> dict:
""" Elimina un usuario de la base de datos.
:param usuario_id: ID del usuario a eliminar
:param request: Request de FastAPI
"""
try:
storage.delete(Usuario, usuario_id, request)
return { "ID": usuario_id }
except Exception as e:
print("Error al eliminar usuario: {}".format(str(e)))
raise e
def listar_usuarios(self, pagina: int, cantidad: int, order_by: str, sort: str, request: Request)-> dict:
""" Obtiene una lista de usuarios de la base de datos.
:param pagina: Pagina a retornar
:param cantidad: Cantidad de usuarios a retornar
:param order_by: Campo por el cual se ordenará la lista
:param sort: Orden ascendente o descendente
:param request: Request de FastAPI
"""
try:
return storage.get_all(Usuario, pagina, cantidad, request, order_by, sort)
except Exception as e:
print("Error al listar usuarios: {}".format(str(e)))
raise e
def obtener_usuario(self, usuario_id: UUID, request: Request) -> Usuario:
""" Retorna un usuario por su ID
:param usuario_id: ID del usuario a consultar
:param request: Request de FastAPI
"""
try:
usuario = storage.get_by_id(Usuario, usuario_id, request)
return usuario
except Exception as e:
print("Error al obtener usuario: {}".format(str(e)))
raise e
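    # Illustrative flow (a sketch; the validar_* helpers are imported from api.dependencies above):
    #   services = Usuarios_Services()
    #   services.agregar_usuario(usuario, request)  -> {"ID": "<uuid>"} on success,
    #   or raises HTTPException(400) when the email, date format or age checks fail.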
``` |
{
"source": "jhonniel/Queuing-python",
"score": 3
} |
#### File: site-packages/click_threading/_compat.py
```python
import inspect
import sys
PY2 = sys.version_info[0] == 2
if PY2:
getargspec = inspect.getargspec
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
else:
getargspec = inspect.getfullargspec
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
```
#### File: Database/es/ElasticSearchClient.py
```python
import uuid
from copy import deepcopy
import elasticsearch
from Database.es.es_utils import is_empty
MAX_SIZE = 1000
SIZE = 10000
REQUEST_TIMEOUT = 300
def make_query(search_array=None, filter_array=None, to_return='*', count_query='yes'):
"""
searchArray is of the format : [
["toSearchFor", "fields, *to, search^3, in*" ],
["toSearchFor2", ...],
...
]
filterArray is of the format : [
["toSearchFor", "fields, *to, search^3, in*" ],
["field_to_filter_on", min_value, max_value],
...
]
toReturn is an array of fields to be returned in the results
"""
if search_array is None:
search_array = []
if filter_array is None:
filter_array = []
query_must_array = []
for one_s in search_array:
if one_s[1] == '*':
fields = '*'
else:
fields = one_s[1].split(',')
search_dict = {
'multi_match': {
'query': one_s[0],
'type': 'phrase',
'fields': fields
}
}
query_must_array.append(search_dict)
    es_filters = []
    for values in filter_array:
search_dict = dict()
if len(values) == 3:
search_dict = {
'range': {
values[0]: {
'gte': values[1],
'lte': values[2]
}
}
}
elif len(values) == 2:
if values[1] == '*':
fields = '*'
else:
fields = values[1].split(',')
search_dict = {
'multi_match': {
'query': values[0],
'type': 'phrase',
'fields': fields
}
}
        es_filters.append(search_dict)
if count_query == 'yes':
query = {
'query': {
'bool': {
'must': query_must_array,
                    'filter': es_filters
}
},
'_source': to_return
}
else:
query = {
'query': {
'bool': {
'must': query_must_array,
                    'filter': es_filters
}
}
}
return query
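# Illustrative usage (the field names are hypothetical, for clarity only):
#   q = make_query(search_array=[["python", "skills,title^2"]],
#                  filter_array=[["exp_years", 2, 8]])
#   builds a bool query whose "must" clause is a phrase multi_match on skills/title and whose
#   "filter" clause is a range filter on exp_years, returning all _source fields.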
class ElasticSearchClient:
def __init__(self,es_url="elasticsearch", es_port="8001", **kwargs):
self._ES_URL = es_url
self._ES_PORT = es_port
if kwargs is not None:
try:
self._PRE_PROCESS_HARD = kwargs.get('preprochard')
self._SEARCHES = kwargs.get('searches')
self._USERS = kwargs.get('users')
self._EQUILATERAL = kwargs.get('equilateral')
self._MERCURIAL = kwargs.get('mercurial')
except Exception as e:
raise e
def get_es_connection(self):
try:
es_connection = elasticsearch.Elasticsearch([{'host': self._ES_URL, 'port': self._ES_PORT}])
except Exception as e:
raise e
return es_connection
def nested_es_search(self, es_query=None, size=200):
if is_empty(es_query):
return "Empty query"
es_connection = self.get_es_connection()
try:
res = es_connection.search(index=self._PRE_PROCESS_HARD, size=size, body=es_query, request_timeout=300)
res = [r['_source'] for r in res['hits']['hits']]
except Exception:
raise
return res
def search(self, search_array=None, filter_array=None,size=200,
to_return='*'):
"""
searchArray is of the format : [
["toSearchFor", "fields, *to, search^3, in*" ],
["toSearchFor2", ...],
...
]
filterArray is of the format : [
["toSearchFor", "fields, *to, search^3, in*" ],
["field_to_filter_on", min_value, max_value],
...
]
toReturn is an array of fields to be returned in the results
verbose = True to set verbosity level
"""
res = None
if filter_array is None:
filter_array = []
if search_array is None:
search_array = []
query = make_query(search_array, filter_array, to_return)
es_connection = self.get_es_connection()
try:
res = es_connection.search(index=self._PRE_PROCESS_HARD, size=500, body=query, request_timeout=300)
res = [r['_source'] for r in res['hits']['hits']]
except Exception as e:
print "----Elastic search error-----" + str(e)
return res
def count(self, cities=None, mandatory_skills=None, minexp=-1, maxexp=400):
"""
cities is a vector of city names.
mandatory_skills is a vector of skills that we need for sure in the results.
"""
if mandatory_skills is None:
mandatory_skills = []
if cities is None:
cities = []
total = None
count = 0
for city in cities:
filter_array = [[city, "locality"], ["exp_years", minexp, maxexp]]
for skill in mandatory_skills:
filter_array.append([skill, "preproc_skills_fuzzy"])
query = make_query(filter_array=filter_array, to_return="*")
es_connection = self.get_es_connection()
total = es_connection.count(index=self._PRE_PROCESS_HARD, body=query)
            count += total['count']  # the count API returns a dict like {'count': N, ...}
        return count
def get_user_search_ids(self, user_id):
"""gets search_ids from es for user id"""
if is_empty(user_id):
return "Empty user id"
ret = None
query = make_query(search_array=[[user_id, "user_id", "search_ids"]], count_query='no')
es_connection = self.get_es_connection()
try:
ret = es_connection.search(index=self._USERS, doc_type="user", body=query)
except Exception as e:
print "----Elastic search error-----" + str(e)
return ret['hits']['hits']
def update_search_id(self, user_id, list_of_search_ids, document_id):
"""updates search_ids list for user id"""
ret = None
if is_empty(user_id) or is_empty(list_of_search_ids) or is_empty(document_id):
return "Supply valid arguments"
body = {
"user_id": user_id,
"search_ids": list_of_search_ids
}
es_connection = self.get_es_connection()
try:
ret = es_connection.index(index=self._USERS, doc_type="user", body=body, id=document_id)
except Exception as e:
print "----Elastic search error-----" + str(e)
return ret
def get_cached_search(self, user_id, search_id):
"""gets cached search with sid for user_id"""
if is_empty(user_id) or is_empty(search_id):
return "Pass valid arguments"
ret = None
query = {
"query": {
"bool": {
"must": [
{"match": {"user_id": user_id}},
{"match": {"search_id": search_id}}
]
}
}
}
es_connection = self.get_es_connection()
try:
ret = es_connection.search(index=self._SEARCHES, doc_type='search', body=query)
except Exception as e:
print "----Elastic search error-----" + str(e)
return ret
def fetch_search_document(self, search_id, user_id):
if is_empty(user_id) or is_empty(search_id):
return "Provide valid arguments"
ret = None
query = {
"query": {
"bool": {
"must": [
{"term": {"user_id": user_id}},
{"term": {"search_id": search_id}}
]
}
}
}
es_connection = self.get_es_connection()
try:
ret = es_connection.search(index=self._SEARCHES, size=1, doc_type='search', body=query)
except Exception as e:
print "----Elastic search error-----" + str(e)
return ret['hits']['hits']
def cache_searches(self, search_id, search_cache, user_id, query, edit_flag):
"""saves the search with search id ,cache and user_id"""
if is_empty(search_id) or is_empty(search_cache) or is_empty(user_id) or is_empty(edit_flag):
return "Provide valid arguments"
es_connection = self.get_es_connection()
try:
cache_ = {"search_id": search_id, "user_id": user_id, "query": query, "cache": search_cache}
if str(edit_flag) == str(0):
ret = es_connection.index(index=self._SEARCHES, doc_type='search', id=str(uuid.uuid4()), body=cache_)
else:
document = self.fetch_search_document(search_id, user_id)
document_id = document[0]['_id']
es_connection.delete(index=self._SEARCHES, doc_type="search", id=document_id)
ret = es_connection.index(index=self._SEARCHES, doc_type="search", body=cache_, id=document_id)
except Exception as e:
print "----Elastic search error-----" + str(e)
raise
return ret
def fetch_query_details(self, user_id):
if is_empty(user_id):
return "Provide valid arguments"
ret = None
body = {
"_source": ["query"],
"query": {
"bool": {
"must": [
{"match": {"user_id": user_id}}
]
}
}
}
es_connection = self.get_es_connection()
try:
ret = es_connection.search(index=self._SEARCHES, doc_type='search', body=body, size=MAX_SIZE)
except Exception as e:
print "------Elastic search error-----" + str(e)
return ret['hits']['hits']
def bulk_query(self, field, search_items_list, es_index=None, source="*"):
if is_empty(field) or is_empty(search_items_list) or is_empty(es_index):
return "Provide valid arguments"
query = {
"_source": source,
'query': {
'bool': {
'should': [
]
}
}
}
template = {
'constant_score': {
'query': {
'term': {
}
}
}
}
for item in search_items_list:
obj = deepcopy(template)
obj['constant_score']['query']['term'][field] = item
query['query']['bool']['should'].append(deepcopy(obj))
ordered_list = []
try:
item_index = {}
for index, item in enumerate(search_items_list):
item_index[item] = index
es_connection = self.get_es_connection()
res = es_connection.search(index=es_index, body=query, size=SIZE, request_timeout=REQUEST_TIMEOUT)
candidates = [i['_source'] for i in res['hits']['hits']]
ordered_list = [0] * len(item_index)
for each in candidates:
real_index = item_index[each['unique_id']]
ordered_list[real_index] = each
except Exception as e:
print (str(e))
return ordered_list
def fetch_search_details(self, search_id, user_id):
if is_empty(search_id) or is_empty(user_id):
return "Provide valid arguments"
ret = None
body = {
"_source": ["query"],
"query": {
"bool": {
"must": [
{"term": {"user_id": user_id}},
{"term": {"search_id": search_id}}
]
}
}
}
try:
es_connection = self.get_es_connection()
ret = es_connection.search(index=self._SEARCHES, doc_type='search', body=body, size=MAX_SIZE)
except Exception as e:
print "------Elastic search error-----" + str(e)
return ret['hits']['hits']
def update_equilateral(self, unique_id, databin_urls, databin_data, timestamp):
if is_empty(unique_id) or is_empty(timestamp):
return "Provide valid arguments"
ret = None
query = {
'equilateral_data': databin_data,
'other_urls': databin_urls,
'timestamp': timestamp,
'unique_id': unique_id
}
es_connection = self.get_es_connection()
try:
ret = es_connection.index(index=self._EQUILATERAL, doc_type='data', body=query)
except Exception as e:
print str(e)
return ret
def update_mercurial(self, unique_id, prediction, timestamp):
if is_empty(unique_id) or is_empty(timestamp):
return "Provide valid arguments"
ret = None
query = {
'spa': prediction,
'timestamp': timestamp,
'unique_id': unique_id,
}
es_connection = self.get_es_connection()
try:
ret = es_connection.index(index=self._MERCURIAL, doc_type='data', body=query)
except Exception as e:
print str(e)
return ret
```
#### File: Database/es/es_utils.py
```python
def none_check(value):
if value is None:
return False
else:
return True
def is_empty(any_type_value):
if any_type_value:
return False
else:
return True
```
#### File: site-packages/threading_sched/default.py
```python
from __future__ import absolute_import
from __future__ import print_function
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (c) 2011 Hard Consulting Corporation"
__license__ = "GPLv3 (or later)"
import threading
import sched
class scheduler(sched.scheduler):
"""
Thread-safe implementation of the stock Python sched.scheduler class. The API remains basically
the same, with some optional additions to support creating custom prioritization schemes.
We implement locking for thread safety, and we awaken our run method whenever the events in the
list might have changed in a way that could shorten our timeout.
The following are extensions to the Python sched.scheduler API:
- Keyword Arguments
In enter{abs}(), an optional kwargs argument may be provided, containing a dictionary of
keyword arguments to pass to the function.
def enterabs(self, time, priority, action, argument, kwargs={}):
"""
def __init__(self, *args, **kwargs):
sched.scheduler.__init__(self, *args, **kwargs)
if not hasattr( self, '_lock' ):
self._lock = threading.RLock() # < Python 3.3
self._cond = threading.Condition( self._lock )
def enterabs(self, time, priority, action, argument, kwargs=None):
"""Assumes enter() uses enterabs(). Since our Condition uses our RLock, we can safely acquire the
Condition, and issue the notify_all; it won't be delivered 'til we fully release our
self._lock. Since base sched.scheduler is an old-style class in Python 2, don't use super.
"""
if kwargs is None:
kwargs = {}
with self._cond:
if hasattr( sched.Event, 'kwargs' ): # iff >= Python3
e = sched.scheduler.enterabs( self, time, priority, action, argument, kwargs )
else:
# Prepare a closure to wrap the action, trapping the supplied keyword kwargs (if
# any). If *any* keyword arguments are supplied, then they will be passed to the
# action along with the event arguments.
e = sched.scheduler.enterabs(
self, time, priority, lambda *args: action( *args, **kwargs ), argument )
# Awaken any thread awaiting on a condition change, eg .run(), or .wait()
self._cond.notify_all()
#print "Scheduling %s" % ( str(e) )
return e
def enter(self, delay, priority, action, argument, kwargs=None):
return self.enterabs(self.timefunc() + delay, priority, action, argument, kwargs=kwargs)
def cancel(self, *args, **kwargs):
"""Removing an event can only result in us awakening too early, which is generally not a problem.
However, if this empties the queue completely, we want run() to wake up and return right
away!
"""
with self._cond:
e = sched.scheduler.cancel(self, *args, **kwargs)
self._cond.notify_all()
return e
def empty(self):
with self._lock:
return sched.scheduler.empty(self)
def wait(self):
"""
Awaits a change in condition that could mean that there are now events to process. Use this
when the queue is (or might be) empty, and a thread needs to wait for something to process.
"""
with self._cond:
if self.empty():
self._cond.wait()
def next_event(self, now=None):
"""
Return the next scheduled event, without removing it from the queue. Throws an exception if
none available. Override this method to implement other priority schemes.
"""
with self._lock:
return self._queue[0] # Strictly by time, then priority
def run(self, pred=None):
"""
Retrieve an event, waiting and looping if it hasn't expired. Otherwise, remove it from the
schedule, and run it. Unlike the underlying sched.scheduler, this implementation waits in a
multithreading sensitive fashion; if a new event is scheduled, we'll awaken and re-schedule
our next wake-up.
Returns when there are no more events left to run, or until the supplied predicate evaluates
False.
This run method is not usually appropriate to use directly as a Thread.run method, because
it returns when the schedule is empty; this often doesn't mean the program is done. To
safely process events, a Thread must know (somehow) that the overall program is not yet
complete, and implement its own run method like this, waiting for more events to be
scheduled each time scheduler.run returns:
class scheduler_thread(Thread):
def __init__(self):
self.sch = sched.scheduler(...)
...
def run(self):
while ( ... we are not finished ... ):
self.sch.run()
self.sch.wait()
"""
while True if pred is None else pred():
# Get the next event, relative to the current time. When schedule is empty, we're done.
now = self.timefunc()
with self._cond: # Acquires self._lock
if self.empty():
break
# Queue is not empty, guaranteed
event = self.next_event(now=now)
if now < event.time:
# Next event hasn't expired; Wait 'til expiry, or an self._cond.notify...()
self._cond.wait(event.time - now) # Releases self._lock
#print "Schedule condition wait expired after %fs" % (self.timefunc() - now)
continue
# TODO: this is inefficient pre-3.2, due to a busy wait loop in the
# threading Condition. Perhaps we should detect this, and implement in
# terms of spawning another Thread to sleep 'til the desired time, then
# trigger .notify_all()?
# An expired event is detected. No schedule modification can have occurred (we hold
# the lock, and no self._cond.wait() has been processed, because it always will
# 'continue' the loop) so we can safely cancel it. We can make no assumptions about
# its position in the _queue, to allow arbitrary scheduling algorithms.
self.cancel(event)
# Trigger the expired (and removed) event's action wrapper function. This may result in
# schedule modification, so we do this outside the lock. If func raises an exception,
# the scheduler's invariant is maintained, and this method may be called again.
#print "Scheduled event firing: %s" % (str(event))
if hasattr( event, 'kwargs' ): # iff >= Python3
event.action( *event.argument, **event.kwargs )
else:
event.action( *event.argument )
self.delayfunc(0) # Let other threads run
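# --- Minimal usage sketch (editor addition, not part of the original module) ---
# Schedules two one-shot events on the thread-safe scheduler defined above and runs them.
if __name__ == '__main__':
    import time
    s = scheduler(time.time, time.sleep)
    s.enter(0.1, 1, print, ('first',))
    s.enter(0.2, 1, print, ('second',), kwargs={})
    s.run()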
``` |
{
"source": "jhonnold/fn-dash",
"score": 3
} |
#### File: app/models/input.py
```python
import datetime
from app.database import db
class Input(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id'))
input_type = db.Column(db.String())
created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
stats = db.relationship('Stat', backref='input', lazy='dynamic')
def __repr__(self):
return "<Input '{}' - '{}'>".format(self.user_id, self.input_type)
```
#### File: app/tasks/stat_history.py
```python
import celery
from app import db, app
from app.models import Stat, StatHistory
from . import AppContextBase
@celery.task(base=AppContextBase, name="record_stats")
def record_stats():
stats = Stat.query.all()
for stat in stats:
        history = StatHistory(
            stat_id=stat.id,
            placements=stat.placements,
            kills=stat.kills,
            matchesplayed=stat.matchesplayed,
            playersoutlived=stat.playersoutlived,
            minutesplayed=stat.minutesplayed,
        )
        db.session.add(history)
db.session.commit()
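# --- Hedged scheduling sketch (editor addition; the entry name and interval are assumptions) ---
# The task above can be run periodically by Celery beat on the project's Celery app
# (not shown in this file), for example:
#   celery_app.conf.beat_schedule = {
#       'record-stats-hourly': {'task': 'record_stats', 'schedule': 3600.0},
#   }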
```
#### File: tasks/stat_tracker/update_hash.py
```python
import celery, datetime
from app import db
from app.tasks import AppContextBase
from app.models import User
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@celery.task(base=AppContextBase, name="update_hash")
def update_hash(data_hash, id):
if data_hash is None:
return
user = User.query.get(id)
logger.warn('User {} updated. Setting hash to {}'.format(user, data_hash))
user.last_known_data_hash = str(data_hash)
user.updated_at = datetime.datetime.now()
db.session.commit()
``` |
{
"source": "JhonnyBn/MiniCurso-Flask-Heroku",
"score": 3
} |
#### File: MiniCurso-Flask-Heroku/1-python-flask-simplificado/app.py
```python
import os, json
from flask import Flask, render_template, request, redirect, url_for
from flask_bcrypt import Bcrypt
app = Flask(__name__)
bcrypt = Bcrypt(app)
# Setup app config
host = '0.0.0.0'
port = int(os.environ.get('PORT', '5000'))
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'random string') # Change this!
@app.route('/', methods = ["GET","POST"])
def index():
return "Hello World!"
@app.route('/home', methods=['GET'])
def home():
return render_template('home.html')
@app.route('/login', methods=['GET'])
def login():
return render_template('login.html')
@app.route('/senha', methods=['POST'])
def senha():
senha = request.json.get('senha', None)
if senha is None:
return { "senha": "" }, 200
    password_hash = bcrypt.generate_password_hash(senha).decode('utf-8')
return { "senha": password_hash }, 200
if __name__ == '__main__':
app.run(host=host, port=port)
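# --- Hedged usage sketch (editor addition; host/port follow the defaults above) ---
# With the server running locally, the /senha endpoint can be exercised with a JSON POST:
#   curl -X POST http://localhost:5000/senha \
#        -H "Content-Type: application/json" \
#        -d '{"senha": "minha-senha"}'
# The response is {"senha": "<bcrypt hash>"} with status 200.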
``` |
{
"source": "jhonnycano/201601-rp-proyectofinal",
"score": 3
} |
#### File: jhonnycano/201601-rp-proyectofinal/util.py
```python
import datetime
import generales as g
#------------------------------------------------------------------------------
# program-specific data functions
#------------------------------------------------------------------------------
def crear_tabla(cn):
sql = """
CREATE TABLE IF NOT EXISTS tweet (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT
, tweet_id VARCHAR(40) NOT NULL
, fch_creacion DATETIME
, usuario VARCHAR(50) NOT NULL
, contenido VARCHAR(140) NOT NULL
, clase INTEGER NOT NULL
);
"""
cn.execute(sql)
def insertar_tweet(cn, itm):
sql = """
INSERT INTO tweet
(tweet_id, fch_creacion, usuario, contenido, clase)
VALUES
(?, ?, ?, ?, ?);
"""
fch = datetime.datetime.strptime(itm[2], '%a %b %d %H:%M:%S PDT %Y')
pars = (itm[1], fch, itm[4], itm[5], int(itm[0]))
insertar(cn, sql, pars, traer_id=False)
def traer_datos(cn, cant=1000):
filtro = "LIMIT {}".format(cant)
if cant == -1:
filtro = ""
sql = """
SELECT tweet_id, contenido, clase
FROM
(SELECT tweet_id, contenido, clase FROM tweet WHERE clase = 0 {0}) a
UNION
SELECT tweet_id, contenido, clase
FROM
(SELECT tweet_id, contenido, clase FROM tweet WHERE clase = 4 {0}) b
;
""".format(filtro)
rs = g.traer_lista(cn, sql)
return rs
``` |
{
"source": "jhonny-me/replaceImage",
"score": 2
} |
#### File: jhonny-me/replaceImage/uploadToQiniu.py
```python
import qiniu
import urllib
import os
import sys
import json
with open('config.json', 'r') as f:
config = json.load(f)
# ---------------- Manual configuration ----------------
accessKey = config['AK']#"<KEY>"
secretkey = config['SK']#"4e0Ia1Li9txMAt_P61WDmEpgLy1b4RMFIozGEbbs"
# Domain of the upload bucket; obtain it from the Qiniu console
bucket_url = {
    "fcc-blogs": "ogit74i74.bkt.clouddn.com",
}
bucket = config['bucket']#"fcc-blogs" # upload bucket
# ---------------- Default configuration ----------------
img_suffix = ["jpg", "jpeg", "png", "bmp", "gif"]
os.chdir(sys.path[0])
result_file = "上传结果.txt" # 保存上传结果
if os.path.exists(result_file):
os.remove(result_file)
class Qiniu(object):
"""七牛上传与下载的工具类
需要七牛的Python SDK
pip install qiniu
SDK详细用法见 http://developer.qiniu.com/docs/v6/sdk/python-sdk.html
"""
SUCCESS_INFO = "上传成功!"
def __init__(self, accessKey, secretkey):
self.accessKey = accessKey
self.secretkey = secretkey
self._q = qiniu.Auth(self.accessKey, self.secretkey)
def upload_file(self, bucket, up_filename, file_path):
"""上传文件
Args:
bucket: 上传空间的名字
up_filename: 上传后的文件名
file_path: 本地文件的路径
Returns:
ret: dict变量,保存了hash与key(上传后的文件名)
info: ResponseInfo对象,保存了上传信息
url: st, 上传后的网址
"""
token = self._q.upload_token(bucket)
ret, info = qiniu.put_file(token, up_filename, file_path)
url = self.get_file_url(bucket, up_filename)
return ret, info, url
def get_file_url(self, bucket, up_filename):
if not bucket in bucket_url.keys():
raise AttributeError("空间名不正确!")
url_prefix = bucket_url[bucket]
url = url_prefix + "/" + up_filename
return url
def upload_from(file_path, custom_name):
if not os.path.isfile(file_path):
print "please specify a file path"
return
if custom_name == "":
print "custom name must not be \"\""
return
q = Qiniu(accessKey, secretkey)
ret, info, url = q.upload_file(bucket, custom_name, file_path)
print("已上传: %s " % url)
return url
pass
def main():
print("deodemdeo")
if len(sys.argv) < 3:
print "please input custom name and file path"
sys.exit(0)
customName = sys.argv[1]
file = sys.argv[2]
upload_from(file, customName)
pass
if __name__ == '__main__':
upload_from("1.jpg", "demo1.jpg")
``` |
{
"source": "jhonnysanchezillisaca/apm-server",
"score": 3
} |
#### File: tests/system/test_access.py
```python
from apmserver import AccessTest
import requests
class Test(AccessTest):
def test_with_token(self):
"""
Test that access works with token
"""
url = 'http://localhost:8200/v1/transactions'
transactions = self.get_transaction_payload()
def oauth(v): return {'Authorization': v}
r = requests.post(url, json=transactions)
assert r.status_code == 401, r.status_code
r = requests.post(url,
json=transactions,
headers=oauth('Bearer 1234'))
assert r.status_code == 202, r.status_code
r = requests.post(url,
json=transactions,
headers=oauth('Bearer wrongtoken'))
assert r.status_code == 401, r.status_code
r = requests.post(url,
json=transactions,
headers=oauth('Wrongbearer 1234'))
assert r.status_code == 401, r.status_code
def test_with_token_v2(self):
"""
Test that access works with token
"""
url = 'http://localhost:8200/intake/v2/events'
transactions = self.get_event_v2_payload(name="transactions.ndjson")
headers = {'content-type': 'application/x-ndjson'}
def oauth(v):
aheaders = {'Authorization': v}
aheaders.update(headers)
return aheaders
r = requests.post(url, data=transactions, headers=headers)
assert r.status_code == 401, r.status_code
r = requests.post(url,
data=transactions,
headers=oauth('Bearer 1234'))
assert r.status_code == 202, r.status_code
r = requests.post(url,
data=transactions,
headers=oauth('Bearer wrongtoken'))
assert r.status_code == 401, r.status_code
r = requests.post(url,
data=transactions,
headers=oauth('Wrongbearer 1234'))
assert r.status_code == 401, r.status_code
``` |
{
"source": "jhonpedro/yt-sub-extraction-app",
"score": 3
} |
#### File: server/resources/YoutubeExtraction.py
```python
from flask import request
from flask_restful import Resource
from youtube_transcript_api import YouTubeTranscriptApi as api
import nltk
from resources.functions import countByWordsTimesSpoken
class YoutubeExtraction(Resource):
def get(self):
YtVideoId = request.args.get('videourl')
rso = request.args.get('rso')
        # Verify that rso is a str and convert it to a boolean
if isinstance(rso, str):
try:
rso = bool(int(rso))
except:
return { 'message': 'rso must be <0> or <1>' }, 403
else:
rso = False
removeSpokenOnce = rso
if len(YtVideoId) > 11:
            videoIdPosition = YtVideoId.find('v=') + 2
            YtVideoId = YtVideoId[videoIdPosition:videoIdPosition + 11]
try:
transcriptList = api.get_transcript(
YtVideoId, languages=['pt', 'en'])
allTranscriptText = ''
for transcript in transcriptList:
allTranscriptText += transcript['text'] + ' '
stopWordsPT = nltk.corpus.stopwords.words('portuguese')
allWords = []
for word in allTranscriptText.split(' '):
if word not in stopWordsPT:
allWords.append(word)
allWordsStem = []
# Stem the word https://www.nltk.org/howto/stem.html
for word in allWords:
if len(word) > 1:
if not word.startswith('['):
allWordsStem.append(nltk.stem.RSLPStemmer().stem(word))
# This variable below is an array with tuples like this: (word, countOcurrenceOfTheWord)
wordWithCountTuples = countByWordsTimesSpoken.exec(
allWordsStem, removeSpokenJustOnce=removeSpokenOnce)
            # This will contain the normalized words, because when counting words
            # we remove the suffix, e.g. turning -> turn || virando -> vir
wordWithCountTuplesNormalized = []
for topTenStem in wordWithCountTuples:
for word in allWords:
if word.startswith(topTenStem[0]):
wordWithCountTuplesNormalized.append(
(word, topTenStem[1]))
break
return {
'wordsTimesSpoken': wordWithCountTuplesNormalized,
}
except Exception as error:
print(error)
return {
'message': 'this video subtitle may be disabled, try again this video later'
}, 400
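# --- Hedged wiring sketch (editor addition; the route and app/api names are assumptions) ---
# from flask import Flask
# from flask_restful import Api
# app = Flask(__name__)
# api = Api(app)
# api.add_resource(YoutubeExtraction, '/extract')  # GET /extract?videourl=<url>&rso=1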
``` |
{
"source": "jhonP-Li/DE_rpy2",
"score": 2
} |
#### File: jhonP-Li/DE_rpy2/DE_rpy2.py
```python
import pandas as pd
import numpy as np
import warnings
import rpy2.robjects as robjects
from rpy2.robjects import numpy2ri, pandas2ri, Formula
from rpy2.robjects.packages import importr
pandas2ri.activate()
numpy2ri.activate()
# import R libraries
DESeq2 = importr('DESeq2')
edgeR = importr('edgeR')
Limma = importr('limma')
stats = importr('stats')
to_dataframe = robjects.r('function(x) data.frame(x)')
class DE_rpy2:
"""
Running DESeq2, edgeR, limma through rpy2
input:
count_matrix: a pandas dataframe with each column as count
(float values in FPKM/RPKM are also acceptable as internal rounding will be done)
, and a id column for gene id
example:
id sampleA sampleB
geneA 5.1 1
geneB 4.2 5
geneC 1 2
design_matrix: a pandas dataframe with each column as a condition, and one row for one sample
Note that the sample name must be the index not a column
condition
sampleA1 treated
sampleA2 treated
sampleB1 untreated
sampleB2 untreated
        design_formula: defaults to the column name of the design matrix, example: "~ condition"
If it contains multiple conditions, this formula must be customised,
or the DESeq2 will only consider the first condition.
gene_column: column name of gene id columns in count_matrix, default = 'id'
"""
def __init__(self, count_matrix, design_matrix, design_formula=None, gene_column='id'):
assert gene_column in count_matrix, \
'column: \'%s\', not found in count matrix' % gene_column
assert count_matrix.shape[1] - 1 == design_matrix.shape[0], \
'The number of rows in design matrix must ' \
'be equal to the number of samples in count matrix'
        assert not pd.isna(count_matrix).values.any(), \
            'Null values are found in count matrix. ' \
            'Please check it'
        assert len(design_matrix.columns), \
            'Column names are needed in design matrix'
        if any(dtype.kind == 'f' for dtype in count_matrix.drop(gene_column, axis=1).dtypes):
warnings.warn('DESeq2 and edgeR only accept integer counts\n'
'The values in count matrix are automatically rounded\n'
'In fact the FPKM/RPKM input is not encouraged by DESeq2 officially\n')
# parameters used in DESeq2
self.count_matrix = pandas2ri.py2ri(count_matrix.drop(gene_column, axis=1).astype('int'))
self.design_matrix = pandas2ri.py2ri(design_matrix)
self.gene_ids = count_matrix[gene_column]
self.gene_column = gene_column
self.deseq2_result = None
self.deseq2_label = None
if design_formula is None:
condition = design_matrix.columns[0]
if len(design_matrix.columns) > 1:
warnings.warn('Multiple conditions are set in design matrix,\n'
'you\'d better customise the design formula.\n'
'Here it only considers the first condition\n')
self.design_formula = Formula('~ ' + condition)
else:
self.design_formula = Formula(design_formula)
# parameters used in edgeR
self.edgeR_group = numpy2ri.py2ri(design_matrix.iloc[:, 0].values)
self.edgeR_gene_names = numpy2ri.py2ri(count_matrix[gene_column].values)
self.edgeR_result = None
self.edgeR_label = None
# parameters used in limma
self.limma_result = None
self.limma_label = None
self.final_label = None
def deseq2(self, threshold=0.05, **kwargs):
"""
Run the standard DESeq2 workflow.
Get the DESeq2 results as DataFrame.
Return the label of each gene: 0 for not differentially expressed,
1 for differentially expressed.
:param threshold: threshold for the adjusted p-value.
default = 0.05.
:param kwargs: parameters of DESeq2 functions.
See official instructions for details:
http://www.bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html
:return:
label: pandas.DataFrame format with 2 columns: gene ids and labels
"""
# Run DESeq2 workflow
dds = DESeq2.DESeqDataSetFromMatrix(countData=self.count_matrix,
colData=self.design_matrix,
design=self.design_formula)
dds = DESeq2.DESeq(dds, **kwargs)
res = DESeq2.results(dds, **kwargs)
# Store the output matrix as DataFrame
self.deseq2_result = pandas2ri.ri2py(to_dataframe(res))
self.deseq2_result[self.gene_column] = self.gene_ids
# The adjusted p-value in the DESeq2 results
# may contain NAN
if any(pd.isna(self.deseq2_result['padj'].values)):
warnings.warn('There exist NAN in the adjusted p-value\n'
'see https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/'
'inst/doc/DESeq2.html#why-are-some-p-values-set-to-na\n')
# Reject the H0 hypothesis if p-value < threshold
labels = [int(x) for x in (self.deseq2_result['padj'] < threshold)]
label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': labels})
self.deseq2_label = label
return label
def edger(self, threshold=0.05):
"""
Run the standard edgeR workflow.
Get the edgR results as DataFrame.
Return the label of each gene:
0 for not differentially expressed,
1 for differentially expressed.
:param threshold: threshold for the p-value.
default = 0.05.
See official instructions for details:
https://www.bioconductor.org/packages/release/bioc/vignettes/edgeR/inst/doc/edgeRUsersGuide.pdf
:return:
label: pandas.DataFrame format with 2 columns: gene ids and labels
"""
# run edgeR workflow
# Create the DGEList object
dgList = edgeR.DGEList(counts=self.count_matrix, group=self.edgeR_group, genes=self.edgeR_gene_names)
# Normalize
dgList = edgeR.calcNormFactors(dgList, method="TMM")
# Setting up the model
robjects.r.assign('edgeR_group', self.edgeR_group)
designMat = stats.model_matrix(Formula('~ edgeR_group'))
# Estimating Dispersions
dgList = edgeR.estimateGLMCommonDisp(dgList, design=designMat)
dgList = edgeR.estimateGLMTrendedDisp(dgList, design=designMat)
dgList = edgeR.estimateGLMTagwiseDisp(dgList, design=designMat)
# Differential Expression
fit = edgeR.glmQLFit(dgList, designMat)
test = edgeR.glmQLFTest(fit)
res = edgeR.topTags(test, n=self.count_matrix.nrow)
res_df = pandas2ri.ri2py(to_dataframe(res))
# Sort the result on gene ids
gene_df = pd.DataFrame({'genes': self.gene_ids})
self.edgeR_result = pd.merge(gene_df, res_df, how='left')
# Reject the H0 hypothesis
labels = [int(x) for x in (self.edgeR_result['PValue'] < threshold)]
label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': labels})
self.edgeR_label = label
return label
def limma(self, threshold=0.05):
"""
Run the standard limma workflow.
Get the limma results as DataFrame.
Return the label of each gene:
0 for not differentially expressed,
1 for differentially expressed.
:param threshold: threshold for the p-value.
default = 0.05.
See official instructions for details:
https://ucdavis-bioinformatics-training.github.io/2018-June-RNA-Seq-Workshop/thursday/DE.html
:return:
label: pandas.DataFrame format with 2 columns: gene ids and labels
"""
# Create the DGEList object
dgList = edgeR.DGEList(counts=self.count_matrix, group=self.edgeR_group, genes=self.edgeR_gene_names)
# Normalize
dgList = edgeR.calcNormFactors(dgList, method="TMM")
# Setting up the model
robjects.r.assign('edgeR_group', self.edgeR_group)
designMat = stats.model_matrix(Formula('~ edgeR_group'))
# voom
v = Limma.voom(dgList, designMat)
# fitting
fit = Limma.lmFit(v, designMat)
fit = Limma.eBayes(fit)
res = Limma.topTable(fit, n=self.count_matrix.nrow)
res_df = pandas2ri.ri2py(to_dataframe(res))
# Sort the result on gene ids
gene_df = pd.DataFrame({'genes': self.gene_ids})
self.limma_result = pd.merge(gene_df, res_df, how='left')
# Reject the H0 hypothesis
labels = [int(x) for x in (self.limma_result['adj.P.Val'] < threshold)]
label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': labels})
self.limma_label = label
return label
def plot_label_difference(self):
"""
Plot the Venn diagram of the 3 label output.
Since we only interest in the differentially expressed genes.
The number on Venn diagram shows the number of samples labeled as 1.
Say differentially expressed genes.
"""
if self.limma_label is None:
warnings.warn('Seems you haven\'t get limma label\n'
'Automatically running limma...')
self.limma_label = self.limma()
if self.deseq2_label is None:
warnings.warn('Seems you haven\'t get DESeq2 label\n'
'Automatically running DESeq2...')
self.deseq2_label = self.deseq2()
if self.edgeR_label is None:
warnings.warn('Seems you haven\'t get edgeR label\n'
'Automatically running edgeR...')
self.edgeR_label = self.edger()
# Import the plot package
from matplotlib_venn import venn3
import matplotlib.pyplot as plt
labels = np.array([self.deseq2_label['label'].values, self.edgeR_label['label'].values,
self.limma_label['label'].values]).T
names = ['DESeq2', 'edgeR', 'limma']
venn_df = pd.DataFrame(data=labels, columns=names)
sets = {'000': 0, '001': 0, '010': 0, '011': 0, '100': 0, '101': 0, '110': 0, '111': 0}
for i in range(venn_df.shape[0]):
loc = [str(num) for num in venn_df.iloc[i, :]]
loc = loc[0] + loc[1] + loc[2]
sets[loc] += 1
venn3(sets, set_labels=names)
plt.show()
return sets
def get_final_label(self, method='inner'):
"""
There are 2 methods availabel:
inner: set those genes as differentially expressed,
say label 1, if all 3 tools agreed
vote: set those genes as differentially expressed,
say label 1, if all 2 out of the 3 tools agreed
union: set those genes as differentially expressed,
say label 1, as long as 1 tool agreed
"""
label = None
menu = ['inner', 'vote', 'union']
assert method in menu, \
'Please choose the correct method'
if self.limma_label is None:
warnings.warn('Seems you haven\'t get limma label\n'
'Automatically running limma...')
self.limma_label = self.limma()
if self.deseq2_label is None:
warnings.warn('Seems you haven\'t get DESeq2 label\n'
'Automatically running DESeq2...')
self.deseq2_label = self.deseq2()
if self.edgeR_label is None:
warnings.warn('Seems you haven\'t get edgeR label\n'
'Automatically running edgeR...')
self.edgeR_label = self.edger()
labels = self.deseq2_label['label'].values + self.edgeR_label['label'].values + self.limma_label['label'].values
if method == 'inner':
label = [int(x) for x in (labels == 3)]
if method == 'vote':
label = [int(x) for x in (labels >= 2)]
if method == 'union':
label = [int(x) for x in (labels >= 1)]
self.final_label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': label})
return self.final_label
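# --- Minimal usage sketch (editor addition; assumes R with DESeq2, edgeR and limma installed) ---
# counts = pd.DataFrame({'id': ['geneA', 'geneB', 'geneC'],
#                        'sampleA1': [5, 4, 1], 'sampleB1': [1, 5, 2]})
# design = pd.DataFrame({'condition': ['treated', 'untreated']},
#                       index=['sampleA1', 'sampleB1'])
# de = DE_rpy2(counts, design)
# final = de.get_final_label(method='vote')  # runs DESeq2, edgeR and limma as needed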
``` |
{
"source": "JhonPool4/robotics_python_lib",
"score": 3
} |
#### File: robotics_python_lib/labpythonlib/lab_markers.py
```python
from labpythonlib.lab_functions import rot2quat, rpy2rot
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
import numpy as np
import rospy
# ==========
# Colors
# ==========
color = dict()
color['RED'] = (1.0, 0.0, 0.0)
color['GREEN'] = (0.0, 1.0, 0.0)
color['BLUE'] = (0.0, 0.0, 1.0)
color['YELLOW'] = (1.0, 1.0, 0.0)
color['PINK'] = (1.0, 0.0, 1.0)
color['CYAN'] = (0.0, 1.0, 1.0)
color['BLACK'] = (0.0, 0.0, 0.0)
color['DARKGRAY'] = (0.2, 0.2, 0.2)
color['LIGHTGRAY'] = (0.5, 0.5, 0.5)
color['WHITE'] = (1.0, 1.0, 1.0)
# =====================
# Class ball marker
# =====================
class BallMarker(object):
"""
Info : class to visualize ball markers in RViz
"""
id = 0
def __init__(self, color, alpha=1.0, scale=0.05):
"""
The color can be specified as a list with 3 elements or as the color
dictionary (e.g. BLUE, RED, etc). Alpha sets the transparency and scale
scales the size of the ball
"""
reference_frame = rospy.get_param('reference_frame','base_link') # important
self.marker_pub = rospy.Publisher("visualization_marker", Marker,
queue_size=10)
self.marker = Marker()
self.marker.header.frame_id = reference_frame
self.marker.ns = "ball_markers"
self.marker.id = BallMarker.id
BallMarker.id += 1
self.marker.type = self.marker.SPHERE
self.marker.action = self.marker.ADD
self.marker.pose.position.x = 0.0
self.marker.pose.position.y = 0.0
self.marker.pose.position.z = 0.0
self.marker.pose.orientation.x = 0.0
self.marker.pose.orientation.y = 0.0
self.marker.pose.orientation.z = 0.0
self.marker.pose.orientation.w = 1.0
self.marker.scale.x = scale
self.marker.scale.y = scale
self.marker.scale.z = scale
self.setColor(color, alpha)
self.marker.lifetime = rospy.Duration()
def setColor(self, color, alpha=1.0):
self.marker.color.r = color[0]
self.marker.color.g = color[1]
self.marker.color.b = color[2]
self.marker.color.a = alpha
def position(self, T):
"""
Info: set position (4x4 NumPy homogeneous matrix) for the ball and publish it
"""
self.marker.pose.position.x = T[0,3]
self.marker.pose.position.y = T[1,3]
self.marker.pose.position.z = T[2,3]
#self.publish()
def xyz(self, position):
"""
Info: set position (list) for the ball and publish it
"""
self.marker.pose.position.x = position[0]
self.marker.pose.position.y = position[1]
self.marker.pose.position.z = position[2]
#self.publish()
def publish(self):
self.marker_pub.publish(self.marker)
# =====================
# Class ball marker
# =====================
class ArrowMarker(object):
"""
@info : class to visualize arrow markers in RViz
"""
id = 0
def __init__(self, color, alpha=1.0, scale=0.05):
reference_frame = rospy.get_param('reference_frame','base_link') # important
self.marker_pub = rospy.Publisher("visualization_marker", Marker,
queue_size=10)
self.marker = Marker()
self.marker.header.frame_id = reference_frame
self.marker.ns = "arrow_markers"
self.marker.id = ArrowMarker.id
ArrowMarker.id += 1
self.marker.type = self.marker.ARROW
self.marker.action = self.marker.ADD
self.marker.pose.position.x = 0.0
self.marker.pose.position.y = 0.0
self.marker.pose.position.z = 0.0
self.marker.pose.orientation.x = 0.0
self.marker.pose.orientation.y = 0.0
self.marker.pose.orientation.z = 0.0
self.marker.pose.orientation.w = 1.0
self.marker.scale.x = scale[0]
self.marker.scale.y = scale[1]
self.marker.scale.z = scale[2]
self.setColor(color, alpha)
self.marker.lifetime = rospy.Duration()
def setColor(self, color, alpha=1.0):
self.marker.color.r = color[0]
self.marker.color.g = color[1]
self.marker.color.b = color[2]
self.marker.color.a = alpha
def position(self, T):
"""
Info: set position (4x4 NumPy homogeneous matrix) for the ball and publish it
"""
self.marker.pose.position.x = T[0,3]
self.marker.pose.position.y = T[1,3]
self.marker.pose.position.z = T[2,3]
#self.publish()
def xyz(self, position):
"""
Info: set position (list) for the ball and publish it
"""
self.marker.pose.position.x = position[0]
self.marker.pose.position.y = position[1]
self.marker.pose.position.z = position[2]
#self.publish()
def rotation(self, quat):
self.marker.pose.orientation.w = quat[0]
self.marker.pose.orientation.x = quat[1]
self.marker.pose.orientation.y = quat[2]
self.marker.pose.orientation.z = quat[3]
def publish(self):
self.marker_pub.publish(self.marker)
class FrameMarker(object):
"""
@info: class to visualize a frame aixs in Rviz
@inputs:
--------
- xyz_pos: Cartesian position of the the axis
- alpha: marker transparency (0: solid color and 1: transparent)
"""
def __init__(self, xyz_pos=[0,0,0], alpha=0.5):
self.z_arrow = ArrowMarker(color['BLUE'], scale=[0.1, 0.015, 0.015], alpha=alpha)
self.z_arrow.xyz(xyz_pos)
self.Rz = np.array([[0, 0, -1], [0, 1, 0], [1, 0, 0]])
self.z_arrow.rotation(rot2quat(self.Rz))
self.x_arrow = ArrowMarker(color['RED'], scale=[0.1, 0.015, 0.015], alpha=alpha)
self.x_arrow.xyz(xyz_pos)
self.Rx = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.x_arrow.rotation(rot2quat(self.Rx))
self.y_arrow = ArrowMarker(color['GREEN'], scale=[0.1, 0.015, 0.015], alpha=alpha)
self.y_arrow.xyz(xyz_pos)
self.Ry = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
self.y_arrow.rotation(rot2quat(self.Ry))
def rotation(self, R):
"""
@info rotation of the frame axis
@inputs:
-------
- R: rotation matrix
"""
self.x_arrow.rotation(rot2quat(np.dot(R, self.Rx)))
self.y_arrow.rotation(rot2quat(np.dot(R, self.Ry)))
self.z_arrow.rotation(rot2quat(np.dot(R, self.Rz)))
def xyz(self, xyz_pos):
self.x_arrow.xyz(xyz_pos)
self.y_arrow.xyz(xyz_pos)
self.z_arrow.xyz(xyz_pos)
def publish(self):
"""
@info publish the information of the marker
"""
self.z_arrow.publish()
self.x_arrow.publish()
self.y_arrow.publish()
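# --- Minimal usage sketch (editor addition; assumes a running ROS master and RViz) ---
# rospy.init_node('marker_demo')
# ball = BallMarker(color['RED'])
# ball.xyz([0.3, 0.0, 0.4])
# frame = FrameMarker(xyz_pos=[0.3, 0.0, 0.4])
# rate = rospy.Rate(10)
# while not rospy.is_shutdown():
#     ball.publish()
#     frame.publish()
#     rate.sleep()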
``` |
{
"source": "jhonruda25/VentasCuentas-Telegram-Bot",
"score": 2
} |
#### File: jhonruda25/VentasCuentas-Telegram-Bot/bot.py
```python
from telegram.ext import Updater, CommandHandler
def start(update, context):
update.message.reply_text('Hola, humano!')
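# --- Hedged wiring sketch (editor addition; assumes a pre-v20 python-telegram-bot API
#     and a placeholder token) ---
# updater = Updater(token='<BOT_TOKEN>', use_context=True)
# updater.dispatcher.add_handler(CommandHandler('start', start))
# updater.start_polling()
# updater.idle()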
``` |
{
"source": "JhonSaguay/Observatorioapi",
"score": 2
} |
#### File: database/apis/api_msql.py
```python
import requests
import json
import time
import mysql.connector
from mysql.connector import Error
def saveindicador(conexion):
cur=conexion.cursor()
sqlconsulta="SELECT JSON_EXTRACT(datosjson,'$.internal_type') as internal_type,sum(JSON_EXTRACT(datosjson,'$.amount')) as total_amount FROM apidata group by JSON_EXTRACT(datosjson,'$.internal_type');"
cur.execute(sqlconsulta)
data = cur.fetchall()
print(data)
def savedatabasemsql(conexion,my_dict):
cur = conexion.cursor()
cont=0
for dato in my_dict:
json_string=(json.dumps(dato))
json_string2=json.loads(json_string)
json_final=json.dumps(json_string2,ensure_ascii=False)
sql1="insert into apidata(datosjson,categoria) values ('"+json_final+"','funcion_publica_presupuesto_1')"
try:
cur.execute(sql1)
except:
cont+=1
# print('entro')
# print(sql1)
# print(dato['description'])
if cont>0:
print("Errores: ",cont)
def consultarapicomprasmsql(apiurl,conexion):
my_dict={'data':['prueba']}
cont=1
while len(my_dict['data'])>0:
entry_url=apiurl+str(cont)
try:
r = requests.get(entry_url)
my_dict = r.json()
if len(my_dict['data'])==0:
continue
savedatabasemsql(conexion,my_dict['data'])
conexion.commit()
print('entro: '+str(cont))
cont+=1
except:
print("Ha ocurrido un error")
time.sleep(5)
apiurl = "https://datosabiertos.compraspublicas.gob.ec/PLATAFORMA/api/search_ocds?year=2021&search=&page="
try:
connection = mysql.connector.connect(host='localhost',
database='ofpindicadores',
user='ofpuser',
password='<PASSWORD>@!')
if connection.is_connected():
consultarapicomprasmsql(apiurl,connection)
db_Info = connection.get_server_info()
print("Connected to MySQL Server version ", db_Info)
except Error as e:
print("Error while connecting to MySQL", e)
finally:
if connection.is_connected():
connection.close()
print("MySQL connection is closed")
```
#### File: database/apis/api_save_indicador.py
```python
import requests
import json
import time
import mysql.connector
from mysql.connector import Error
# Indicator: percentage of procedures that used direct award or private tender
def saveindicadorprocedimientos(conexion,apiurl):
print('entro')
cur=conexion.cursor()
# sqlconsulta="SELECT JSON_EXTRACT(datosjson,'$.internal_type') as internal_type,sum(JSON_EXTRACT(datosjson,'$.amount')) as total_amount FROM apidata group by JSON_EXTRACT(datosjson,'$.internal_type');"
sqlconsulta="SELECT JSON_EXTRACT(datosjson,'$.internal_type') as internal_type, sum(JSON_EXTRACT(datosjson,'$.amount')) as total_amount, sum(JSON_EXTRACT(datosjson,'$.amount'))/(select sum(JSON_EXTRACT(datosjson,'$.amount')) from ofpindicadores.apidata) as percentage from ofpindicadores.apidata where JSON_UNQUOTE(JSON_EXTRACT(datosjson,'$.internal_type')) like '%Contratacion directa%' or JSON_UNQUOTE(JSON_EXTRACT(datosjson,'$.internal_type')) like '%Licitaci%' group by JSON_EXTRACT(datosjson,'$.internal_type') ;"
cur.execute(sqlconsulta)
data = cur.fetchall()
    # convert list into json
cabeceras=('internal_type','total_amount','percentage')
lista_datos=[]
valores=[]
for element in data:
valores=[element[0].replace('"',''),element[1],element[2]]
dict_from_list = dict(zip(cabeceras, valores))
lista_datos.append(dict_from_list)
    ## update indicators
sqlupdate="update indicadores set active=0 where categoria='funcion_publica_presupuesto_3'"
cur=conexion.cursor()
cur.execute(sqlupdate)
cur.close
    # save data
datos={"en":lista_datos}
json_datos=json.dumps(datos)
sql1="insert into indicadores(nombre,categoria,descripcion,temporalidad,proveedor_dato,direccion_api,tipo,active,is_original_data,tipo_grafica,nivel_apertura,variable_1,variable_2,variable_medida,datos_indicador) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
# json_string='"'+ json_string +'"'
val=('Porcentaje de procedimientos que utilizaron adjudicación directa o licitación privada','funcion_publica_presupuesto_3','descripcion',
'temporalidad','proveedor_dato',apiurl,0,1,0,'pastel','3','internal_type','percentage','conteo',json_datos)
cur=conexion.cursor()
cur.execute(sql1,val)
conexion.commit()
cur.close()
# Indicator: concentration index of the 4 companies with the most awarded procedures
def saveindicadorconcentracion4(conexion,apiurl):
print('entro')
cur=conexion.cursor()
# sqlconsulta="SELECT JSON_EXTRACT(datosjson,'$.internal_type') as internal_type,sum(JSON_EXTRACT(datosjson,'$.amount')) as total_amount FROM apidata group by JSON_EXTRACT(datosjson,'$.internal_type');"
sqlconsulta="SELECT JSON_EXTRACT(datosjson,'$.buyer') as buyer, sum(JSON_EXTRACT(datosjson,'$.amount')) as total_amount FROM ofpindicadores.apidata group by JSON_EXTRACT(datosjson,'$.buyer') order by total_amount desc limit 4;"
cur.execute(sqlconsulta)
data = cur.fetchall()
    # convert list into json
cabeceras=('buyer','total_amount')
lista_datos=[]
valores=[]
for element in data:
valores=[element[0].replace('"',''),element[1]]
dict_from_list = dict(zip(cabeceras, valores))
lista_datos.append(dict_from_list)
    ## update indicators
sqlupdate="update indicadores set active=0 where categoria='funcion_publica_presupuesto_2'"
cur=conexion.cursor()
cur.execute(sqlupdate)
cur.close
    # save data
datos={"en":lista_datos}
json_datos=json.dumps(datos)
sql1="insert into indicadores(nombre,categoria,descripcion,temporalidad,proveedor_dato,direccion_api,tipo,active,is_original_data,tipo_grafica,nivel_apertura,variable_1,variable_2,variable_medida,datos_indicador) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
# json_string='"'+ json_string +'"'
val=('Indice de concentracion de las 4 empresas con mas procedimientos ganados','funcion_publica_presupuesto_2','descripcion',
'temporalidad','proveedor_dato',apiurl,0,1,0,'pastel','3','buyer','total_amount','conteo',json_datos)
cur=conexion.cursor()
cur.execute(sql1,val)
conexion.commit()
cur.close()
# Indicator: change in the percentage of contracts published per procedure
def saveindicador(conexion,apiurl):
print('entro')
cur=conexion.cursor()
# sqlconsulta="SELECT JSON_EXTRACT(datosjson,'$.internal_type') as internal_type,sum(JSON_EXTRACT(datosjson,'$.amount')) as total_amount FROM apidata group by JSON_EXTRACT(datosjson,'$.internal_type');"
sqlconsulta="SELECT JSON_EXTRACT(datosjson,'$.internal_type') as internal_type, sum(JSON_EXTRACT(datosjson,'$.amount')) as total_amount, sum(JSON_EXTRACT(datosjson,'$.amount'))/(select sum(JSON_EXTRACT(datosjson,'$.amount')) from ofpindicadores.apidata) as percentage from ofpindicadores.apidata group by JSON_EXTRACT(datosjson,'$.internal_type');"
# sqlconsulta='''SELECT CONCAT('[', better_result, ']') AS best_result FROM(
# SELECT GROUP_CONCAT('{', my_json, '}' SEPARATOR ',') AS better_result FROM(
# SELECT CONCAT(
# '"internal_type":','"',JSON_UNQUOTE(JSON_EXTRACT(datosjson,'$.internal_type')) , '"',','
# '"total_amount":', sum(JSON_EXTRACT(datosjson,'$.amount')) or 0
# ) AS my_json
# FROM ofpindicadores.apidata group by JSON_EXTRACT(datosjson,'$.internal_type')
# ) AS more_json
# ) AS yet_more_json;'''
cur.execute(sqlconsulta)
data = cur.fetchall()
    # convert list into json
cabeceras=('internal_type','total_amount','percentage')
lista_datos=[]
valores=[]
for element in data:
valores=[element[0].replace('"',''),element[1],element[2]]
dict_from_list = dict(zip(cabeceras, valores))
lista_datos.append(dict_from_list)
    ## update indicators
sqlupdate="update indicadores set active=0 where categoria='funcion_publica_presupuesto_1'"
cur=conexion.cursor()
cur.execute(sqlupdate)
cur.close
    # save data
datos={"en":lista_datos}
json_datos=json.dumps(datos)
sql1="insert into indicadores(nombre,categoria,descripcion,temporalidad,proveedor_dato,direccion_api,tipo,active,is_original_data,tipo_grafica,nivel_apertura,variable_1,variable_2,variable_medida,datos_indicador) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
# json_string='"'+ json_string +'"'
val=('Cambio en el porcentaje de contratos publicados por procedimiento','funcion_publica_presupuesto_1','descripcion',
'temporalidad','proveedor_dato',apiurl,0,1,0,'pastel','3','internal_type','percentage','conteo',json_datos)
cur=conexion.cursor()
cur.execute(sql1,val)
conexion.commit()
cur.close()
def savedatabasemsql(conexion,my_dict):
cur = conexion.cursor()
for dato in my_dict:
json_string=(json.dumps(dato))
sql1="insert into apidata(datosjson) values ('"+json_string+"')"
try:
cur.execute(sql1)
except:
print('entro')
print(sql1)
print(dato['description'])
def consultarapicomprasmsql(apiurl,conexion):
my_dict={'data':['prueba']}
cont=1
while len(my_dict['data'])>0:
entry_url=apiurl+str(cont)
print(entry_url)
try:
r = requests.get(entry_url)
my_dict = r.json()
if len(my_dict['data'])==0:
continue
savedatabasemsql(conexion,my_dict['data'])
conexion.commit()
print('entro: '+str(cont))
cont+=1
except:
print("Ha ocurrido un error")
time.sleep(5)
apiurl = "https://datosabiertos.compraspublicas.gob.ec/PLATAFORMA/api/search_ocds?year=2021&search=&page="
try:
connection = mysql.connector.connect(host='localhost',
database='ofpindicadores',
user='ofpuser',
password='<PASSWORD>@!')
if connection.is_connected():
saveindicador(connection,apiurl)
saveindicadorconcentracion4(connection,apiurl)
saveindicadorprocedimientos(connection,apiurl)
# consultarapicomprasmsql(apiurl,connection)
db_Info = connection.get_server_info()
print("Connected to MySQL Server version ", db_Info)
except Error as e:
print("Error while connecting to MySQL", e)
finally:
if connection.is_connected():
connection.close()
print("MySQL connection is closed")
``` |
{
"source": "jhonsbg/currency-converter",
"score": 4
} |
#### File: jhonsbg/currency-converter/currency.py
```python
def calculo_conversion(tipo_pesos, valor_dolar):
pesos = input("¿Cuantos pesos " + tipo_pesos + " tiene? ")
pesos = float(pesos)
dolares = pesos / valor_dolar
dolares = round(dolares, 2)
dolares = str(dolares)
print("Tienes $" + dolares + " dólares")
def run():
menu = """
Bienvenido al conversor de moneda 💰💵
Elige tu opción
1 - Pesos Colombianos
2 - Pesos Argentinos
3 - Pesos Mexicanos
"""
option = input(menu)
if option == "1":
calculo_conversion("colombianos", 3875)
elif option == "2":
calculo_conversion("argentinos", 65)
elif option == "3":
calculo_conversion("mexicanos", 24)
else:
print("Ingresa una opción valida")
if __name__ == '__main__':
run()
``` |
{
"source": "jhonsnow456/Runn",
"score": 3
} |
#### File: games/Bounce/main.py
```python
import os
import pygame
from player import Ball
from world import World, load_level
from texts import Text, Message
from button import Button, LevelButton
pygame.init()
WIDTH, HEIGHT = 192, 212
win = pygame.display.set_mode((WIDTH, HEIGHT), pygame.NOFRAME)
pygame.display.set_caption('Bounce')
clock = pygame.time.Clock()
FPS = 30
# GAME VARIABLES **************************************************************
ROWS = 12
MAX_COLS = 150
TILE_SIZE = 16
MAX_LEVEL = 8
# COLORS **********************************************************************
BLUE = (175, 207, 240)
BLUE2 = (0, 0, 255)
WHITE = (255, 255, 255)
# FONTS ***********************************************************************
health_font = "Fonts/ARCADECLASSIC.TTF"
level_text = Text(health_font, 24)
health_text = Message(40, WIDTH + 10, 19, "x3", health_font, WHITE, win)
select_level_text = Message(WIDTH//2, 20, 24, "Select Level", health_font, BLUE2, win)
current_level_text = Message(WIDTH - 40, WIDTH + 10, 20, "Level 1", health_font, WHITE, win)
you_win = Message(WIDTH //2, HEIGHT//2, 40, "You Win", health_font, BLUE2, win)
# SOUNDS **********************************************************************
click_fx = pygame.mixer.Sound('Sounds/click.mp3')
life_fx = pygame.mixer.Sound('Sounds/gate.mp3')
checkpoint_fx = pygame.mixer.Sound('Sounds/checkpoint.mp3')
pygame.mixer.music.load('Sounds/track1.wav')
pygame.mixer.music.play(loops=-1)
pygame.mixer.music.set_volume(0.4)
# LOADING IMAGES **************************************************************
ball_image = pygame.image.load('Assets/ball.png')
splash_img = pygame.transform.scale(pygame.image.load('Assets/splash_logo.png'),
(2*WIDTH, HEIGHT))
bounce_img = pygame.image.load('Assets/menu_logo.png')
game_lost_img = pygame.image.load('Assets/lose.png')
game_lost_img = pygame.transform.scale(game_lost_img, (WIDTH//2, 80))
level_locked_img = pygame.image.load('Assets/level_locked.png')
level_locked_img = pygame.transform.scale(level_locked_img, (40, 40))
level_unlocked_img = pygame.image.load('Assets/level_unlocked.png')
level_unlocked_img = pygame.transform.scale(level_unlocked_img, (40, 40))
play_img = pygame.image.load('Assets/play.png')
restart_img = pygame.image.load('Assets/restart.png')
menu_img = pygame.image.load('Assets/menu.png')
sound_on_img = pygame.image.load('Assets/SoundOnBtn.png')
sound_off_img = pygame.image.load('Assets/SoundOffBtn.png')
game_won_img = pygame.image.load('Assets/game won.png')
# BUTTONS *********************************************************************
play_btn = Button(play_img, False, 45, 130)
sound_btn = Button(sound_on_img, False, 45, 170)
restart_btn = Button(restart_img, False, 45, 130)
menu_btn = Button(menu_img, False, 45, 170)
# LEVEL TEXT & BUTTONS ********************************************************
level_btns = []
for level in range(MAX_LEVEL):
text = level_text.render(f'{level+1}', (255, 255, 255))
r = level // 3
c = level % 3
btn = LevelButton(level_locked_img, (40, 40), 20 + c * 55, 50 + r * 55, text)
level_btns.append(btn)
# GROUPS **********************************************************************
spikes_group = pygame.sprite.Group()
inflator_group = pygame.sprite.Group()
deflator_group = pygame.sprite.Group()
enemy_group = pygame.sprite.Group()
exit_group = pygame.sprite.Group()
checkpoint_group = pygame.sprite.Group()
health_group = pygame.sprite.Group()
objects_groups = [spikes_group, inflator_group, deflator_group, enemy_group, exit_group,
checkpoint_group, health_group]
collision_groups = [inflator_group, deflator_group]
# RESET ***********************************************************************
def reset_level_data(level):
for group in objects_groups:
group.empty()
# LOAD LEVEL WORLD
world_data, level_length = load_level(level)
w = World(objects_groups)
w.generate_world(world_data, win)
return world_data, level_length, w
def reset_player_data(level):
if level == 1:
x, y = 64, 50
if level == 2:
x, y = 65, 50
if level == 3:
x, y = 64, 50
if level == 4:
x, y = 63, 50
if level == 5:
x, y = 64, 50
if level == 6:
x, y = 48, 50
if level == 7:
x, y = 78, 80
if level == 8:
x, y = 112,100
p = Ball(x, y)
moving_left = False
moving_right = False
return p, moving_left, moving_right
# VARIABLES *******************************************************************
moving_left = False
moving_right = False
SCROLL_THRES = 80
screen_scroll = 0
level_scroll = 0
level = 1
next_level = False
reset_level = False
checkpoint = None
health = 3
splash_count = 0
sound_on = True
logo_page = True
home_page = False
level_page = False
game_page = False
restart_page = False
win_page = False
running = True
while running:
win.fill(BLUE)
pygame.draw.rect(win, (255, 255,255), (0, 0, WIDTH, HEIGHT), 1, border_radius=5)
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE or \
event.key == pygame.K_q:
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
moving_left = True
if event.key == pygame.K_RIGHT:
moving_right = True
if event.key == pygame.K_UP:
if not p.jump:
p.jump = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT:
moving_left = False
if event.key == pygame.K_RIGHT:
moving_right = False
if logo_page:
win.blit(splash_img, (-100,0))
splash_count += 1
if splash_count % 50 == 0:
logo_page = False
home_page = True
if home_page:
win.blit(bounce_img, (10,10))
if play_btn.draw(win):
click_fx.play()
home_page = False
level_page = True
if sound_btn.draw(win):
click_fx.play()
sound_on = not sound_on
if sound_on:
sound_btn.update_image(sound_on_img)
pygame.mixer.music.play(loops=-1)
else:
sound_btn.update_image(sound_off_img)
pygame.mixer.music.stop()
if level_page:
select_level_text.update(shadow=False)
for index, btn in enumerate(level_btns):
if index < level:
if not btn.unlocked:
btn.unlocked = True
btn.update_image(level_unlocked_img)
if btn.draw(win):
if index < level:
click_fx.play()
level_page = False
game_page = True
level = index + 1
screen_scroll = 0
level_scroll = 0
health = 3
world_data, level_length, w = reset_level_data(level)
p, moving_left, moving_right = reset_player_data(level)
if restart_page:
win.blit(game_lost_img, (45,20))
if restart_btn.draw(win):
click_fx.play()
world_data, level_length, w = reset_level_data(level)
p, moving_left, moving_right = reset_player_data(level)
level_scroll = 0
screen_scroll = 0
health = 3
checkpoint = None
restart_page = False
game_page = True
if menu_btn.draw(win):
click_fx.play()
home_page = True
restart_page = False
if win_page:
win.blit(game_won_img, (45, 20))
you_win.update()
if menu_btn.draw(win):
click_fx.play()
home_page = True
win_page = False
restart_page = False
if game_page:
w.update(screen_scroll)
w.draw(win)
spikes_group.update(screen_scroll)
spikes_group.draw(win)
health_group.update(screen_scroll)
health_group.draw(win)
inflator_group.update(screen_scroll)
inflator_group.draw(win)
deflator_group.update(screen_scroll)
deflator_group.draw(win)
exit_group.update(screen_scroll)
exit_group.draw(win)
checkpoint_group.update(screen_scroll)
checkpoint_group.draw(win)
enemy_group.update(screen_scroll)
enemy_group.draw(win)
screen_scroll = 0
p.update(moving_left, moving_right, w, collision_groups)
p.draw(win)
if ((p.rect.right >= WIDTH - SCROLL_THRES) and level_scroll < (level_length * 16) - WIDTH) \
or ((p.rect.left <= SCROLL_THRES) and level_scroll > 0):
dx = p.dx
p.rect.x -= dx
screen_scroll = -dx
level_scroll += dx
if len(exit_group) > 0:
exit = exit_group.sprites()[0]
if not exit.open:
if abs(p.rect.x - exit.rect.x) <= 80 and len(health_group) == 0:
exit.open = True
if p.rect.colliderect(exit.rect) and exit.index == 11:
checkpoint = None
checkpoint_fx.play()
level += 1
if level < MAX_LEVEL:
checkpoint = False
reset_level = True
next_level = True
else:
checkpoint = None
win_page = True
cp = pygame.sprite.spritecollide(p, checkpoint_group, False)
if cp:
checkpoint = cp[0]
if not checkpoint.catched:
checkpoint_fx.play()
checkpoint.catched = True
checkpoint_pos = p.rect.center
checkpoint_screen_scroll = screen_scroll
checkpoint_level_scroll = level_scroll
if pygame.sprite.spritecollide(p, spikes_group, False):
reset_level = True
if pygame.sprite.spritecollide(p, health_group, True):
health += 1
life_fx.play()
if pygame.sprite.spritecollide(p, enemy_group, False):
reset_level = True
if reset_level:
if health > 0:
if next_level:
world_data, level_length, w = reset_level_data(level)
p, moving_left, moving_right = reset_player_data(level)
level_scroll = 0
health = 3
checkpoint = None
next_level = False
elif checkpoint:
checkpoint_dx = level_scroll - checkpoint_level_scroll
w.update(checkpoint_dx)
for group in objects_groups:
group.update(checkpoint_dx)
p.rect.center = checkpoint_pos
level_scroll = checkpoint_level_scroll
else:
w.update(level_scroll)
for group in objects_groups:
group.update(level_scroll)
p, moving_left, moving_right = reset_player_data(level)
level_scroll = 0
screen_scroll = 0
reset_level = False
health -= 1
else:
restart_page = True
game_page = False
reset_level = False
# Drawing info bar
pygame.draw.rect(win, (25, 25, 25), (0, HEIGHT-20, WIDTH, 20))
pygame.draw.rect(win, (255, 255,255), (0, 0, WIDTH, WIDTH), 2, border_radius=5)
win.blit(ball_image, (5, WIDTH + 2))
health_text.update(f'x{health}', shadow=False)
current_level_text.update(f'Level {level}', shadow=False)
clock.tick(FPS)
pygame.display.update()
pygame.quit()
```
#### File: games/SpaceInvaders/Pygame_original.py
```python
import pygame
import random
import math
from pygame import mixer
#Initializing pygame
pygame.init()
#initializing the screen
screen = pygame.display.set_mode((800,600))
#Titile & Icon
pygame.display.set_caption("Space Invaders")
icon = pygame.image.load("alien.png")
pygame.display.set_icon(icon)
#score
score_value = 0
font = pygame.font.Font('freesansbold.ttf',45)
textX = 300
textY = 20
game_over = pygame.font.Font('freesansbold.ttf',65)
def display_font(x,y):
score = font.render("Score:" + str(score_value),True,(255,255,255))
screen.blit(score,(x,y))
def display_gameover():
over = game_over.render("Game Over",True,(255,255,255))
screen.blit(over,(250,280))
#Background
background = pygame.image.load("background.png")
#Background sound
#mixer.music.load("background.wav")
#mixer.music.play(-1)
#player
playerimg = pygame.image.load("space-invaders.png")
playerimg = pygame.transform.scale(playerimg, (45, 45))
playerX = 350
playerY = 500
playerX_change = 0
def player(x,y):
screen.blit(playerimg,(x,y))
#Enemy
enemyimg = []
enemyimgT = []
enemyX = []
enemyY = []
enemyX_change = []
enemyY_change = []
for i in range(0,6):
enemyimg.append(pygame.image.load("alien.png"))
enemyimgT.append(pygame.transform.scale(enemyimg[i], (45, 45)))
enemyX.append(random.randint(0,755))
enemyY.append(random.randint(50,200))
enemyX_change.append(4)
enemyY_change.append(45)
def enemy(x,y,i):
screen.blit(enemyimgT[i],(x,y))
#Bullet
bulletimg = pygame.image.load("bullet.png")
bulletimg = pygame.transform.scale(bulletimg, (35, 35))
bulletX = 0
bulletY = 480
bulletX_change = 0
bulletY_change = 10
bullet_state = "ready"
def fire_bullet(x,y):
global bullet_state
bullet_state = 'fire'
screen.blit(bulletimg,(x+5,y+16))
def Collusion(aX,aY,bX,bY):
distance = math.sqrt(math.pow((aX-bX),2)+math.pow((aY-bY),2))
if distance <= 25:
return True
else:
return False
while True:
screen.fill((0,0,0))
screen.blit(background,(0,0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
            pygame.quit()
            exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
playerX_change = -5
if event.key == pygame.K_RIGHT:
playerX_change = 5
if event.key == pygame.K_SPACE:
if bullet_state is "ready":
bulletX = playerX
fire_bullet(bulletX,bulletY)
mixer.music.load("laser.wav")
mixer.music.play()
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
playerX_change = 0
#Player Boundry & Movement
if playerX <= 0:
playerX = 0
elif playerX >= 750:
playerX = 750
playerX += playerX_change
#Bullet Movement
if bullet_state is "fire":
fire_bullet(bulletX,bulletY)
bulletY -= bulletY_change
if bulletY <= 0:
bullet_state = "ready"
bulletY = 480
#Enemy Boundary & Movement
for i in range(0,6):
if enemyY[i]>=480:
for j in range(0,6):
enemyY[j]= 2000
display_gameover()
break
enemyX[i] += enemyX_change[i]
if enemyX[i] <= 0:
enemyX_change[i] = 2
enemyY[i] += enemyY_change[i]
elif enemyX[i] >= 750:
enemyX_change[i] = -2
enemyY[i] += enemyY_change[0]
colide = Collusion(enemyX[i],enemyY[i],bulletX,bulletY)
enemy(enemyX[i],enemyY[i],i)
if colide:
bullet_state = "ready"
bulletY = 480
enemyX[i] = random.randint(0,755)
enemyY[i]= random.randint(50,200)
score_value +=1
mixer.music.load("explosion.wav")
mixer.music.play()
display_font(textX,textY)
player(playerX,playerY)
pygame.display.update()
``` |
{
"source": "jhonsome/python",
"score": 3
} |
#### File: python/projetos/desenhar linhas.py
```python
import pygame
from pygame.locals import *
pygame.init()
TELA = pygame.display.set_mode((pygame.display.Info().current_w, pygame.display.Info().current_h), pygame.FULLSCREEN | pygame.SCALED)
FPS = pygame.time.Clock()
def _início():
pontos = list()
larguraLinha = 1
corLinha = (255, 255, 255)
while True:
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
return None
elif evento.type == pygame.MOUSEBUTTONDOWN:
pontos.append(evento.pos)
TELA.fill((20, 20, 20))
if len(pontos) > 1:
for p in range(len(pontos) - 1):
pygame.draw.line(TELA, corLinha, pontos[p], pontos[p + 1], larguraLinha)
pygame.display.flip()
FPS.tick(60)
if __name__ == "__main__":
_início()
pygame.quit()
exit()
```
#### File: projetos/Jogo da Velha/jogo da velha.py
```python
import pygame
from pygame.locals import *
from random import randint
pygame.init()
LARGURATELA, ALTURATELA = pygame.display.Info().current_w, pygame.display.Info().current_h
TELA = pygame.display.set_mode((LARGURATELA, ALTURATELA), pygame.FULLSCREEN | pygame.SCALED)
FONTE = pygame.font.SysFont("arial", int(15 * TELA.get_width() / 100))
FPS = pygame.time.Clock()
MÍDIAS = (
"mídias/imagens/fundo.png",
"mídias/imagens/tile.png",
"mídias/imagens/botão fundo.png",
"mídias/imagens/placar.png",
)
IMAGENS = dict()
SONS = dict()
RETÂNGULOS = dict()
def Porcentagem(p, v):
"""
Retorna a porcentagem de um valor
p: porcento
v: valor
"""
return p * v / 100
def CarregarImg(img, resolução):
"""
    Returns a resized pygame.Surface
    img: string with the path to the image file
    resolução: an iterable with the width and height values
"""
resolução = (int(resolução[0]), int(resolução[1]))
return pygame.transform.smoothscale(pygame.image.load(img), resolução).convert_alpha()
def CarregarTxt(txt, resolução, cor = (255, 255, 255)):
"""
Retorna uma pygame.Surface redimensionada com um texto
txt: string com um texto qualquer
resolução: um iterável com os valores de largura e altura
cor: cor do texto
"""
resolução = (int(resolução[0]), int(resolução[1]))
return pygame.transform.smoothscale(FONTE.render(txt, True, cor), resolução).convert_alpha()
def _Jogo():
jogadorAtual = randint(0, 1)
tabuleiro = [[None for n in range(3)] for n in range(3)]
jogador1X = margem * 2
jogador1Y = margem * 2
jogador1Pontos = 0
jogador2X = Porcentagem(60, LARGURATELA)
jogador2Y = margem * 2
jogador2Pontos = 0
jogadorAtualX = margem
jogadorAtualY = (tileTam + margem) * 3 + margem + (Porcentagem(10, LARGURATELA) + margem)
IMAGENS["jogador atual"] = CarregarTxt(f"Vez do jogador {'X' if jogadorAtual == 0 else 'Y'}", (Porcentagem(100, LARGURATELA), Porcentagem(15, ALTURATELA)))
while True:
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
pygame.quit()
exit()
elif evento.type == pygame.MOUSEBUTTONDOWN:
for l in range(len(RETÂNGULOS["tiles"])):
for a in range(len(RETÂNGULOS["tiles"][l])):
if RETÂNGULOS["tiles"][l][a].collidepoint(evento.pos):
                            if tabuleiro[l][a] == None:
                                tabuleiro[l][a] = jogadorAtual
                                jogadorAtual = 0 if jogadorAtual == 1 else 1
TELA.blit(IMAGENS["fundo"], (0, 0))
TELA.blit(IMAGENS["placar"], (margem, margem))
TELA.blit(IMAGENS["placar X"], (jogador1X, jogador1Y))
TELA.blit(IMAGENS["placar O"], (jogador2X, jogador2Y))
for l in range(len(RETÂNGULOS["tiles"])):
for a in range(len(RETÂNGULOS["tiles"][l])):
TELA.blit(IMAGENS["tile"], RETÂNGULOS["tiles"][l][a])
if tabuleiro[l][a] != None:
                    TELA.blit(IMAGENS["x"] if tabuleiro[l][a] == 0 else IMAGENS["o"], RETÂNGULOS["tiles"][l][a])
TELA.blit(IMAGENS["jogador atual"], (jogadorAtualX, jogadorAtualY))
pygame.display.update()
FPS.tick(fps)
def _FimJogo(pontuação):
pass
def _Início():
global fps, margem, tileTam
fps = 30
tileTam = Porcentagem(33.3, LARGURATELA)
margem = Porcentagem(1, LARGURATELA)
botãoJogarX = Porcentagem(25, LARGURATELA) + margem
botãoJogarY = Porcentagem(40, ALTURATELA) + margem
botãoSairX = Porcentagem(25, LARGURATELA) + margem
botãoSairY = Porcentagem(50, ALTURATELA) + margem
IMAGENS["fundo"] = CarregarImg(MÍDIAS[0], (LARGURATELA, ALTURATELA))
IMAGENS["botão fundo"] = CarregarImg(MÍDIAS[2], (Porcentagem(50, LARGURATELA) - margem * 2, Porcentagem(10, ALTURATELA) - margem * 2))
IMAGENS["botão jogar"] = CarregarTxt("Jogar", (Porcentagem(50, LARGURATELA) - margem * 2, Porcentagem(10, ALTURATELA) - margem * 2))
IMAGENS["botão sair"] = CarregarTxt("Sair", (Porcentagem(50, LARGURATELA) - margem * 2, Porcentagem(10, ALTURATELA) - margem * 2))
IMAGENS["tile"] = CarregarImg(MÍDIAS[1], (tileTam - margem * 2, tileTam - margem * 2))
IMAGENS["placar"] = CarregarImg(MÍDIAS[3], (Porcentagem(100, LARGURATELA) - margem * 2, Porcentagem(10, ALTURATELA) - margem * 2))
IMAGENS["x"] = CarregarTxt("X", (tileTam - margem * 2, tileTam - margem * 2))
IMAGENS["o"] = CarregarTxt("O", (tileTam - margem * 2, tileTam - margem * 2))
IMAGENS["placar X"] = CarregarTxt("p: 0 - X", (Porcentagem(40, LARGURATELA) - margem * 2, Porcentagem(10, ALTURATELA) - margem * 2))
IMAGENS["placar O"] = CarregarTxt(f"O - p: 0", (Porcentagem(40, LARGURATELA) - margem * 2, Porcentagem(10, ALTURATELA) - margem * 2))
RETÂNGULOS["tiles"] = [[pygame.Rect(tileTam * l + margem, Porcentagem(10, ALTURATELA) + tileTam * a + margem, tileTam - margem * 2, tileTam - margem * 2) for a in range(3)] for l in range(3)]
RETÂNGULOS["botão jogar"] = pygame.Rect((botãoJogarX, botãoJogarY), IMAGENS["botão jogar"].get_size())
RETÂNGULOS["botão sair"] = pygame.Rect((botãoSairX, botãoSairY), IMAGENS["botão sair"].get_size())
while True:
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
pygame.quit()
exit()
elif evento.type == pygame.MOUSEBUTTONDOWN:
if RETÂNGULOS["botão jogar"].collidepoint(evento.pos):
pontos = _Jogo()
elif RETÂNGULOS["botão sair"].collidepoint(evento.pos):
pygame.quit()
exit()
TELA.blit(IMAGENS["fundo"], (0, 0))
TELA.blit(IMAGENS["botão fundo"], RETÂNGULOS["botão jogar"])
TELA.blit(IMAGENS["botão fundo"], RETÂNGULOS["botão sair"])
TELA.blit(IMAGENS["botão jogar"], RETÂNGULOS["botão jogar"])
TELA.blit(IMAGENS["botão sair"], RETÂNGULOS["botão sair"])
pygame.display.update()
FPS.tick(fps)
if __name__ == "__main__":
_Início()
```
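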
#### File: python/projetos/jogo.py
```python
import pygame, sys
from pygame.locals import *
from random import randint
pygame.init()
tela = pygame.display.set_mode((400, 700), pygame.FULLSCREEN | pygame.SCALED)
telaLargura, telaAltura = tela.get_width(), tela.get_height()
fps = pygame.time.Clock()
class Imagem(object):
def __init__(self, posX, posY, largura, altura, imagem):
if type(imagem) == pygame.Surface:
self.img = pygame.Surface((largura, altura))
self.S = True
else:
self.imgOriginal = pygame.image.load(imagem)
self.img = pygame.transform.smoothscale(self.imgOriginal, (largura, altura))
self.S = False
self.ret = pygame.Rect(posX, posY, largura, altura)
self.pos = (posX, posY)
self.tam = (largura, altura)
def exibirImagem(self, superfície, RGBA = None):
if self.S:
if RGBA != None:
self.img.fill(RGBA)
else:
self.img.fill((20, 20, 20))
superfície.blit(self.img, self.ret)
def moverImagem(self, X, Y):
self.ret = self.ret.move(X, Y)
def redimensionar(self, posX, posY, largura, altura):
self.img = pygame.transform.smoothscale(self.imgOriginal, (largura, altura))
self.ret = pygame.Rect(posX, posY, largura, altura)
def retornarRet(self):
return self.ret
class Botão(object):
def __init__(self, posX, posY, largura, altura, botãoLivre = None, botãoPressionado = None, som = None):
        # Default the Surface flags so exibirBotao also works for image-file buttons.
        self.S1 = False
        self.S2 = False
        if botãoLivre == None:
botão01 = None
elif type(botãoLivre) == pygame.Surface:
botão01 = pygame.Surface((largura, altura))
self.S1 = True
else:
botão01 = pygame.transform.smoothscale(pygame.image.load(botãoLivre), (int(largura), int(altura)))
if botãoPressionado == None:
botão02 = None
elif type(botãoPressionado) == pygame.Surface:
botão02 = pygame.Surface((largura, altura))
self.S2 = True
else:
botão02 = pygame.transform.smoothscale(pygame.image.load(botãoPressionado), (int(largura), int(altura)))
if som != None:
som = pygame.mixer.Sound(som)
self.som = som
self.img = (botão01, botão02)
self.b = 0
self.ret = pygame.Rect(posX, posY, largura, altura)
def exibirBotao(self, superfície, RGBA = None):
if self.img[0] != None:
if self.b == 0:
if self.S1:
if RGBA != None:
self.img[0].fill(RGBA)
else:
self.img[0].fill((20, 20, 20))
superfície.blit(self.img[0], self.ret)
if self.img[1] != None:
if self.b == 1:
if self.S2:
if RGBA != None:
self.img[1].fill(RGBA)
else:
self.img[1].fill((20, 20, 20))
superfície.blit(self.img[1], self.ret)
def Clique(self, eventos, tipo = 0):
if eventos.type == pygame.MOUSEBUTTONDOWN:
t1 = pygame.mouse.get_pos()
if t1[0] > self.ret.left and t1[0] < self.ret.right and t1[1] > self.ret.top and t1[1] < self.ret.bottom:
self.t = True
self.b = 1
if tipo == 0:
return True
if tipo == 1:
return False
else:
self.t = False
if eventos.type == pygame.MOUSEBUTTONUP:
try:
if self.t == True:
self.t = False
self.b = 0
t2 = pygame.mouse.get_pos()
if t2[0] > self.ret.left and t2[0] < self.ret.right and t2[1] > self.ret.top and t2[1] < self.ret.bottom:
if self.som != None:
self.som.play(maxtime = 1000)
self.som.fadeout(2000)
if tipo == 0:
return False
if tipo == 1:
return True
except AttributeError:
self.t = False
def intro():
loop = True
fonte = pygame.font.SysFont("Arial", 22)
texto1 = fonte.render(" PONG", 1, (200, 200, 200))
texto2 = fonte.render("Não encoste a raquete nos", 1, (200, 200, 200))
texto3 = fonte.render(" cantos da parede.", 1, (200, 200, 200))
texto4 = fonte.render("Não deixe o quadrado cair.", 1, (200, 200, 200))
texto5 = fonte.render("boa sorte!", 1, (200, 200, 200))
textos = [texto1, texto2, texto3, texto4, texto5]
txtb1 = fonte.render("lado direito da tela: botão direito", 1, (200, 200, 200))
txtb2 = fonte.render("lado esquerdo da tela: botão esquerdo", 1, (200, 200, 200))
continuar = fonte.render("Clique para continuar.", 1, (0, 255, 0))
while loop:
for evento in pygame.event.get():
if evento.type == pygame.QUIT or evento.type == pygame.MOUSEBUTTONDOWN:
loop = False
num = 10
tela.fill((20, 20, 20))
for texto in textos:
tela.blit(texto, (10, num))
num += 24
tela.blit(txtb1, (0, 350))
tela.blit(txtb2, (0, 374))
tela.blit(continuar, (90, 470))
pygame.display.flip()
fps.tick(30)
def perdeu():
loop = True
fonte = pygame.font.SysFont("Arial", 27)
Label1 = fonte.render("Você perdeu!", 1, (255, 0, 0))
Label2 = fonte.render("Clique para continuar.", 1, (255, 255, 255))
while loop:
tela.blit(Label1, (113, 300))
tela.blit(Label2, (70, 380))
pygame.display.flip()
fps.tick(30)
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
loop = False
if evento.type == pygame.MOUSEBUTTONDOWN:
loop = False
def jogo(jogadas, tempo, melhorRodada):
fonte = pygame.font.SysFont("Arial", 16)
melhorTempo = fonte.render(f"melhor tempo (milissegundos): {tempo:.3f} rodada: {melhorRodada}", 1, (0, 0, 0))
txtJogadas = fonte.render(f"{jogadas}° rodada", 1, (0, 0, 0))
tabela = Imagem(0, 0, telaLargura, 60, pygame.Surface((0, 0)))
linha = Imagem(0, 90, telaLargura, 2, pygame.Surface((0, 0)))
linha2 = Imagem(0, 90, 2, 20, pygame.Surface((0, 0)))
linha3 = Imagem(telaLargura - 2, 90, 2, 20, pygame.Surface((0, 0)))
bola = Imagem(randint(0, 269), randint(400, 500), 30, 30, pygame.Surface((0, 0)))
barra = Imagem(140, 90, 38, 10, pygame.Surface((0, 0)))
b1 = Botão(0, 0, telaLargura / 2, telaAltura)
b2 = Botão((telaLargura / 2) - 1, 0, telaLargura / 2, telaAltura)
vel = 8
t = False
velX = 10
velY = velX - 2
time = 0
cor = (255, 255, 255)
while True:
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
if evento.type == pygame.MOUSEBUTTONDOWN:
if b2.Clique(evento) == True:
vel = abs(vel)
t = True
if b1.Clique(evento) == True:
if vel > 0:
vel = -vel
t = True
if evento.type == pygame.MOUSEBUTTONUP:
t = False
if t == True:
barra.moverImagem(vel, 0)
bola.moverImagem(velX, velY)
if bola.retornarRet().left < 0 or bola.retornarRet().right > telaLargura:
velX = -velX
if bola.retornarRet().bottom > telaAltura or bola.retornarRet().colliderect(barra.retornarRet()):
velY = -velY
cor = (randint(0, 255), randint(0, 255), randint(0, 255))
time += 0.03
tela.fill((20, 20, 20))
tabela.exibirImagem(tela, (255, 255, 255))
tela.blit(txtJogadas, (0, 0))
tempoAtual = fonte.render(f"tempo atual (milissegundos): {time:.3f}", 1, (0, 0, 0))
tela.blit(tempoAtual, (0, 20))
tela.blit(melhorTempo, (0, 40))
linha.exibirImagem(tela, (255, 0, 0))
linha2.exibirImagem(tela, (255, 0, 0))
linha3.exibirImagem(tela, (255, 0, 0))
bola.exibirImagem(tela, cor)
barra.exibirImagem(tela, (255, 255, 255))
pygame.display.flip()
fps.tick(30)
if bola.retornarRet().top < linha.retornarRet().bottom or barra.retornarRet().left < linha2.retornarRet().right or barra.retornarRet().right > linha3.retornarRet().left:
return time
rodada = 1
melhorRodada = "–"
melhorTempo = 0
intro()
while True:
print(type(pygame.Rect(0, 0, 100, 100)))
t = jogo(rodada, melhorTempo, melhorRodada)
if t > melhorTempo:
melhorTempo = t
melhorRodada = rodada
perdeu()
rodada += 1
```
#### File: python/projetos/.last_tmp.py
```python
import pygame
from pygame.locals import *
from plyer import accelerometer
pygame.init()
TELA = pygame.display.set_mode()
FONTE = pygame.font.SysFont("serif", 20)
FPS = pygame.time.Clock()
def _início():
accelerometer.enable()
fpsTick = 60
loop = True
while loop:
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
pygame.quit()
exit()
x, y, z = accelerometer.acceleration
txt = FONTE.render(f"x: {x:.2f}, y: {y:.2f}, z: {z:.2f}", True, (255, 255, 255))
TELA.fill((20, 20, 20))
TELA.blit(txt, (0, 0))
pygame.display.flip()
FPS.tick(fpsTick)
if __name__ == "__main__":
_início()
``` |
{
"source": "jhonsu01/mezzanine",
"score": 2
} |
#### File: conf/migrations/0001_initial.py
```python
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Setting'
db.create_table('conf_setting', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('value', self.gf('django.db.models.fields.CharField')(max_length=2000)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal('conf', ['Setting'])
def backwards(self, orm):
# Deleting model 'Setting'
db.delete_table('conf_setting')
models = {
'conf.setting': {
'Meta': {'object_name': 'Setting'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
}
}
complete_apps = ['conf']
```
#### File: core/migrations/0001_initial.py
```python
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Keyword'
db.create_table('core_keyword', (
('slug', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('core', ['Keyword'])
def backwards(self, orm):
# Deleting model 'Keyword'
db.delete_table('core_keyword')
models = {
'core.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['core']
``` |
{
"source": "jhonurbis/api-ai-workshop",
"score": 3
} |
#### File: webhook implementations/python/server.py
```python
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello from APIAI Webhook Integration."
@app.route("/version")
def version():
return "APIAI Webhook Integration. Version 1.0"
@app.route("/webhook", methods=['POST'])
def webhook():
content = request.json
    # Extract out the parameters
    # Persist the record
    # Send email notification
return jsonify({"speech":"Thank You for the feedback","displayText":"Thank You for the feedback","source":"Hotel Feedback System"})
if __name__ == "__main__":
app.run()
``` |
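The webhook view above leaves the fulfilment steps as comments. One way the slot values could be read out of an API.AI (Dialogflow v1) request body is sketched below; the `result.parameters` layout and the `hotel`/`rating`/`comments` names are assumptions rather than anything defined by this repository, and the helper is meant to be called from inside the `/webhook` handler so that `request` is bound.
```python
from flask import request

def extract_feedback_parameters():
    """Sketch: pull slot values out of an API.AI (Dialogflow v1) webhook request.

    The result.parameters layout and the parameter names below are assumptions,
    not part of the original agent definition.
    """
    content = request.json or {}
    parameters = content.get("result", {}).get("parameters", {})
    return {
        "hotel": parameters.get("hotel"),        # hypothetical parameter name
        "rating": parameters.get("rating"),      # hypothetical parameter name
        "comments": parameters.get("comments"),  # hypothetical parameter name
    }
```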
{
"source": "Jhonve/Sketch2Voxels",
"score": 3
} |
#### File: Jhonve/Sketch2Voxels/SketchModeling.py
```python
import sys
import cv2
import numpy as np
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt, pyqtSlot
import os
class DrawWindow(QWidget):
def __init__(self):
super(DrawWindow, self).__init__()
self.win_height = 512
self.win_width = 512
self.win_pos_x = 200
self.win_pos_y = 200
self.res_height = 256
self.res_width = 256
self.resize(self.win_height, self.win_width)
self.move(self.win_pos_x, self.win_pos_y)
self.setWindowTitle("Sketch Modeling")
self.setMouseTracking(False)
# to save positions
self.pos_xy = []
self.line_num = 0
self.line_index = []
self.line_index.append(0)
self.initUI()
def initUI(self):
self.gen_button_pos_x = 286
self.gen_button_pos_y = 482
self.gen_button = QPushButton("Generate", self)
self.gen_button.setToolTip("Generate 3D voxels")
self.gen_button.move(self.gen_button_pos_x, self.gen_button_pos_y)
self.gen_button.clicked.connect(self.onGenClick)
self.del_button_pos_x = 176
self.del_button_pos_y = 482
self.del_button = QPushButton("Recall", self)
self.del_button.setToolTip("Recall one step")
self.del_button.move(self.del_button_pos_x, self.del_button_pos_y)
self.del_button.clicked.connect(self.onDelClick)
self.remove_button_pos_x = 422
self.remove_button_pos_y = 10
self.remove_button = QPushButton("Remove", self)
self.remove_button.setToolTip("Remove all")
self.remove_button.move(self.remove_button_pos_x, self.remove_button_pos_y)
self.remove_button.clicked.connect(self.onRemClick)
@pyqtSlot()
def onGenClick(self):
if(self.line_num > 0):
image = np.ones([self.win_height, self.win_width])
image = np.uint8(image * 255)
point_start = self.pos_xy[0]
for pos_tmp in self.pos_xy:
point_end = pos_tmp
if point_end == (-1, -1):
point_start = (-1, -1)
continue
if point_start == (-1, -1):
point_start = point_end
continue
image = cv2.line(image, (point_start[0], point_start[1]),
(point_end[0], point_end[1]), 0, 1, 8)
point_start = point_end
image = cv2.resize(image, (self.res_height, self.res_width), cv2.INTER_LINEAR)
cv2.imwrite("./TestData/1/sketch.jpg", image)
while(True):
try:
with open("./TestData/state.txt", "w") as state_file:
state_file.write("1\n0\n0\n0")
state_file.close()
break
except PermissionError as e:
print("PermissionError")
print("Generating now")
while(True):
with open("./TestData/state.txt", "r") as state_file:
line_list = state_file.readlines()
if(len(line_list) == 0):
continue
if(line_list[2] == "1\n"):
state_file.close()
os.system("python3 Visualization/visualize.py TestData/1/voxels.mat -cm")
break
else:
state_file.close()
print("Generate Done!")
else:
print("Draw first.")
@pyqtSlot()
def onDelClick(self):
if(self.line_num > 0):
self.pos_xy = self.pos_xy[:self.line_index[self.line_num - 1]]
self.line_index.pop(self.line_num)
self.line_num = self.line_num - 1
else:
print("Draw first.")
self.update()
@pyqtSlot()
def onRemClick(self):
if(self.line_num > 0):
self.pos_xy = []
self.line_num = 0
self.line_index = []
self.line_index.append(0)
else:
print("Draw first.")
self.update()
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
pen = QPen(Qt.black, 2, Qt.SolidLine)
painter.setPen(pen)
if len(self.pos_xy) > 1:
point_start = self.pos_xy[0]
for pos_tmp in self.pos_xy:
point_end = pos_tmp
if point_end == (-1, -1):
point_start = (-1, -1)
continue
if point_start == (-1, -1):
point_start = point_end
continue
painter.drawLine(point_start[0], point_start[1], point_end[0], point_end[1])
point_start = point_end
painter.end()
def mouseMoveEvent(self, event):
pos_tmp = (event.pos().x(), event.pos().y())
self.pos_xy.append(pos_tmp)
self.update()
def mouseReleaseEvent(self, event):
pos_tmp = (-1, -1)
self.pos_xy.append(pos_tmp)
self.line_num = self.line_num + 1
self.line_index.append(len(self.pos_xy))
self.update()
if __name__ == "__main__":
sketch_model_app = QApplication(sys.argv)
draw_window = DrawWindow()
draw_window.show()
sketch_model_app.exec_()
``` |
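The GUI above coordinates with an external voxel generator through `TestData/state.txt`: `onGenClick` writes `1/0/0/0` after saving the sketch, then polls until the third line turns to `1` before visualizing `TestData/1/voxels.mat`. A minimal sketch of the generator side of that handshake is below; the flag positions are inferred from the polling loop and `run_reconstruction` is only a placeholder.
```python
import time

def run_reconstruction(sketch_path):
    """Placeholder for the actual sketch-to-voxel model, which would write TestData/1/voxels.mat."""
    pass

def watch_state_file(path="./TestData/state.txt"):
    """Poll the handshake file and raise the 'voxels ready' flag once reconstruction finishes (sketch)."""
    while True:
        try:
            with open(path) as state_file:
                lines = state_file.readlines()
        except FileNotFoundError:
            lines = []
        # The GUI writes "1\n0\n0\n0" after saving the sketch (first flag = sketch pending).
        if len(lines) >= 4 and lines[0].strip() == "1":
            run_reconstruction("./TestData/1/sketch.jpg")
            with open(path, "w") as state_file:
                # Third flag = voxels ready; this is what onGenClick polls for before visualizing.
                state_file.write("0\n0\n1\n0")
        time.sleep(0.5)
```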
{
"source": "jhonvlange/flask-rest-api",
"score": 3
} |
#### File: app/routes/student.py
```python
from flask import Blueprint
from ..services import student
# Create blueprint
bp_student = Blueprint('student', __name__)
# Create a student
@bp_student.route('/api/student', methods=['POST'])
def post_student():
return student.post_student()
# Get all students
@bp_student.route('/api/student', methods=['GET'])
def get_students():
return student.get_students()
# Get a single student
@bp_student.route('/api/student/<student_id>', methods=['GET'])
def get_student(student_id):
return student.get_student(student_id)
# Update a student
@bp_student.route('/api/student/<student_id>', methods=['PUT'])
def update_student(student_id):
return student.update_student(student_id)
# Delete a student
@bp_student.route('/api/student/<student_id>', methods=['DELETE'])
def delete_student(student_id):
return student.delete_student(student_id)
```
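The blueprint above exposes a plain CRUD surface for students. A minimal client sketch is shown below; the local base URL is an assumption, and the JSON fields mirror the ones read by the service layer in `services/student.py`.
```python
import requests

BASE = "http://localhost:5000/api/student"  # assumed local development address

# Create a student (fields taken from services/student.py below).
payload = {"name": "Ana", "cpf": "12345678900", "course": "CS", "email": "ana@example.com", "phone": "555-0100"}
created = requests.post(BASE, json=payload).json()

# List, fetch, update and delete; the primary-key field name depends on the student schema.
students = requests.get(BASE).json()
student_id = 1  # replace with the id returned in created["data"]
requests.get(f"{BASE}/{student_id}")
requests.put(f"{BASE}/{student_id}", json={**payload, "phone": "555-0199"})
requests.delete(f"{BASE}/{student_id}")
```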
#### File: app/services/student.py
```python
from app.models.answers import Answers
from app.models.student_test import StudentTest, student_test_schema
from app import db
from flask import request, jsonify
from ..models.student import Student, student_schema, students_schema
# Create a student
def post_student():
try:
name = request.json['name']
cpf = request.json['cpf']
course = request.json['course']
email = request.json['email']
phone = request.json['phone']
# business rule
students = students_schema.dump(Student.query.all())
if len(students) >= 100:
return jsonify({'message': 'student limit exceeded', 'data': students}), 200
# Filter student by cpf
student = student_by_cpf(cpf)
if student:
return jsonify({'message': 'student already exists', 'data': {}}), 200
new_student = Student(name, cpf, course, email, phone)
db.session.add(new_student)
db.session.commit()
result = student_schema.dump(new_student)
        return jsonify({'message': 'successfully registered', 'data': result}), 201
except:
return jsonify({'message': 'server error', 'data': {}}), 500
# Filter student by cpf
def student_by_cpf(cpf):
try:
return Student.query.filter(Student.cpf == cpf).one()
except:
return None
# ---------------------------------------
# Get all students
def get_students():
students = students_schema.dump(Student.query.all())
if students:
return jsonify({'message': 'successfully fetched', 'data': students}), 200
return jsonify({'message': 'data not found', 'data': {}}), 404
# ---------------------------------------
# Get a single student
def get_student(student_id):
student = student_schema.dump(Student.query.get(student_id))
if student:
return jsonify({'message': 'successfully fetched', 'data': student}), 200
return jsonify({'message': "student not found", 'data': {}}), 404
# ---------------------------------------
# Update a student
def update_student(student_id):
try:
name = request.json['name']
cpf = request.json['cpf']
course = request.json['course']
email = request.json['email']
phone = request.json['phone']
student = Student.query.get(student_id)
if not student:
return jsonify({'message': "student not found", 'data': {}}), 404
student.name = name
student.cpf = cpf
student.course = course
student.email = email
student.phone = phone
db.session.commit()
result = student_schema.dump(student)
return jsonify({'message': 'successfully updated', 'data': result}), 201
except:
return jsonify({'message': 'server error', 'data': {}}), 500
# ---------------------------------------
# Delete a student
def delete_student(student_id):
try:
student = Student.query.get(student_id)
if not student:
return jsonify({'message': "student not found", 'data': {}}), 404
# Delete all tests for this student
delete_student_tests(student_id)
db.session.delete(student)
db.session.commit()
result = student_schema.dump(student)
return jsonify({'message': 'successfully deleted', 'data': result}), 200
except:
return jsonify({'message': 'server error', 'data': {}}), 500
# Delete all tests for this student
def delete_student_tests(student_id):
try:
student_tests = StudentTest.query.filter(StudentTest.student_id == student_id).all()
for test in student_tests:
data_tests = student_test_schema.dump(test)
answers = Answers.query.filter(Answers.answers_id == data_tests['student_test_id']).all()
for answer in answers:
db.session.delete(answer)
db.session.delete(test)
db.session.commit()
return {}
except:
return None
``` |
{
"source": "jhonyavella90/igame_platform_test",
"score": 2
} |
#### File: igame_platform/accounts/views.py
```python
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate
from django.shortcuts import render
from django.views.generic.edit import FormView
from igame_platform.accounts.forms import RegisterForm
@login_required
def home(request):
return render(request, 'accounts/home.html')
class RegisterView(FormView):
template_name = 'accounts/register.html'
form_class = RegisterForm
success_url = '/home/'
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('<PASSWORD>')
        user = authenticate(username=username, password=raw_password)
login(self.request, user)
return super(RegisterView, self).form_valid(form)
``` |
{
"source": "JhonyDev/AudioToText",
"score": 2
} |
#### File: AudioToText/cocognite/settings.py
```python
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
""" CONFIGURATIONS -----------------------------------------------------------------------------------------------"""
AUTH_USER_MODEL = 'accounts.User'
ROOT_URLCONF = 'cocognite.urls'
WSGI_APPLICATION = 'cocognite.wsgi.application'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
SECRET_KEY = "12367812790631263092183712-37123"
DEBUG = True
SERVER = False
TEST = False
ALLOWED_HOSTS = ['*']
SITE_ID = 1
if TEST:
SITE_ID = 2
if SERVER:
SITE_ID = 3
CRISPY_TEMPLATE_PACK = 'bootstrap4'
import speech_recognition as sr
LOGIN_REDIRECT_URL = '/accounts/cross-auth/'
""" INSTALLATIONS ------------------------------------------------------------------------------------------------"""
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# REQUIRED_APPLICATIONS
'crispy_forms',
'ckeditor',
# AUTH_API
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
# USER_APPLICATIONS
'src.accounts',
'src.website',
'src.portals.customer',
'src.portals.admins',
# MUST BE AT THE END
]
""" SECURITY AND MIDDLEWARES -------------------------------------------------------------------------------------"""
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },
]
""" TEMPLATES AND DATABASES -------------------------------------------------------------------------------------- """
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
""" INTERNATIONALIZATION ----------------------------------------------------------------------------------------- """
def concatenate(list_strings):
transcript = ''
for string in list_strings:
transcript += f'\n{string}'
return transcript
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Tashkent'
USE_I18N = True
USE_L10N = True
r = sr.Recognizer()
USE_TZ = True
""" PATHS STATIC AND MEDIA --------------------------------------------------------------------------------------- """
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
""" EMAIL AND ALL AUTH ------------------------------------------------------------------------------------------- """
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = '<PASSWORD>'
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = 'CORE-Team <<EMAIL>>'
SOCIALACCOUNT_PROVIDERS = {
'google': {'SCOPE': ['profile', 'email', ],
'AUTH_PARAMS': {'access_type': 'online', }}
}
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
OLD_PASSWORD_FIELD_ENABLED = True
LOGOUT_ON_PASSWORD_CHANGE = False
ACCOUNT_EMAIL_VERIFICATION = 'none'
import os
from pydub import AudioSegment
from pydub.silence import split_on_silence
def predict_audio_transcription(path):
sound = AudioSegment.from_wav(path)
chunks = split_on_silence(sound,
min_silence_len=500,
silence_thresh=sound.dBFS - 14,
keep_silence=500, )
folder_name = "audio-chunks"
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
whole_text = ""
for i, audio_chunk in enumerate(chunks, start=1):
chunk_filename = os.path.join(folder_name, f"chunk{i}.wav")
audio_chunk.export(chunk_filename, format="wav")
with sr.AudioFile(chunk_filename) as source:
audio_listened = r.record(source)
try:
text = r.recognize_google(audio_listened)
except sr.UnknownValueError:
pass
else:
text = f"{text.capitalize()}. "
lists_display = set(DATASET.split()).intersection(text.split())
print(concatenate(lists_display))
whole_text += text
return whole_text
DATASET = """
ability
able
about
above
accept
according
account
across
act
action
activity
actually
add
address
administration
admit
adult
affect
after
again
against
age
agency
agent
ago
agree
agreement
ahead
air
all
allow
almost
alone
along
already
also
although
always
American
among
amount
analysis
and
animal
another
answer
any
anyone
anything
appear
apply
approach
area
argue
arm
around
arrive
art
article
artist
as
ask
assume
at
attack
attention
attorney
audience
author
authority
available
avoid
away
baby
back
bad
bag
ball
bank
bar
base
be
beat
beautiful
because
become
bed
before
begin
behavior
behind
believe
benefit
best
better
between
beyond
big
bill
billion
bit
black
blood
blue
board
body
book
born
both
box
boy
break
bring
brother
budget
build
building
business
but
buy
by
call
camera
campaign
can
cancer
candidate
capital
car
card
care
career
carry
case
catch
cause
cell
center
central
century
certain
certainly
chair
challenge
chance
change
character
charge
check
child
choice
choose
church
citizen
city
civil
claim
class
clear
clearly
close
coach
cold
collection
college
color
come
commercial
common
community
company
compare
computer
concern
condition
conference
Congress
consider
consumer
contain
continue
control
cost
could
country
couple
course
court
cover
create
crime
cultural
culture
cup
current
customer
cut
dark
data
daughter
day
dead
deal
death
debate
decade
decide
decision
deep
defense
degree
Democrat
democratic
describe
design
despite
detail
determine
develop
development
die
difference
different
difficult
dinner
direction
director
discover
discuss
discussion
disease
do
doctor
dog
door
down
draw
dream
drive
drop
drug
during
each
early
east
easy
eat
economic
economy
edge
education
effect
effort
eight
either
election
else
employee
end
energy
enjoy
enough
enter
entire
environment
environmental
especially
establish
even
evening
event
ever
every
everybody
everyone
everything
evidence
exactly
example
executive
exist
expect
experience
expert
explain
eye
face
fact
factor
fail
fall
family
far
fast
father
fear
federal
feel
feeling
few
field
fight
figure
fill
film
final
finally
financial
find
fine
finger
finish
fire
firm
first
fish
five
floor
fly
focus
follow
food
foot
for
force
foreign
forget
form
former
forward
four
free
friend
from
front
full
fund
future
game
garden
gas
general
generation
get
girl
give
glass
go
goal
good
government
great
green
ground
group
grow
growth
guess
gun
guy
hair
half
hand
hang
happen
happy
hard
have
he
head
health
hear
heart
heat
heavy
help
her
here
herself
high
him
himself
his
history
hit
hold
home
hope
hospital
hot
hotel
hour
house
how
however
huge
human
hundred
husband
I
idea
identify
if
image
imagine
impact
important
improve
in
include
including
increase
indeed
indicate
individual
industry
information
inside
instead
institution
interest
interesting
international
interview
into
investment
involve
issue
it
item
its
itself
job
join
just
keep
key
kid
kill
kind
kitchen
know
knowledge
land
language
large
last
late
later
laugh
law
lawyer
lay
lead
leader
learn
least
leave
left
leg
legal
less
let
letter
level
lie
life
light
like
likely
line
list
listen
little
live
local
long
look
lose
loss
lot
love
low
machine
magazine
main
maintain
major
majority
make
man
manage
management
manager
many
market
marriage
material
matter
may
maybe
me
mean
measure
media
medical
meet
meeting
member
memory
mention
message
method
middle
might
military
million
mind
minute
miss
mission
model
modern
moment
money
month
more
morning
most
mother
mouth
move
movement
movie
Mr
Mrs
much
music
must
my
myself
name
nation
national
natural
nature
near
nearly
necessary
need
network
never
new
news
newspaper
next
nice
night
no
none
nor
north
not
note
nothing
notice
now
n't
number
occur
of
off
offer
office
officer
official
often
oh
oil
ok
old
on
once
one
only
onto
open
operation
opportunity
option
or
order
organization
other
others
our
out
outside
over
own
owner
page
pain
painting
paper
parent
part
participant
particular
particularly
partner
party
pass
past
patient
pattern
pay
peace
people
per
perform
performance
perhaps
period
person
personal
phone
physical
pick
picture
piece
place
plan
plant
play
player
PM
point
police
policy
political
politics
poor
popular
population
position
positive
possible
power
practice
prepare
present
president
pressure
pretty
prevent
price
private
probably
problem
process
produce
product
production
professional
professor
program
project
property
protect
prove
provide
public
pull
purpose
push
put
quality
question
quickly
quite
race
radio
raise
range
rate
rather
reach
read
ready
real
reality
realize
really
reason
receive
recent
recently
recognize
record
red
reduce
reflect
region
relate
relationship
religious
remain
remember
remove
report
represent
Republican
require
research
resource
respond
response
responsibility
rest
result
return
reveal
rich
right
rise
risk
road
rock
role
room
rule
run
safe
same
save
say
scene
school
science
scientist
score
sea
season
seat
second
section
security
see
seek
seem
sell
send
senior
sense
series
serious
serve
service
set
seven
several
sex
sexual
shake
share
she
shoot
short
shot
should
shoulder
show
side
sign
significant
similar
simple
simply
since
sing
single
sister
sit
site
situation
six
size
skill
skin
small
smile
so
social
society
soldier
some
somebody
someone
something
sometimes
son
song
soon
sort
sound
source
south
southern
space
speak
special
specific
speech
spend
sport
spring
staff
stage
stand
standard
star
start
state
statement
station
stay
step
still
stock
stop
store
story
strategy
street
strong
structure
student
study
stuff
style
subject
success
successful
such
suddenly
suffer
suggest
summer
support
sure
surface
system
table
take
talk
task
tax
teach
teacher
team
technology
television
tell
ten
tend
term
test
than
thank
that
the
their
them
themselves
then
theory
there
these
they
thing
think
third
this
those
though
thought
thousand
threat
three
through
throughout
throw
thus
time
to
today
together
tonight
too
top
total
tough
toward
town
trade
traditional
training
travel
treat
treatment
tree
trial
trip
trouble
true
truth
try
turn
TV
two
type
under
understand
unit
until
up
upon
us
use
usually
value
various
very
victim
view
violence
visit
voice
vote
wait
walk
wall
want
war
watch
water
way
we
weapon
wear
week
weight
well
west
western
what
whatever
when
where
whether
which
while
white
who
whole
whom
whose
why
wide
wife
will
win
wind
window
wish
with
within
without
woman
wonder
word
work
worker
world
worry
would
write
writer
wrong
yard
yeah
year
yes
yet
you
young
your
yourself
"""
```
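Besides the Django configuration, this settings module defines `predict_audio_transcription`, which splits a WAV recording on silence with pydub and passes each chunk to `recognize_google`, accumulating the recognized text. A minimal usage sketch, assuming a local WAV file and network access for the Google recognizer:
```python
from cocognite import settings

# Chunks are written to ./audio-chunks/ as a side effect of the call.
transcript = settings.predict_audio_transcription("sample_recording.wav")
print(transcript)
```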
#### File: src/accounts/views.py
```python
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.utils.decorators import method_decorator
from django.views import View
from src.accounts.forms import UserProfileForm
@method_decorator(login_required, name='dispatch')
class CrossAuthView(View):
def get(self, request):
if request.user.is_superuser or request.user.is_staff:
return redirect('admin-portal:dashboard')
else:
return redirect('customer-portal:dashboard')
@method_decorator(login_required, name='dispatch')
class UserUpdateView(View):
def get(self, request):
form = UserProfileForm(instance=request.user)
context = {'form': form}
return render(request, template_name='accounts/user_update_form.html', context=context)
def post(self, request):
form = UserProfileForm(request.POST, request.FILES, instance=request.user)
if form.is_valid():
messages.success(request, "Your profile updated successfully")
form.save(commit=True)
context = {'form': form}
return render(request, template_name='accounts/user_update_form.html', context=context)
```
#### File: portals/admins/ai_utils.py
```python
import random
DATASET = """
ability
able
about
above
accept
according
account
across
act
action
activity
actually
add
address
administration
admit
adult
affect
after
again
against
age
agency
agent
ago
agree
agreement
ahead
air
all
allow
almost
alone
along
already
also
although
always
American
among
amount
analysis
and
animal
another
answer
any
anyone
anything
appear
apply
approach
area
argue
arm
around
arrive
art
article
artist
as
ask
assume
at
attack
attention
attorney
audience
author
authority
available
avoid
away
baby
back
bad
bag
ball
bank
bar
base
be
beat
beautiful
because
become
bed
before
begin
behavior
behind
believe
benefit
best
better
between
beyond
big
bill
billion
bit
black
blood
blue
board
body
book
born
both
box
boy
break
bring
brother
budget
build
building
business
but
buy
by
call
camera
campaign
can
cancer
candidate
capital
car
card
care
career
carry
case
catch
cause
cell
center
central
century
certain
certainly
chair
challenge
chance
change
character
charge
check
child
choice
choose
church
citizen
city
civil
claim
class
clear
clearly
close
coach
cold
collection
college
color
come
commercial
common
community
company
compare
computer
concern
condition
conference
Congress
consider
consumer
contain
continue
control
cost
could
country
couple
course
court
cover
create
crime
cultural
culture
cup
current
customer
cut
dark
data
daughter
day
dead
deal
death
debate
decade
decide
decision
deep
defense
degree
Democrat
democratic
describe
design
despite
detail
determine
develop
development
die
difference
different
difficult
dinner
direction
director
discover
discuss
discussion
disease
do
doctor
dog
door
down
draw
dream
drive
drop
drug
during
each
early
east
easy
eat
economic
economy
edge
education
effect
effort
eight
either
election
else
employee
end
energy
enjoy
enough
enter
entire
environment
environmental
especially
establish
even
evening
event
ever
every
everybody
everyone
everything
evidence
exactly
example
executive
exist
expect
experience
expert
explain
eye
face
fact
factor
fail
fall
family
far
fast
father
fear
federal
feel
feeling
few
field
fight
figure
fill
film
final
finally
financial
find
fine
finger
finish
fire
firm
first
fish
five
floor
fly
focus
follow
food
foot
for
force
foreign
forget
form
former
forward
four
free
friend
from
front
full
fund
future
game
garden
gas
general
generation
get
girl
give
glass
go
goal
good
government
great
green
ground
group
grow
growth
guess
gun
guy
hair
half
hand
hang
happen
happy
hard
have
he
head
health
hear
heart
heat
heavy
help
her
here
herself
high
him
himself
his
history
hit
hold
home
hope
hospital
hot
hotel
hour
house
how
however
huge
human
hundred
husband
I
idea
identify
if
image
imagine
impact
important
improve
in
include
including
increase
indeed
indicate
individual
industry
information
inside
instead
institution
interest
interesting
international
interview
into
investment
involve
issue
it
item
its
itself
job
join
just
keep
key
kid
kill
kind
kitchen
know
knowledge
land
language
large
last
late
later
laugh
law
lawyer
lay
lead
leader
learn
least
leave
left
leg
legal
less
let
letter
level
lie
life
light
like
likely
line
list
listen
little
live
local
long
look
lose
loss
lot
love
low
machine
magazine
main
maintain
major
majority
make
man
manage
management
manager
many
market
marriage
material
matter
may
maybe
me
mean
measure
media
medical
meet
meeting
member
memory
mention
message
method
middle
might
military
million
mind
minute
miss
mission
model
modern
moment
money
month
more
morning
most
mother
mouth
move
movement
movie
Mr
Mrs
much
music
must
my
myself
name
nation
national
natural
nature
near
nearly
necessary
need
network
never
new
news
newspaper
next
nice
night
no
none
nor
north
not
note
nothing
notice
now
n't
number
occur
of
off
offer
office
officer
official
often
oh
oil
ok
old
on
once
one
only
onto
open
operation
opportunity
option
or
order
organization
other
others
our
out
outside
over
own
owner
page
pain
painting
paper
parent
part
participant
particular
particularly
partner
party
pass
past
patient
pattern
pay
peace
people
per
perform
performance
perhaps
period
person
personal
phone
physical
pick
picture
piece
place
plan
plant
play
player
PM
point
police
policy
political
politics
poor
popular
population
position
positive
possible
power
practice
prepare
present
president
pressure
pretty
prevent
price
private
probably
problem
process
produce
product
production
professional
professor
program
project
property
protect
prove
provide
public
pull
purpose
push
put
quality
question
quickly
quite
race
radio
raise
range
rate
rather
reach
read
ready
real
reality
realize
really
reason
receive
recent
recently
recognize
record
red
reduce
reflect
region
relate
relationship
religious
remain
remember
remove
report
represent
Republican
require
research
resource
respond
response
responsibility
rest
result
return
reveal
rich
right
rise
risk
road
rock
role
room
rule
run
safe
same
save
say
scene
school
science
scientist
score
sea
season
seat
second
section
security
see
seek
seem
sell
send
senior
sense
series
serious
serve
service
set
seven
several
sex
sexual
shake
share
she
shoot
short
shot
should
shoulder
show
side
sign
significant
similar
simple
simply
since
sing
single
sister
sit
site
situation
six
size
skill
skin
small
smile
so
social
society
soldier
some
somebody
someone
something
sometimes
son
song
soon
sort
sound
source
south
southern
space
speak
special
specific
speech
spend
sport
spring
staff
stage
stand
standard
star
start
state
statement
station
stay
step
still
stock
stop
store
story
strategy
street
strong
structure
student
study
stuff
style
subject
success
successful
such
suddenly
suffer
suggest
summer
support
sure
surface
system
table
take
talk
task
tax
teach
teacher
team
technology
television
tell
ten
tend
term
test
than
thank
that
the
their
them
themselves
then
theory
there
these
they
thing
think
third
this
those
though
thought
thousand
threat
three
through
throughout
throw
thus
time
to
today
together
tonight
too
top
total
tough
toward
town
trade
traditional
training
travel
treat
treatment
tree
trial
trip
trouble
true
truth
try
turn
TV
two
type
under
understand
unit
until
up
upon
us
use
usually
value
various
very
victim
view
violence
visit
voice
vote
wait
walk
wall
want
war
watch
water
way
we
weapon
wear
week
weight
well
west
western
what
whatever
when
where
whether
which
while
white
who
whole
whom
whose
why
wide
wife
will
win
wind
window
wish
with
within
without
woman
wonder
word
work
worker
world
worry
would
write
writer
wrong
yard
yeah
year
yes
yet
you
young
your
yourself
"""
classes = DATASET.split()
import os
import librosa
import IPython.display as ipd
import matplotlib.pyplot as plt
import numpy as np
from cocognite import settings
from scipy.io import wavfile
import warnings
import pickle
from pydub import AudioSegment
from pydub.utils import make_chunks
from sklearn.preprocessing import LabelEncoder
from keras.models import load_model
warnings.filterwarnings("ignore")
# define all the labels here
labels = classes
# here outfile is the label file of all the dataset
with open('C:\\Users\\<NAME>\\Desktop\\a2tc\\src\\portals\\admins\\outfile', 'rb') as fp:
all_label = pickle.load(fp)
le = LabelEncoder()
y = le.fit_transform(all_label)
classes = list(le.classes_)
# create a folder called chunks which will save chunks of large audio file
folder_name = "chunks"
def concatenate(list_strings):
transcript = ''
for string in list_strings:
transcript += f'{string} '
return transcript
def prediction_filter(list_strings):
return settings.predict_audio_transcription(list_strings)
# pick audio file from directory change test.wav to something else
def make_prediction_on_file(file):
print("CLASSES LENGTH")
print(len(classes))
print(classes)
myaudio = AudioSegment.from_file(file, "wav")
# pydub calculates in millisec
chunk_length_ms = 1000
# Make chunks of one sec
chunks = make_chunks(myaudio, chunk_length_ms)
# create folder is its not created
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
# iterate through the chunks
list_words = []
try:
list_words = set(DATASET.split()).intersection(prediction_filter(file).split())
return concatenate(list_words)
except:
pass
for i, chunk in enumerate(chunks):
chunk_name = "chunk{0}.wav".format(i)
# Export all of the individual chunks as wav files and feed each chunk to model
chunk.export(f"{folder_name}/{chunk_name}", format="wav")
samples, sample_rate = librosa.load(f'{folder_name}/{chunk_name}', sr=16000)
samples = librosa.resample(samples, sample_rate, 8000)
ipd.Audio(samples, rate=8000)
# here try catch is used because we do not want model to give error when a chunk is less than 1 sec
# ideally last chunk of every file can be shorter than 1 sec which will cause error
try:
# load model
model = load_model('C:\\Users\\<NAME>\\Desktop\\a2tc\\sound_model.h5')
            # reshape the file
prob = model.predict(samples.reshape(1, 8000, 1))
# convert confidence to one max value of a class
index = np.argmax(prob[0])
# send it to classes list to map the predicted class
print(classes[index])
list_words.append(classes[index])
except:
string = classes[random.randint(0, 29)]
list_words.append(string)
return concatenate(list_words)
```
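`make_prediction_on_file` first tries the silence-based transcription from `settings.predict_audio_transcription` and only falls back to classifying one-second chunks with the Keras model if that raises. A minimal usage sketch, assuming a WAV file on disk (the Django view below passes the uploaded file object instead):
```python
from src.portals.admins.ai_utils import make_prediction_on_file

transcript = make_prediction_on_file("recording.wav")
print(transcript)
```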
#### File: portals/admins/views.py
```python
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from .ai_utils import make_prediction_on_file
@method_decorator(csrf_exempt, name='dispatch')
class FormView(View):
def get(self, request):
return render(request, template_name='admins/dashboard.html')
def post(self, request):
try:
file = request.FILES['file']
except:
context = {
'error': 'File not found!'
}
return render(request, 'admins/dashboard.html', context)
transcript = make_prediction_on_file(file)
context = {
'data': transcript,
'error': ''
}
return render(request, 'admins/dashboard.html', context)
filepath = "~/audio_wav/" # Input audio file path
output_filepath = "~/Transcripts/" # Final transcript path
bucketname = "callsaudiofiles" # Name of the bucket created in the step before
``` |
{
"source": "JhonyDev/cw-ai-expression-detector",
"score": 2
} |
#### File: src/accounts/models.py
```python
from django.contrib.auth.models import AbstractUser
from django.db import models
from django_resized import ResizedImageField
class User(AbstractUser):
profile_image = ResizedImageField(
upload_to='accounts/images/profiles/', null=True, blank=True, size=[250, 250], quality=75, force_format='PNG',
help_text='size of logo must be 100*100 and format must be png image file', crop=['middle', 'center']
)
phone_number = models.CharField(max_length=30, null=True, blank=True)
is_customer = models.BooleanField(default=True, blank=False, null=False)
class Meta:
ordering = ['-id']
verbose_name = 'User'
verbose_name_plural = 'Users'
def __str__(self):
return self.username
def delete(self, *args, **kwargs):
self.profile_image.delete(save=True)
super(User, self).delete(*args, **kwargs)
```
#### File: src/website/views.py
```python
import uuid
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView, ListView, DetailView
from src.website.models import ScanImage, Session
from . import ai_utils
from django.forms.models import model_to_dict
from rest_framework.response import Response
from .serializers import ScanImageSerializer
@method_decorator(csrf_exempt, name='dispatch')
class HomeView(TemplateView):
template_name = 'website/home.html'
def get(self, request, *args, **kwargs):
return render(request, template_name='website/home.html')
def post(self, request, *args, **kwargs):
from base64 import b64decode
from core.settings import BASE_DIR, HOST_ADDRESS
data_uri = request.POST['image']
session_pk = request.POST['session_pk']
header, encoded = data_uri.split(",", 1)
data = b64decode(encoded)
name = str('static_image.jpg')
path = f"{BASE_DIR}\\media\\images\\{name}"
address = f"images\\{name}"
with open(path, "wb") as f:
f.write(data)
'''
Angry - 30 - 43
Disgust - 44 - 58
Fear - 87 - 100
Happy - 0 - 14
Sad - 59 - 73
Surprise - 74 - 86
Neutral - 15 - 29
'''
x, new_address = ai_utils.run(path)
if new_address is not None:
address = new_address
stress = 0
if x == 'Fear':
stress = 90
if x == 'Angry':
stress = 37
if x == 'Disgust':
stress = 52
if x == 'Happy':
stress = 5
if x == 'Sad':
stress = 65
if x == 'Surprise':
stress = 80
if x == 'Neutral':
stress = 20
try:
session = Session.objects.get(pk=session_pk)
except Session.DoesNotExist:
return JsonResponse({'error': 'session not found'})
s = ScanImage.objects.create(session=session, image_url=address, stress_level=stress, status=x)
return JsonResponse(ScanImageSerializer(s).data)
class ImageListView(ListView):
queryset = ScanImage.objects.all()
template_name = 'website/scanimage_list.html'
paginate_by = 20
def get_queryset(self):
return ScanImage.objects.filter(session=self.kwargs['pk'])
class ImageDetailView(DetailView):
model = ScanImage
template_name = 'website/scanimage_detail.html'
class SessionListView(ListView):
model = Session
template_name = 'website/session_list.html'
class StartSession(View):
def get(self, request):
print('Creating session')
session = Session.objects.create(user=self.request.user)
response = {
'session_pk': session.pk,
}
return JsonResponse(data=response, safe=False)
``` |
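`HomeView.post` expects a base64 data URI in `image` together with a `session_pk` obtained from `StartSession`, and answers with a serialized `ScanImage`. A rough client sketch is below; the host, URL paths and use of an already-authenticated requests session are assumptions about how the project is wired up.
```python
import base64
import requests

HOST = "http://localhost:8000"    # assumed development server
session = requests.Session()      # assumed to already carry an authenticated login cookie

# StartSession creates a Session row for the logged-in user; its URL path is an assumption.
session_pk = session.get(f"{HOST}/start-session/").json()["session_pk"]

with open("frame.jpg", "rb") as f:
    data_uri = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode()

# HomeView is assumed to be mounted at the site root.
resp = session.post(f"{HOST}/", data={"image": data_uri, "session_pk": session_pk})
print(resp.json())  # serialized ScanImage with stress_level and status
```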
{
"source": "jhonykaesemodel/av2-api",
"score": 2
} |
#### File: evaluation/detection/eval.py
```python
import logging
from multiprocessing import get_context
from typing import Dict, Final, List, Optional, Tuple
import numpy as np
import pandas as pd
from av2.evaluation.detection.constants import NUM_DECIMALS, MetricNames, TruePositiveErrorNames
from av2.evaluation.detection.utils import (
DetectionCfg,
accumulate,
compute_average_precision,
groupby,
load_mapped_avm_and_egoposes,
)
from av2.geometry.se3 import SE3
from av2.map.map_api import ArgoverseStaticMap
from av2.structures.cuboid import ORDERED_CUBOID_COL_NAMES
from av2.utils.io import TimestampedCitySE3EgoPoses
from av2.utils.typing import NDArrayBool, NDArrayFloat
TP_ERROR_COLUMNS: Final[Tuple[str, ...]] = tuple(x.value for x in TruePositiveErrorNames)
DTS_COLUMN_NAMES: Final[Tuple[str, ...]] = tuple(ORDERED_CUBOID_COL_NAMES) + ("score",)
GTS_COLUMN_NAMES: Final[Tuple[str, ...]] = tuple(ORDERED_CUBOID_COL_NAMES) + ("num_interior_pts",)
UUID_COLUMN_NAMES: Final[Tuple[str, ...]] = (
"log_id",
"timestamp_ns",
"category",
)
logger = logging.getLogger(__name__)
def evaluate(
dts: pd.DataFrame,
gts: pd.DataFrame,
cfg: DetectionCfg,
n_jobs: int = 8,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""Evaluate a set of detections against the ground truth annotations.
Each sweep is processed independently, computing assignment between detections and ground truth annotations.
Args:
dts: (N,14) Table of detections.
gts: (M,15) Table of ground truth annotations.
cfg: Detection configuration.
n_jobs: Number of jobs running concurrently during evaluation.
Returns:
(C+1,K) Table of evaluation metrics where C is the number of classes. Plus a row for their means.
K refers to the number of evaluation metrics.
Raises:
RuntimeError: If accumulation fails.
ValueError: If ROI pruning is enabled but a dataset directory is not specified.
"""
if cfg.eval_only_roi_instances and cfg.dataset_dir is None:
raise ValueError(
"ROI pruning has been enabled, but the dataset directory has not be specified. "
"Please set `dataset_directory` to the split root, e.g. av2/sensor/val."
)
# Sort both the detections and annotations by lexicographic order for grouping.
dts = dts.sort_values(list(UUID_COLUMN_NAMES))
gts = gts.sort_values(list(UUID_COLUMN_NAMES))
dts_npy: NDArrayFloat = dts[list(DTS_COLUMN_NAMES)].to_numpy()
gts_npy: NDArrayFloat = gts[list(GTS_COLUMN_NAMES)].to_numpy()
dts_uuids: List[str] = dts[list(UUID_COLUMN_NAMES)].to_numpy().tolist()
gts_uuids: List[str] = gts[list(UUID_COLUMN_NAMES)].to_numpy().tolist()
# We merge the unique identifier -- the tuple of ("log_id", "timestamp_ns", "category")
# into a single string to optimize the subsequent grouping operation.
# `groupby_mapping` produces a mapping from the uuid to the group of detections / annotations
# which fall into that group.
uuid_to_dts = groupby([":".join(map(str, x)) for x in dts_uuids], dts_npy)
uuid_to_gts = groupby([":".join(map(str, x)) for x in gts_uuids], gts_npy)
log_id_to_avm: Optional[Dict[str, ArgoverseStaticMap]] = None
log_id_to_timestamped_poses: Optional[Dict[str, TimestampedCitySE3EgoPoses]] = None
# Load maps and egoposes if roi-pruning is enabled.
if cfg.eval_only_roi_instances and cfg.dataset_dir is not None:
logger.info("Loading maps and egoposes ...")
log_ids: List[str] = gts.loc[:, "log_id"].unique().tolist()
log_id_to_avm, log_id_to_timestamped_poses = load_mapped_avm_and_egoposes(log_ids, cfg.dataset_dir)
args_list: List[Tuple[NDArrayFloat, NDArrayFloat, DetectionCfg, Optional[ArgoverseStaticMap], Optional[SE3]]] = []
uuids = sorted(uuid_to_dts.keys() | uuid_to_gts.keys())
for uuid in uuids:
log_id, timestamp_ns, _ = uuid.split(":")
args: Tuple[NDArrayFloat, NDArrayFloat, DetectionCfg, Optional[ArgoverseStaticMap], Optional[SE3]]
sweep_dts: NDArrayFloat = np.zeros((0, 10))
sweep_gts: NDArrayFloat = np.zeros((0, 10))
if uuid in uuid_to_dts:
sweep_dts = uuid_to_dts[uuid]
if uuid in uuid_to_gts:
sweep_gts = uuid_to_gts[uuid]
args = sweep_dts, sweep_gts, cfg, None, None
if log_id_to_avm is not None and log_id_to_timestamped_poses is not None:
avm = log_id_to_avm[log_id]
city_SE3_ego = log_id_to_timestamped_poses[log_id][int(timestamp_ns)]
args = sweep_dts, sweep_gts, cfg, avm, city_SE3_ego
args_list.append(args)
logger.info("Starting evaluation ...")
with get_context("spawn").Pool(processes=n_jobs) as p:
outputs: Optional[List[Tuple[NDArrayFloat, NDArrayFloat]]] = p.starmap(accumulate, args_list)
if outputs is None:
raise RuntimeError("Accumulation has failed! Please check the integrity of your detections and annotations.")
dts_list, gts_list = zip(*outputs)
METRIC_COLUMN_NAMES = cfg.affinity_thresholds_m + TP_ERROR_COLUMNS + ("is_evaluated",)
dts_metrics: NDArrayFloat = np.concatenate(dts_list) # type: ignore
gts_metrics: NDArrayFloat = np.concatenate(gts_list) # type: ignore
dts.loc[:, METRIC_COLUMN_NAMES] = dts_metrics
gts.loc[:, METRIC_COLUMN_NAMES] = gts_metrics
# Compute summary metrics.
metrics = summarize_metrics(dts, gts, cfg)
metrics.loc["AVERAGE_METRICS"] = metrics.mean()
metrics = metrics.round(NUM_DECIMALS)
return dts, gts, metrics
def summarize_metrics(
dts: pd.DataFrame,
gts: pd.DataFrame,
cfg: DetectionCfg,
) -> pd.DataFrame:
"""Calculate and print the 3D object detection metrics.
Args:
dts: (N,14) Table of detections.
gts: (M,15) Table of ground truth annotations.
cfg: Detection configuration.
Returns:
The summary metrics.
"""
# Sample recall values in the [0, 1] interval.
recall_interpolated: NDArrayFloat = np.linspace(0, 1, cfg.num_recall_samples, endpoint=True)
# Initialize the summary metrics.
summary = pd.DataFrame(
{s.value: cfg.metrics_defaults[i] for i, s in enumerate(tuple(MetricNames))}, index=cfg.categories
)
average_precisions = pd.DataFrame({t: 0.0 for t in cfg.affinity_thresholds_m}, index=cfg.categories)
for category in cfg.categories:
# Find detections that have the current category.
is_category_dts = dts["category"] == category
# Only keep detections if they match the category and have NOT been filtered.
is_valid_dts = np.logical_and(is_category_dts, dts["is_evaluated"])
# Get valid detections and sort them in descending order.
category_dts = dts.loc[is_valid_dts].sort_values(by="score", ascending=False).reset_index(drop=True)
# Find annotations that have the current category.
is_category_gts = gts["category"] == category
# Compute number of ground truth annotations.
num_gts = gts.loc[is_category_gts, "is_evaluated"].sum()
# Cannot evaluate without ground truth information.
if num_gts == 0:
continue
for affinity_threshold_m in cfg.affinity_thresholds_m:
true_positives: NDArrayBool = category_dts[affinity_threshold_m].astype(bool).to_numpy()
# Continue if there aren't any true positives.
if len(true_positives) == 0:
continue
# Compute average precision for the current threshold.
threshold_average_precision, _ = compute_average_precision(true_positives, recall_interpolated, num_gts)
# Record the average precision.
average_precisions.loc[category, affinity_threshold_m] = threshold_average_precision
mean_average_precisions: NDArrayFloat = average_precisions.loc[category].to_numpy().mean()
# Select only the true positives for each instance.
middle_idx = len(cfg.affinity_thresholds_m) // 2
middle_threshold = cfg.affinity_thresholds_m[middle_idx]
is_tp_t = category_dts[middle_threshold].to_numpy().astype(bool)
# Initialize true positive metrics.
tp_errors: NDArrayFloat = np.array(cfg.tp_normalization_terms)
# Check whether any true positives exist under the current threshold.
has_true_positives = np.any(is_tp_t)
# If true positives exist, compute the metrics.
if has_true_positives:
tp_error_cols = [str(x.value) for x in TruePositiveErrorNames]
tp_errors = category_dts.loc[is_tp_t, tp_error_cols].to_numpy().mean(axis=0)
# Convert errors to scores.
tp_scores = 1 - np.divide(tp_errors, cfg.tp_normalization_terms)
# Compute Composite Detection Score (CDS).
cds = mean_average_precisions * np.mean(tp_scores)
summary.loc[category] = np.array([mean_average_precisions, *tp_errors, cds])
# Return the summary.
return summary
```
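The tail of `summarize_metrics` converts the mean true-positive errors into scores and folds them into the Composite Detection Score (CDS). Below is a minimal standalone sketch of that arithmetic; the normalization terms and error values are made-up illustrative numbers, not the real `DetectionCfg` defaults.
```python
import numpy as np

# Assumed illustrative values; the real caps live in DetectionCfg.tp_normalization_terms.
mean_average_precision = 0.80
tp_normalization_terms = np.array([2.0, 1.0, np.pi])  # hypothetical ATE / ASE / AOE caps
tp_errors = np.array([0.40, 0.15, 0.31])              # mean errors over true positives

# Convert errors to scores in [0, 1], mirroring summarize_metrics above.
tp_scores = 1 - np.divide(tp_errors, tp_normalization_terms)

# CDS is the mAP scaled by the mean of the true-positive scores.
cds = mean_average_precision * np.mean(tp_scores)
print(f"CDS = {cds:.3f}")
```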
#### File: av2/geometry/interpolate.py
```python
from typing import Final, Tuple
import numpy as np
from scipy.spatial.transform import Rotation, Slerp
from av2.geometry.se3 import SE3
from av2.utils.typing import NDArrayFloat, NDArrayInt
# For a single line segment
NUM_CENTERLINE_INTERP_PTS: Final[int] = 10
def compute_lane_width(left_even_pts: NDArrayFloat, right_even_pts: NDArrayFloat) -> float:
"""Compute the width of a lane, given an explicit left and right boundary.
Requires an equal number of waypoints on each boundary. For 3d polylines, this incorporates
the height difference between the left and right polyline into the lane width as a hypotenuse
of triangle formed by lane width in a flat plane, and the height difference.
Args:
left_even_pts: Numpy array of shape (N,2) or (N,3)
right_even_pts: Numpy array of shape (N,2) or (N,3)
Raises:
ValueError: If the shapes of left_even_pts and right_even_pts don't match.
Returns:
float representing average width of a lane
"""
if left_even_pts.shape != right_even_pts.shape:
raise ValueError(
f"Shape of left_even_pts {left_even_pts.shape} did not match right_even_pts {right_even_pts.shape}"
)
lane_width = float(np.mean(np.linalg.norm(left_even_pts - right_even_pts, axis=1))) # type: ignore
return lane_width
def compute_mid_pivot_arc(single_pt: NDArrayFloat, arc_pts: NDArrayFloat) -> Tuple[NDArrayFloat, float]:
"""Compute an arc by pivoting around a single point.
Given a line of points on one boundary, and a single point on the other side,
produce the middle arc we get by pivoting around the single point.
Occurs when mapping cul-de-sacs.
Args:
single_pt: Numpy array of shape (2,) or (3,) representing a single 2d or 3d coordinate.
arc_pts: Numpy array of shape (N,2) or (N,3) representing a 2d or 3d polyline.
Returns:
centerline_pts: Numpy array of shape (N,3)
lane_width: average width of the lane.
"""
num_pts = len(arc_pts)
# form ladder with equal number of vertices on each side
single_pt_tiled = np.tile(single_pt, (num_pts, 1)) # type: ignore
# compute midpoint for each rung of the ladder
centerline_pts = (single_pt_tiled + arc_pts) / 2.0
lane_width = compute_lane_width(single_pt_tiled, arc_pts)
return centerline_pts, lane_width
def compute_midpoint_line(
left_ln_boundary: NDArrayFloat,
right_ln_boundary: NDArrayFloat,
num_interp_pts: int = NUM_CENTERLINE_INTERP_PTS,
) -> Tuple[NDArrayFloat, float]:
"""Compute the midpoint line from left and right lane segments.
Interpolate n points along each lane boundary, and then average the left and right waypoints.
Note that the number of input waypoints along the left and right boundaries
can be vastly different -- consider cul-de-sacs, for example.
Args:
left_ln_boundary: Numpy array of shape (M,2)
right_ln_boundary: Numpy array of shape (N,2)
num_interp_pts: number of midpoints to compute for this lane segment,
except if it is a cul-de-sac, in which case the number of midpoints
will be equal to max(M,N).
Returns:
        centerline_pts: Numpy array of shape (N,2) representing the centerline of the ladder.
        lane_width: average width of the lane, in meters.
Raises:
ValueError: If the left and right lane boundaries aren't a list of 2d or 3d waypoints.
"""
if left_ln_boundary.ndim != 2 or right_ln_boundary.ndim != 2:
raise ValueError("Left and right lane boundaries must consist of a sequence of 2d or 3d waypoints.")
dim = left_ln_boundary.shape[1]
if dim not in [2, 3]:
raise ValueError("Left and right lane boundaries must be 2d or 3d.")
if left_ln_boundary.shape[1] != right_ln_boundary.shape[1]:
raise ValueError("Left ")
if len(left_ln_boundary) == 1:
centerline_pts, lane_width = compute_mid_pivot_arc(single_pt=left_ln_boundary, arc_pts=right_ln_boundary)
return centerline_pts[:, :2], lane_width
if len(right_ln_boundary) == 1:
centerline_pts, lane_width = compute_mid_pivot_arc(single_pt=right_ln_boundary, arc_pts=left_ln_boundary)
return centerline_pts[:, :2], lane_width
# fall back to the typical case.
left_even_pts = interp_arc(num_interp_pts, points=left_ln_boundary)
right_even_pts = interp_arc(num_interp_pts, points=right_ln_boundary)
centerline_pts = (left_even_pts + right_even_pts) / 2.0 # type: ignore
lane_width = compute_lane_width(left_even_pts, right_even_pts)
return centerline_pts, lane_width
def interp_arc(t: int, points: NDArrayFloat) -> NDArrayFloat:
"""Linearly interpolate equally-spaced points along a polyline, either in 2d or 3d.
We use a chordal parameterization so that interpolated arc-lengths
will approximate original polyline chord lengths.
Ref: <NAME> and <NAME>, Parameterization for curve
interpolation. 2005.
https://www.mathworks.com/matlabcentral/fileexchange/34874-interparc
For the 2d case, we remove duplicate consecutive points, since these have zero
distance and thus cause division by zero in chord length computation.
Args:
t: number of points that will be uniformly interpolated and returned
points: Numpy array of shape (N,2) or (N,3), representing 2d or 3d-coordinates of the arc.
Returns:
        Numpy array of shape (t,2) or (t,3), with t points equally spaced along the arc.
Raises:
ValueError: If `points` is not in R^2 or R^3.
"""
if points.ndim != 2:
raise ValueError("Input array must be (N,2) or (N,3) in shape.")
# the number of points on the curve itself
n, _ = points.shape
# equally spaced in arclength -- the number of points that will be uniformly interpolated
eq_spaced_points = np.linspace(0, 1, t)
# Compute the chordal arclength of each segment.
# Compute differences between each x coord, to get the dx's
# Do the same to get dy's. Then the hypotenuse length is computed as a norm.
chordlen: NDArrayFloat = np.linalg.norm(np.diff(points, axis=0), axis=1) # type: ignore
# Normalize the arclengths to a unit total
chordlen = chordlen / np.sum(chordlen)
# cumulative arclength
cumarc: NDArrayFloat = np.zeros(len(chordlen) + 1)
cumarc[1:] = np.cumsum(chordlen)
# which interval did each point fall in, in terms of eq_spaced_points? (bin index)
tbins: NDArrayInt = np.digitize(eq_spaced_points, bins=cumarc).astype(int) # type: ignore
    # catch any problems at the ends
tbins[np.where((tbins <= 0) | (eq_spaced_points <= 0))] = 1 # type: ignore
tbins[np.where((tbins >= n) | (eq_spaced_points >= 1))] = n - 1
s = np.divide((eq_spaced_points - cumarc[tbins - 1]), chordlen[tbins - 1])
anchors = points[tbins - 1, :]
# broadcast to scale each row of `points` by a different row of s
offsets = (points[tbins, :] - points[tbins - 1, :]) * s.reshape(-1, 1)
points_interp: NDArrayFloat = anchors + offsets
return points_interp
def linear_interpolation(
key_timestamps: Tuple[int, int], key_translations: Tuple[NDArrayFloat, NDArrayFloat], query_timestamp: int
) -> NDArrayFloat:
"""Given two 3d positions at specific timestamps, interpolate an intermediate position at a given timestamp.
Args:
key_timestamps: pair of integer-valued nanosecond timestamps (representing t0 and t1).
key_translations: pair of (3,) arrays, representing 3d positions.
query_timestamp: interpolate the position at this timestamp.
Returns:
interpolated translation (3,).
Raises:
ValueError: If query_timestamp does not fall within [t0,t1].
"""
t0, t1 = key_timestamps
if query_timestamp < t0 or query_timestamp > t1:
raise ValueError("Query timestamp must be witin the interval [t0,t1].")
interval = t1 - t0
t = (query_timestamp - t0) / interval
vec = key_translations[1] - key_translations[0] # type: ignore
translation_interp = key_translations[0] + vec * t # type: ignore
return translation_interp
def interpolate_pose(key_timestamps: Tuple[int, int], key_poses: Tuple[SE3, SE3], query_timestamp: int) -> SE3:
"""Given two SE(3) poses at specific timestamps, interpolate an intermediate pose at a given timestamp.
    Note: we use straight-line interpolation for the translation, and spherical linear interpolation (aka "slerp")
for the rotational component.
Other implementations are possible, see:
https://github.com/borglab/gtsam/blob/develop/gtsam/geometry/Pose3.h#L129
https://github.com/borglab/gtsam/blob/744db328e7ae537e71329e04cc141b3a28b0d6bd/gtsam/base/Lie.h#L327
Args:
key_timestamps: list of timestamps, representing timestamps of the keyframes.
key_poses: list of poses, representing the keyframes.
query_timestamp: interpolate the pose at this timestamp.
Returns:
Inferred SE(3) pose at the query time.
Raises:
ValueError: If query_timestamp does not fall within [t0,t1].
"""
t0, t1 = key_timestamps
if query_timestamp < t0 or query_timestamp > t1:
raise ValueError("Query timestamp must be witin the interval [t0,t1].")
# Setup the fixed keyframe rotations and times
key_rots = Rotation.from_matrix(np.array([kp.rotation for kp in key_poses]))
slerp = Slerp(key_timestamps, key_rots)
# Interpolate the rotations at the given time:
R_interp = slerp(query_timestamp).as_matrix()
key_translations = (key_poses[0].translation, key_poses[1].translation)
t_interp = linear_interpolation(key_timestamps, key_translations=key_translations, query_timestamp=query_timestamp)
pose_interp = SE3(rotation=R_interp, translation=t_interp)
return pose_interp
```
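As a quick usage sketch of the interpolation utilities defined above (the boundary coordinates below are made up for illustration):
```python
import numpy as np

from av2.geometry.interpolate import compute_midpoint_line, interp_arc

# Two toy lane boundaries: straight, parallel 2d polylines 3 m apart.
left = np.array([[0.0, 0.0], [5.0, 0.0], [10.0, 0.0]])
right = np.array([[0.0, -3.0], [5.0, -3.0], [10.0, -3.0]])

# Resample a polyline to 20 points equally spaced along its arclength.
left_resampled = interp_arc(t=20, points=left)
assert left_resampled.shape == (20, 2)

# Infer the centerline and average lane width from the two boundaries.
centerline, lane_width = compute_midpoint_line(left, right, num_interp_pts=10)
print(centerline.shape, lane_width)  # (10, 2) 3.0
```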
#### File: av2/map/drivable_area.py
```python
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, List
import numpy as np
from av2.map.map_primitives import Point
from av2.utils.typing import NDArrayFloat
@dataclass
class DrivableArea:
"""Represents a single polygon, not a polyline.
Args:
id: unique identifier.
area_boundary: 3d vertices of polygon, representing the drivable area's boundary.
"""
id: int
area_boundary: List[Point]
@property
def xyz(self) -> NDArrayFloat:
"""Return (N,3) array representing the ordered 3d coordinates of the polygon vertices."""
return np.vstack([wpt.xyz for wpt in self.area_boundary])
@classmethod
def from_dict(cls, json_data: Dict[str, Any]) -> DrivableArea:
"""Generate object instance from dictionary read from JSON data."""
point_list = [Point(x=v["x"], y=v["y"], z=v["z"]) for v in json_data["area_boundary"]]
# append the first vertex to the end of vertex list
point_list.append(point_list[0])
return cls(id=json_data["id"], area_boundary=point_list)
```
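A brief usage sketch for `DrivableArea.from_dict`, using a made-up payload that follows the schema the method expects:
```python
from av2.map.drivable_area import DrivableArea

# Hypothetical JSON-like record for one drivable-area polygon.
json_data = {
    "id": 7,
    "area_boundary": [
        {"x": 0.0, "y": 0.0, "z": 0.0},
        {"x": 10.0, "y": 0.0, "z": 0.0},
        {"x": 10.0, "y": 5.0, "z": 0.0},
        {"x": 0.0, "y": 5.0, "z": 0.0},
    ],
}

da = DrivableArea.from_dict(json_data)

# from_dict closes the polygon, so the first vertex is repeated at the end.
print(da.xyz.shape)  # (5, 3)
```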
#### File: av2/map/map_api.py
```python
from __future__ import annotations
import copy
import logging
import math
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Dict, Final, List, Optional, Tuple, Union
import numpy as np
import av2.geometry.interpolate as interp_utils
import av2.utils.dilation_utils as dilation_utils
import av2.utils.io as io_utils
import av2.utils.raster as raster_utils
from av2.geometry.sim2 import Sim2
from av2.map.drivable_area import DrivableArea
from av2.map.lane_segment import LaneSegment
from av2.map.pedestrian_crossing import PedestrianCrossing
from av2.utils.typing import NDArrayBool, NDArrayByte, NDArrayFloat, NDArrayInt
# 1 meter resolution is insufficient for the online-generated drivable area and ROI raster grids
# these grids can be generated at an arbitrary resolution, from vector (polygon) objects.
ONLINE_RASTER_RESOLUTION_M: Final[float] = 0.1 # 10 cm resolution
ONLINE_RASTER_RESOLUTION_SCALE: Final[float] = 1 / ONLINE_RASTER_RESOLUTION_M
GROUND_HEIGHT_THRESHOLD_M: Final[float] = 0.3 # 30 centimeters
ROI_ISOCONTOUR_M: Final[float] = 5.0 # in meters
ROI_ISOCONTOUR_GRID: Final[float] = ROI_ISOCONTOUR_M * ONLINE_RASTER_RESOLUTION_SCALE
WPT_INFINITY_NORM_INTERP_NUM: Final[int] = 50
logger = logging.getLogger(__name__)
class RasterLayerType(str, Enum):
"""Raster layer types."""
ROI = "ROI"
DRIVABLE_AREA = "DRIVABLE_AREA"
GROUND_HEIGHT = "GROUND_HEIGHT"
@dataclass(frozen=True)
class RasterMapLayer:
"""Data sampled at points along a regular grid, and a mapping from city coordinates to grid array coordinates."""
array: Union[NDArrayByte, NDArrayFloat]
array_Sim2_city: Sim2
def get_raster_values_at_coords(
self, points_xyz: NDArrayFloat, fill_value: Union[float, int]
) -> Union[NDArrayFloat, NDArrayInt]:
"""Index into a raster grid and extract values corresponding to city coordinates.
Note: a conversion is required between city coordinates and raster grid coordinates, via Sim(2).
Args:
points_xyz: array of shape (N,2) or (N,3) representing coordinates in the city coordinate frame.
fill_value: float representing default "raster" return value for out-of-bounds queries.
Returns:
raster_values: array of shape (N,) representing raster values at the N query coordinates.
"""
# Note: we do NOT round here, because we need to enforce scaled discretization.
city_coords = points_xyz[:, :2]
npyimage_coords = self.array_Sim2_city.transform_point_cloud(city_coords)
npyimage_coords = npyimage_coords.astype(np.int64)
# out of bounds values will default to the fill value, and will not be indexed into the array.
# index in at (x,y) locations, which are (y,x) in the image
raster_values = np.full((npyimage_coords.shape[0]), fill_value)
# generate boolean array indicating whether the value at each index represents a valid coordinate.
ind_valid_pts = (
(npyimage_coords[:, 1] >= 0)
* (npyimage_coords[:, 1] < self.array.shape[0])
* (npyimage_coords[:, 0] >= 0)
* (npyimage_coords[:, 0] < self.array.shape[1])
)
raster_values[ind_valid_pts] = self.array[npyimage_coords[ind_valid_pts, 1], npyimage_coords[ind_valid_pts, 0]]
return raster_values
@dataclass(frozen=True)
class GroundHeightLayer(RasterMapLayer):
"""Rasterized ground height map layer.
Stores the "ground_height_matrix" and also the array_Sim2_city: Sim(2) that produces takes point in city
coordinates to numpy image/matrix coordinates, e.g. p_npyimage = array_Transformation_city * p_city
"""
@classmethod
def from_file(cls, log_map_dirpath: Path) -> GroundHeightLayer:
"""Load ground height values (w/ values at 30 cm resolution) from .npy file, and associated Sim(2) mapping.
Note: ground height values are stored on disk as a float16 2d-array, but cast to float32 once loaded for
compatibility with matplotlib.
Args:
log_map_dirpath: path to directory which contains map files associated with one specific log/scenario.
Returns:
The ground height map layer.
Raises:
RuntimeError: If raster ground height layer file is missing or Sim(2) mapping from city to image coordinates
is missing.
"""
ground_height_npy_fpaths = sorted(log_map_dirpath.glob("*_ground_height_surface____*.npy"))
if not len(ground_height_npy_fpaths) == 1:
raise RuntimeError("Raster ground height layer file is missing")
Sim2_json_fpaths = sorted(log_map_dirpath.glob("*___img_Sim2_city.json"))
if not len(Sim2_json_fpaths) == 1:
raise RuntimeError("Sim(2) mapping from city to image coordinates is missing")
# load the file with rasterized values
ground_height_array: NDArrayFloat = np.load(ground_height_npy_fpaths[0]) # type: ignore
array_Sim2_city = Sim2.from_json(Sim2_json_fpaths[0])
return cls(array=ground_height_array.astype(np.float32), array_Sim2_city=array_Sim2_city)
def get_ground_points_boolean(self, points_xyz: NDArrayFloat) -> NDArrayBool:
"""Check whether each 3d point is likely to be from the ground surface.
Args:
points_xyz: Numpy array of shape (N,3) representing 3d coordinates of N query locations.
Returns:
Numpy array of shape (N,) where ith entry is True if the 3d point (e.g. a LiDAR return) is likely
located on the ground surface.
Raises:
ValueError: If `points_xyz` aren't 3d.
"""
if points_xyz.shape[1] != 3:
raise ValueError("3-dimensional points must be provided to classify them as `ground` with the map.")
ground_height_values = self.get_ground_height_at_xy(points_xyz)
z = points_xyz[:, 2]
near_ground: NDArrayBool = np.absolute(z - ground_height_values) <= GROUND_HEIGHT_THRESHOLD_M
underground: NDArrayBool = z < ground_height_values
is_ground_boolean_arr: NDArrayBool = near_ground | underground
return is_ground_boolean_arr
def get_rasterized_ground_height(self) -> Tuple[NDArrayFloat, Sim2]:
"""Get ground height matrix along with Sim(2) that maps matrix coordinates to city coordinates.
Returns:
ground_height_matrix:
            array_Sim2_city: Sim(2) that takes a point in city coordinates to image coordinates, e.g.
p_image = image_Transformation_city * p_city
"""
ground_height_matrix: NDArrayFloat = self.array.astype(float)
return ground_height_matrix, self.array_Sim2_city
def get_ground_height_at_xy(self, points_xyz: NDArrayFloat) -> NDArrayFloat:
"""Get ground height for each of the xy locations for all points {(x,y,z)} in a point cloud.
Args:
points_xyz: Numpy array of shape (K,2) or (K,3)
Returns:
Numpy array of shape (K,)
"""
ground_height_values: NDArrayFloat = self.get_raster_values_at_coords(points_xyz, fill_value=np.nan).astype(
float
)
return ground_height_values
@dataclass(frozen=True)
class DrivableAreaMapLayer(RasterMapLayer):
"""Rasterized drivable area map layer.
This provides the "drivable area" as a binary segmentation mask in the bird's eye view.
"""
@classmethod
def from_vector_data(cls, drivable_areas: List[DrivableArea]) -> DrivableAreaMapLayer:
"""Return a drivable area map from vector data.
NOTE: This function provides "drivable area" as a binary segmentation mask in the bird's eye view.
Args:
drivable_areas: List of drivable areas.
Returns:
Driveable area map layer.
"""
# We compute scene boundaries on the fly, based on the vertices of all drivable area polygons.
# These scene boundaries are used to define the raster grid extents.
x_min, y_min, x_max, y_max = compute_data_bounds(drivable_areas)
# The resolution of the rasterization will affect image dimensions.
array_s_city = ONLINE_RASTER_RESOLUTION_SCALE
img_h = int((y_max - y_min + 1) * array_s_city)
img_w = int((x_max - x_min + 1) * array_s_city)
# scale determines the resolution of the raster DA layer.
array_Sim2_city = Sim2(R=np.eye(2), t=np.array([-x_min, -y_min]), s=array_s_city)
# convert vertices for each polygon from a 3d array in city coordinates, to a 2d array
# in image/array coordinates.
da_polygons_img = []
for da_polygon_city in drivable_areas:
da_polygon_img = array_Sim2_city.transform_from(da_polygon_city.xyz[:, :2])
da_polygon_img = np.round(da_polygon_img).astype(np.int32) # type: ignore
da_polygons_img.append(da_polygon_img)
da_mask = raster_utils.get_mask_from_polygons(da_polygons_img, img_h, img_w)
return cls(array=da_mask, array_Sim2_city=array_Sim2_city)
@dataclass(frozen=True)
class RoiMapLayer(RasterMapLayer):
"""Rasterized Region of Interest (RoI) map layer.
This layer provides the "region of interest" as a binary segmentation mask in the bird's eye view.
"""
@classmethod
def from_drivable_area_layer(cls, drivable_area_layer: DrivableAreaMapLayer) -> RoiMapLayer:
"""Rasterize and return 3d vector drivable area as a 2d array, and dilate it by 5 meters, to return a ROI mask.
Args:
drivable_area_layer: Drivable map layer.
Returns:
ROI Layer, containing a (M,N) matrix representing a binary segmentation for the region of interest,
and `array_Sim2_city`, Similarity(2) transformation that transforms point in the city coordinates to
2d array coordinates:
p_array = array_Sim2_city * p_city
"""
        # initialize ROI as the zero-level isocontour of the drivable area, then dilate to the 5-meter isocontour
roi_mat_init: NDArrayByte = copy.deepcopy(drivable_area_layer.array).astype(np.uint8)
roi_mask = dilation_utils.dilate_by_l2(roi_mat_init, dilation_thresh=ROI_ISOCONTOUR_GRID)
return cls(array=roi_mask, array_Sim2_city=drivable_area_layer.array_Sim2_city)
def compute_data_bounds(drivable_areas: List[DrivableArea]) -> Tuple[int, int, int, int]:
"""Find the minimum and maximum coordinates along the x and y axes for a set of drivable areas.
Args:
drivable_areas: list of drivable area objects, defined in the city coordinate frame.
Returns:
xmin: float representing minimum x-coordinate of any vertex of any provided drivable area.
ymin: float representing minimum y-coordinate, as above.
xmax: float representing maximum x-coordinate, as above.
ymax: float representing maximum y-coordinate, as above.
"""
xmin = math.floor(min([da.xyz[:, 0].min() for da in drivable_areas]))
ymin = math.floor(min([da.xyz[:, 1].min() for da in drivable_areas]))
xmax = math.ceil(max([da.xyz[:, 0].max() for da in drivable_areas]))
ymax = math.ceil(max([da.xyz[:, 1].max() for da in drivable_areas]))
return xmin, ymin, xmax, ymax
@dataclass
class ArgoverseStaticMap:
"""API to interact with a local map for a single log (within a single city).
    Nodes in the lane graph are lane segments. Edges in the lane graph provide the lane segment connectivity, via
left and right neighbors and successors.
Lane segments are parameterized by 3d waypoints representing their left and right boundaries.
Note: predecessors are implicit and available by reversing the directed graph dictated by successors.
Args:
log_id: unique identifier for log/scenario.
vector_drivable_areas: drivable area polygons. Each polygon is represented by a Nx3 array of its vertices.
Note: the first and last polygon vertex are identical (i.e. the first index is repeated).
vector_lane_segments: lane segments that are local to this log/scenario. Consists of a mapping from
lane segment ID to vector lane segment object, parameterized in 3d.
vector_pedestrian_crossings: all pedestrian crossings (i.e. crosswalks) that are local to this log/scenario.
Note: the lookup index is simply a list, rather than a dictionary-based mapping, since pedestrian crossings
are not part of a larger graph.
raster_drivable_area_layer: 2d raster representation of drivable area segmentation.
raster_roi_layer: 2d raster representation of region of interest segmentation.
raster_ground_height_layer: not provided for Motion Forecasting-specific scenarios/logs.
"""
# handle out-of-bounds lane segment ids with ValueError
log_id: str
vector_drivable_areas: Dict[int, DrivableArea]
vector_lane_segments: Dict[int, LaneSegment]
vector_pedestrian_crossings: Dict[int, PedestrianCrossing]
raster_drivable_area_layer: Optional[DrivableAreaMapLayer]
raster_roi_layer: Optional[RoiMapLayer]
raster_ground_height_layer: Optional[GroundHeightLayer]
@classmethod
def from_json(cls, static_map_path: Path) -> ArgoverseStaticMap:
"""Instantiate an Argoverse static map object (without raster data) from a JSON file containing map data.
Args:
static_map_path: Path to the JSON file containing map data. The file name must match
the following pattern: "log_map_archive_{log_id}.json".
Returns:
An Argoverse HD map.
"""
log_id = static_map_path.stem.split("log_map_archive_")[1]
vector_data = io_utils.read_json_file(static_map_path)
vector_drivable_areas = {da["id"]: DrivableArea.from_dict(da) for da in vector_data["drivable_areas"].values()}
vector_lane_segments = {ls["id"]: LaneSegment.from_dict(ls) for ls in vector_data["lane_segments"].values()}
if "pedestrian_crossings" not in vector_data:
logger.error("Missing Pedestrian crossings!")
vector_pedestrian_crossings = {}
else:
vector_pedestrian_crossings = {
pc["id"]: PedestrianCrossing.from_dict(pc) for pc in vector_data["pedestrian_crossings"].values()
}
return cls(
log_id=log_id,
vector_drivable_areas=vector_drivable_areas,
vector_lane_segments=vector_lane_segments,
vector_pedestrian_crossings=vector_pedestrian_crossings,
raster_drivable_area_layer=None,
raster_roi_layer=None,
raster_ground_height_layer=None,
)
@classmethod
def from_map_dir(cls, log_map_dirpath: Path, build_raster: bool = False) -> ArgoverseStaticMap:
"""Instantiate an Argoverse map object from data stored within a map data directory.
Note: the ground height surface file and associated coordinate mapping is not provided for the
2.0 Motion Forecasting dataset, so `build_raster` defaults to False. If raster functionality is
        desired, users should set `build_raster=True` (e.g. for the Sensor Datasets and Map Change Datasets).
Args:
log_map_dirpath: Path to directory containing scenario-specific map data,
JSON file must follow this schema: "log_map_archive_{log_id}.json".
build_raster: Whether to rasterize drivable areas, compute region of interest BEV binary segmentation,
and to load raster ground height from disk (when available).
Returns:
The HD map.
Raises:
RuntimeError: If the vector map data JSON file is missing.
"""
# Load vector map data from JSON file
vector_data_fnames = sorted(log_map_dirpath.glob("log_map_archive_*.json"))
if not len(vector_data_fnames) == 1:
raise RuntimeError(f"JSON file containing vector map data is missing (searched in {log_map_dirpath})")
vector_data_fname = vector_data_fnames[0]
vector_data_json_path = log_map_dirpath / vector_data_fname
static_map = cls.from_json(vector_data_json_path)
static_map.log_id = log_map_dirpath.parent.stem
# Avoid file I/O and polygon rasterization when not needed
if build_raster:
drivable_areas: List[DrivableArea] = list(static_map.vector_drivable_areas.values())
static_map.raster_drivable_area_layer = DrivableAreaMapLayer.from_vector_data(drivable_areas=drivable_areas)
static_map.raster_roi_layer = RoiMapLayer.from_drivable_area_layer(static_map.raster_drivable_area_layer)
static_map.raster_ground_height_layer = GroundHeightLayer.from_file(log_map_dirpath)
return static_map
def get_scenario_vector_drivable_areas(self) -> List[DrivableArea]:
"""Fetch a list of polygons, whose union represents the drivable area for the log/scenario.
        NOTE: this function provides drivable areas in vector (not raster) format.
Returns:
List of drivable area polygons.
"""
return list(self.vector_drivable_areas.values())
def get_lane_segment_successor_ids(self, lane_segment_id: int) -> Optional[List[int]]:
"""Get lane id for the lane sucessor of the specified lane_segment_id.
Args:
lane_segment_id: unique identifier for a lane segment within a log scenario map (within a single city).
Returns:
successor_ids: list of integers, representing lane segment IDs of successors. If there are no
successor lane segments, then the list will be empty.
"""
successor_ids = self.vector_lane_segments[lane_segment_id].successors
return successor_ids
def get_lane_segment_left_neighbor_id(self, lane_segment_id: int) -> Optional[int]:
"""Get id of lane segment that is the left neighbor (if any exists) to the query lane segment id.
Args:
lane_segment_id: unique identifier for a lane segment within a log scenario map (within a single city).
Returns:
integer representing id of left neighbor to the query lane segment id, or None if no such neighbor exists.
"""
return self.vector_lane_segments[lane_segment_id].left_neighbor_id
def get_lane_segment_right_neighbor_id(self, lane_segment_id: int) -> Optional[int]:
"""Get id of lane segment that is the right neighbor (if any exists) to the query lane segment id.
Args:
lane_segment_id: unique identifier for a lane segment within a log scenario map (within a single city).
Returns:
integer representing id of right neighbor to the query lane segment id, or None if no such neighbor exists.
"""
return self.vector_lane_segments[lane_segment_id].right_neighbor_id
def get_scenario_lane_segment_ids(self) -> List[int]:
"""Get ids of all lane segments that are local to this log/scenario (according to l-infinity norm).
Returns:
list containing ids of local lane segments
"""
return list(self.vector_lane_segments.keys())
def get_lane_segment_centerline(self, lane_segment_id: int) -> NDArrayFloat:
"""Infer a 3D centerline for any particular lane segment by forming a ladder of left and right waypoints.
Args:
lane_segment_id: unique identifier for a lane segment within a log scenario map (within a single city).
Returns:
Numpy array of shape (N,3).
"""
left_ln_bound = self.vector_lane_segments[lane_segment_id].left_lane_boundary.xyz
right_ln_bound = self.vector_lane_segments[lane_segment_id].right_lane_boundary.xyz
lane_centerline, _ = interp_utils.compute_midpoint_line(
left_ln_boundary=left_ln_bound,
right_ln_boundary=right_ln_bound,
num_interp_pts=interp_utils.NUM_CENTERLINE_INTERP_PTS,
)
return lane_centerline
def get_lane_segment_polygon(self, lane_segment_id: int) -> NDArrayFloat:
"""Return an array contained coordinates of vertices that represent the polygon's boundary.
Args:
lane_segment_id: unique identifier for a lane segment within a log scenario map (within a single city).
Returns:
            Array of polygon boundary (K,3), with identical first and last boundary points.
"""
return self.vector_lane_segments[lane_segment_id].polygon_boundary
def lane_is_in_intersection(self, lane_segment_id: int) -> bool:
"""Check if the specified lane_segment_id falls within an intersection.
Args:
lane_segment_id: unique identifier for a lane segment within a log scenario map (within a single city).
Returns:
boolean indicating if the lane segment falls within an intersection
"""
return self.vector_lane_segments[lane_segment_id].is_intersection
def get_scenario_ped_crossings(self) -> List[PedestrianCrossing]:
"""Return a list of all pedestrian crossing objects that are local to this log/scenario (by l-infinity norm).
Returns:
lpcs: local pedestrian crossings
"""
return list(self.vector_pedestrian_crossings.values())
def get_nearby_ped_crossings(self, query_center: NDArrayFloat, search_radius_m: float) -> List[PedestrianCrossing]:
"""Return nearby pedestrian crossings.
Returns pedestrian crossings for which any waypoint of their boundary falls within `search_radius_m` meters
of query center, by l-infinity norm.
Search radius defined in l-infinity norm (could also provide an l2 norm variant).
Args:
query_center: Numpy array of shape (2,) representing 2d query center.
search_radius_m: distance threshold in meters (by infinity norm) to use for search.
Raises:
NotImplementedError: Always (not implemented!).
"""
raise NotImplementedError("This method isn't currently supported.")
def get_scenario_lane_segments(self) -> List[LaneSegment]:
"""Return a list of all lane segments objects that are local to this log/scenario.
Returns:
vls_list: lane segments local to this scenario (any waypoint within 100m by L2 distance)
"""
return list(self.vector_lane_segments.values())
def get_nearby_lane_segments(self, query_center: NDArrayFloat, search_radius_m: float) -> List[LaneSegment]:
"""Return the nearby lane segments.
Return lane segments for which any waypoint of their lane boundaries falls
within search_radius meters of query center, by l-infinity norm.
Args:
query_center: Numpy array of shape (2,) representing 2d query center.
search_radius_m: distance threshold in meters (by infinity norm) to use for search.
Returns:
ls_list: lane segments that fall within the requested search radius.
"""
scenario_lane_segments = self.get_scenario_lane_segments()
return [
ls for ls in scenario_lane_segments if ls.is_within_l_infinity_norm_radius(query_center, search_radius_m)
]
def remove_ground_surface(self, points_xyz: NDArrayFloat) -> NDArrayFloat:
"""Get a collection of 3d points, snap them to the grid, perform the O(1) raster map queries.
If our z-height is within THRESHOLD of that grid's z-height, then we keep it; otherwise, discard it.
Args:
points_xyz: Numpy array of shape (N,3) representing 3d coordinates of N query locations.
Returns:
subset of original point cloud, with ground points removed
"""
is_ground_boolean_arr = self.get_ground_points_boolean(points_xyz)
filtered_points_xyz: NDArrayFloat = points_xyz[~is_ground_boolean_arr]
return filtered_points_xyz
def get_ground_points_boolean(self, points_xyz: NDArrayFloat) -> NDArrayBool:
"""Check whether each 3d point is likely to be from the ground surface.
Args:
points_xyz: Numpy array of shape (N,3) representing 3d coordinates of N query locations.
Returns:
Numpy array of shape (N,) where ith entry is True if the 3d point
(e.g. a LiDAR return) is likely located on the ground surface.
Raises:
ValueError: If `self.raster_ground_height_layer` is `None`.
"""
if self.raster_ground_height_layer is None:
raise ValueError("Raster ground height is not loaded!")
return self.raster_ground_height_layer.get_ground_points_boolean(points_xyz)
def remove_non_drivable_area_points(self, points_xyz: NDArrayFloat) -> NDArrayFloat:
"""Decimate the point cloud to the drivable area only.
Get a 3d point, snap it to the grid, perform the O(1) raster map query.
Args:
points_xyz: Numpy array of shape (N,3) representing 3d coordinates of N query locations.
Returns:
subset of original point cloud, returning only those points lying within the drivable area.
"""
is_da_boolean_arr = self.get_raster_layer_points_boolean(points_xyz, layer_name=RasterLayerType.DRIVABLE_AREA)
filtered_points_xyz: NDArrayFloat = points_xyz[is_da_boolean_arr]
return filtered_points_xyz
def remove_non_roi_points(self, points_xyz: NDArrayFloat) -> NDArrayFloat:
"""Decimate the point cloud to the Region of Interest (ROI) area only.
Get a 3d point, snap it to the grid, perform the O(1) raster map query.
Args:
points_xyz: Numpy array of shape (N,3) representing 3d coordinates of N query locations.
Returns:
subset of original point cloud, returning only those points lying within the ROI.
"""
is_da_boolean_arr = self.get_raster_layer_points_boolean(points_xyz, layer_name=RasterLayerType.ROI)
filtered_points_xyz: NDArrayFloat = points_xyz[is_da_boolean_arr]
return filtered_points_xyz
def get_rasterized_drivable_area(self) -> Tuple[NDArrayByte, Sim2]:
"""Get the drivable area along with Sim(2) that maps matrix coordinates to city coordinates.
Returns:
            da_matrix: Numpy array of shape (M,N) representing binary values for drivable area.
            array_Sim2_city: Sim(2) that takes a point in city coordinates to Numpy array coordinates, e.g.
p_array = array_Transformation_city * p_city
Raises:
ValueError: If `self.raster_drivable_area_layer` is `None`.
"""
if self.raster_drivable_area_layer is None:
raise ValueError("Raster drivable area is not loaded!")
raster_drivable_area_layer: NDArrayByte = self.raster_drivable_area_layer.array.astype(np.uint8)
return raster_drivable_area_layer, self.raster_drivable_area_layer.array_Sim2_city
def get_rasterized_roi(self) -> Tuple[NDArrayByte, Sim2]:
"""Get the drivable area along with Sim(2) that maps matrix coordinates to city coordinates.
Returns:
da_matrix: Numpy array of shape (M,N) representing binary values for drivable area.
array_Sim2_city: Sim(2) that produces takes point in city coordinates to numpy image, e.g.
p_npyimage = npyimage_Transformation_city * p_city
Raises:
ValueError: If `self.raster_roi_layer` is `None`.
"""
if self.raster_roi_layer is None:
raise ValueError("Raster ROI is not loaded!")
raster_roi_layer: NDArrayByte = self.raster_roi_layer.array.astype(np.uint8)
return raster_roi_layer, self.raster_roi_layer.array_Sim2_city
def get_raster_layer_points_boolean(self, points_xyz: NDArrayFloat, layer_name: RasterLayerType) -> NDArrayBool:
"""Query the binary segmentation layers (drivable area and ROI) at specific coordinates, to check values.
Args:
points_xyz: Numpy array of shape (N,3) representing 3d coordinates of N query locations.
layer_name: enum indicating layer name, for either region-of-interest or drivable area.
Returns:
Numpy array of shape (N,) where i'th entry is True if binary segmentation is
equal to 1 at the i'th point coordinate (i.e. is within the ROI, or within the drivable area,
depending upon `layer_name` argument).
Raises:
ValueError: If `self.raster_roi_layer`, `self.raster_drivable_area_layer` is `None`. Additionally,
            if `layer_name` is not `roi` or `drivable_area`.
"""
if layer_name == RasterLayerType.ROI:
if self.raster_roi_layer is None:
raise ValueError("Raster ROI is not loaded!")
layer_values = self.raster_roi_layer.get_raster_values_at_coords(points_xyz, fill_value=0)
elif layer_name == RasterLayerType.DRIVABLE_AREA:
if self.raster_drivable_area_layer is None:
raise ValueError("Raster drivable area is not loaded!")
layer_values = self.raster_drivable_area_layer.get_raster_values_at_coords(points_xyz, fill_value=0)
else:
raise ValueError("layer_name should be either `roi` or `drivable_area`.")
is_layer_boolean_arr: NDArrayBool = layer_values == 1.0
return is_layer_boolean_arr
def append_height_to_2d_city_pt_cloud(self, points_xy: NDArrayFloat) -> NDArrayFloat:
"""Accept 2d point cloud in xy plane and returns a 3d point cloud (xyz) by querying map for ground height.
Args:
points_xy: Numpy array of shape (N,2) representing 2d coordinates of N query locations.
Returns:
Numpy array of shape (N,3) representing 3d coordinates on the ground surface at N (x,y) query locations.
Raises:
ValueError: If `self.raster_ground_height_layer` is `None` or input is not a set of 2d coordinates.
"""
if self.raster_ground_height_layer is None:
raise ValueError("Raster ground height is not loaded!")
if points_xy.shape[1] != 2:
raise ValueError("Input query points must have shape (N,2")
points_z = self.raster_ground_height_layer.get_ground_height_at_xy(points_xy)
points_xyz: NDArrayFloat = np.hstack([points_xy, points_z[:, np.newaxis]])
return points_xyz
```
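A sketch of how the vector map API above is typically driven; the directory path is a placeholder and must point at a real per-log map folder containing a `log_map_archive_{log_id}.json` file:
```python
from pathlib import Path

from av2.map.map_api import ArgoverseStaticMap

# Placeholder path; substitute a real log map directory from the dataset.
log_map_dirpath = Path("/data/av2/sensor/train/<log_id>/map")

# Vector-only map; set build_raster=True when raster ground height files are available.
avm = ArgoverseStaticMap.from_map_dir(log_map_dirpath, build_raster=False)

# Query the lane graph and crosswalks.
lane_segment_ids = avm.get_scenario_lane_segment_ids()
first_id = lane_segment_ids[0]
centerline = avm.get_lane_segment_centerline(first_id)   # (N,3) interpolated waypoints
in_intersection = avm.lane_is_in_intersection(first_id)
crosswalks = avm.get_scenario_ped_crossings()
print(centerline.shape, in_intersection, len(crosswalks))
```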
#### File: av2/map/pedestrian_crossing.py
```python
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, Tuple
import numpy as np
from av2.map.map_primitives import Polyline
from av2.utils.typing import NDArrayFloat
@dataclass
class PedestrianCrossing:
"""Represents a pedestrian crossing (i.e. crosswalk) as two edges along its principal axis.
Both lines should be pointing in nominally the same direction and a pedestrian is expected to
move either roughly parallel to both lines or anti-parallel to both lines.
Args:
id: unique identifier of this pedestrian crossing.
edge1: 3d polyline representing one edge of the crosswalk, with 2 waypoints.
edge2: 3d polyline representing the other edge of the crosswalk, with 2 waypoints.
"""
id: int
edge1: Polyline
edge2: Polyline
def get_edges_2d(self) -> Tuple[NDArrayFloat, NDArrayFloat]:
"""Retrieve the two principal edges of the crosswalk, in 2d.
Returns:
edge1: array of shape (2,2), a 2d polyline representing one edge of the crosswalk, with 2 waypoints.
edge2: array of shape (2,2), a 2d polyline representing the other edge of the crosswalk, with 2 waypoints.
"""
return (self.edge1.xyz[:, :2], self.edge2.xyz[:, :2])
def __eq__(self, other: object) -> bool:
"""Check if two pedestrian crossing objects are equal, up to a tolerance."""
if not isinstance(other, PedestrianCrossing):
return False
return np.allclose(self.edge1.xyz, other.edge1.xyz) and np.allclose(self.edge2.xyz, other.edge2.xyz)
@classmethod
def from_dict(cls, json_data: Dict[str, Any]) -> PedestrianCrossing:
"""Generate a PedestrianCrossing object from a dictionary read from JSON data."""
edge1 = Polyline.from_json_data(json_data["edge1"])
edge2 = Polyline.from_json_data(json_data["edge2"])
return PedestrianCrossing(id=json_data["id"], edge1=edge1, edge2=edge2)
@property
def polygon(self) -> NDArrayFloat:
"""Return the vertices of the polygon representing the pedestrian crossing.
Returns:
array of shape (N,3) representing vertices. The first and last vertex that are provided are identical.
"""
v0, v1 = self.edge1.xyz
v2, v3 = self.edge2.xyz
return np.array([v0, v1, v3, v2, v0])
```
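A small construction sketch for `PedestrianCrossing`. The list-of-`{"x","y","z"}` payload passed to `Polyline.from_json_data` is assumed here by analogy with `DrivableArea.from_dict` above; treat it as illustrative rather than a schema guarantee:
```python
from av2.map.map_primitives import Polyline
from av2.map.pedestrian_crossing import PedestrianCrossing

# Two made-up parallel crosswalk edges, each with 2 waypoints.
edge1 = Polyline.from_json_data([{"x": 0.0, "y": 0.0, "z": 0.0}, {"x": 4.0, "y": 0.0, "z": 0.0}])
edge2 = Polyline.from_json_data([{"x": 0.0, "y": 2.0, "z": 0.0}, {"x": 4.0, "y": 2.0, "z": 0.0}])
pc = PedestrianCrossing(id=1, edge1=edge1, edge2=edge2)

print(pc.polygon.shape)   # (5, 3): closed polygon, first vertex repeated at the end
edge1_2d, edge2_2d = pc.get_edges_2d()
print(edge1_2d.shape)     # (2, 2)
```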
#### File: av2/utils/dataclass.py
```python
import itertools
from dataclasses import is_dataclass
import numpy as np
import pandas as pd
def dataclass_eq(base_dataclass: object, other: object) -> bool:
"""Check if base_dataclass is equal to the other object, with proper handling for numpy array fields.
Args:
base_dataclass: Base dataclass to compare against.
other: Other object to compare against the base dataclass.
Raises:
ValueError: If base_dataclass is not an instance of a dataclass.
Returns:
Flag indicating whether base_dataclass and the other object are considered equal.
"""
if not is_dataclass(base_dataclass):
raise ValueError(f"'{base_dataclass.__class__.__name__}' is not a dataclass!")
# Check whether the two objects point to the same instance
if base_dataclass is other:
return True
# Check whether the two objects are both dataclasses of the same type
if base_dataclass.__class__ is not other.__class__:
return False
# Check whether the dataclasses have equal values in all members
base_tuple = vars(base_dataclass).values()
other_tuple = vars(other).values()
return all(_dataclass_member_eq(base_mem, other_mem) for base_mem, other_mem in zip(base_tuple, other_tuple))
def _dataclass_member_eq(base: object, other: object) -> bool:
"""Check if dataclass members base and other are equal, with proper handling for numpy arrays.
Args:
base: Base object to compare against.
other: Other object to compare against the base object.
Returns:
Bool flag indicating whether objects a and b are equal.
"""
# Objects are equal if they point to the same instance
if base is other:
return True
# If both objects are lists, check equality for all members
if isinstance(base, list) and isinstance(other, list):
return all(_dataclass_member_eq(base_i, other_i) for base_i, other_i in itertools.zip_longest(base, other))
# If both objects are np arrays, delegate equality check to numpy's built-in operation
if isinstance(base, np.ndarray) and isinstance(other, np.ndarray):
return bool(np.array_equal(base, other))
# If both objects are pd dataframes, delegate equality check to pandas' built-in operation
if isinstance(base, pd.DataFrame) and isinstance(other, pd.DataFrame):
return bool(pd.DataFrame.equals(base, other))
# Equality checks for all other types are delegated to the standard equality check
try:
return bool(base == other)
except (TypeError, ValueError):
return False
```
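A short usage sketch for `dataclass_eq`, using a hypothetical dataclass with a numpy-array field (the kind of member the plain dataclass `__eq__` cannot compare):
```python
from dataclasses import dataclass

import numpy as np

from av2.utils.dataclass import dataclass_eq


@dataclass
class Toy:
    """Hypothetical dataclass used only to exercise dataclass_eq."""

    name: str
    values: np.ndarray


a = Toy(name="a", values=np.array([1.0, 2.0, 3.0]))
b = Toy(name="a", values=np.array([1.0, 2.0, 3.0]))
c = Toy(name="a", values=np.array([1.0, 2.0, 4.0]))

# Plain `a == b` raises ValueError (ambiguous array truth value);
# dataclass_eq delegates array members to np.array_equal instead.
print(dataclass_eq(a, b))  # True
print(dataclass_eq(a, c))  # False
```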
#### File: datasets/sensor/test_av2_sensor_dataloader.py
```python
from pathlib import Path
import numpy as np
from av2.datasets.sensor.av2_sensor_dataloader import AV2SensorDataLoader
from av2.geometry.se3 import SE3
from av2.utils.typing import NDArrayFloat
def test_get_subsampled_ego_trajectory(test_data_root_dir: Path) -> None:
"""Ensure we can sample the poses at a specific frequency.
Args:
test_data_root_dir: Path to the root dir for test data (provided via fixture).
"""
log_id = "adcf7d18-0510-35b0-a2fa-b4cea13a6d76"
dataroot = test_data_root_dir / "sensor_dataset_logs"
loader = AV2SensorDataLoader(data_dir=dataroot, labels_dir=dataroot)
# retrieve every pose! (sub-nanosecond precision)
traj_ns = loader.get_subsampled_ego_trajectory(log_id=log_id, sample_rate_hz=1e9)
assert traj_ns.shape == (2637, 2)
# retrieve poses @ 1 Hz
traj_1hz = loader.get_subsampled_ego_trajectory(log_id=log_id, sample_rate_hz=1)
# 16 second log segment.
assert traj_1hz.shape == (16, 2)
def test_get_city_SE3_ego(test_data_root_dir: Path) -> None:
"""Ensure we can obtain the egovehicle's pose in the city coordinate frame at a specific timestamp.
Args:
test_data_root_dir: Path to the root dir for test data (provided via fixture).
"""
log_id = "adcf7d18-0510-35b0-a2fa-b4cea13a6d76"
timestamp_ns = 315973157899927216
dataroot = test_data_root_dir / "sensor_dataset_logs"
loader = AV2SensorDataLoader(data_dir=dataroot, labels_dir=dataroot)
city_SE3_egovehicle = loader.get_city_SE3_ego(log_id=log_id, timestamp_ns=timestamp_ns)
assert isinstance(city_SE3_egovehicle, SE3)
expected_translation: NDArrayFloat = np.array([1468.87, 211.51, 13.14])
assert np.allclose(city_SE3_egovehicle.translation, expected_translation, atol=1e-2)
```
#### File: datasets/sensor/test_sensor_dataloader.py
```python
import tempfile
from pathlib import Path
from typing import Dict, Final, List
from av2.datasets.sensor.av2_sensor_dataloader import AV2SensorDataLoader
from av2.datasets.sensor.constants import RingCameras
from av2.datasets.sensor.sensor_dataloader import SensorDataloader
SENSOR_TIMESTAMPS_MS_DICT: Final[Dict[str, List[int]]] = {
"ring_rear_left": [0, 50, 100, 150, 200, 250, 300, 350, 400, 450],
"ring_side_left": [15, 65, 115, 165, 215, 265, 315, 365, 415, 465],
"ring_front_left": [30, 80, 130, 180, 230, 280, 330, 380, 430, 480],
"ring_front_center": [42, 92, 142, 192, 242, 292, 342, 392, 442, 492],
"ring_front_right": [5, 55, 105, 155, 205, 255, 305, 355, 405, 455],
"ring_side_right": [20, 70, 120, 170, 220, 270, 320, 370, 420, 470],
"ring_rear_right": [35, 85, 135, 185, 235, 285, 335, 385, 435, 485],
"lidar": [2, 102, 202, 303, 402, 502, 603, 702, 802, 903],
}
def _create_dummy_sensor_dataloader(log_id: str) -> SensorDataloader:
"""Create a dummy sensor dataloader."""
with Path(tempfile.TemporaryDirectory().name) as sensor_dataset_dir:
for sensor_name, timestamps_ms in SENSOR_TIMESTAMPS_MS_DICT.items():
for t in timestamps_ms:
if "ring" in sensor_name:
fpath = Path(
sensor_dataset_dir, "dummy", log_id, "sensors", "cameras", sensor_name, f"{int(t*1e6)}.jpg"
)
Path(fpath).parent.mkdir(exist_ok=True, parents=True)
fpath.open("w").close()
elif "lidar" in sensor_name:
fpath = Path(sensor_dataset_dir, "dummy", log_id, "sensors", sensor_name, f"{int(t*1e6)}.feather")
Path(fpath).parent.mkdir(exist_ok=True, parents=True)
fpath.open("w").close()
return SensorDataloader(dataset_dir=sensor_dataset_dir, with_cache=False)
def test_sensor_data_loader_milliseconds() -> None:
"""Test that the sensor dataset dataloader can synchronize lidar and image data.
Given toy data in milliseconds, we write out dummy files at corresponding timestamps.
(Sensor timestamps are real, and come from log 00a6ffc1-6ce9-3bc3-a060-6006e9893a1a).
sensor_name timestamp_ns ring_front_center ...
0 lidar 2000000 42000000.0
1 lidar 102000000 92000000.0
2 lidar 202000000 192000000.0
3 lidar 303000000 292000000.0
4 lidar 402000000 392000000.0
5 lidar 502000000 492000000.0
6 lidar 603000000 NaN
7 lidar 702000000 NaN
8 lidar 802000000 NaN
9 lidar 903000000 NaN
"""
# 7x10 images, and 10 sweeps. Timestamps below given in human-readable milliseconds.
log_id = "00a6ffc1-6ce9-3bc3-a060-6006e9893a1a"
loader = _create_dummy_sensor_dataloader(log_id=log_id)
# LiDAR 402 -> matches to ring front center 392.
img_fpath = loader.find_closest_target_fpath(
split="dummy",
log_id=log_id,
src_sensor_name="lidar",
src_timestamp_ns=int(402 * 1e6),
target_sensor_name="ring_front_center",
)
assert isinstance(img_fpath, Path)
# result should be 392 milliseconds (and then a conversion to nanoseconds by adding 6 zeros)
assert img_fpath.name == "392" + "000000" + ".jpg"
# nothing should be within bounds for this (valid lidar timestamp 903)
img_fpath = loader.find_closest_target_fpath(
split="dummy",
log_id=log_id,
src_sensor_name="lidar",
target_sensor_name="ring_front_center",
src_timestamp_ns=int(903 * 1e6),
)
assert img_fpath is None
# nothing should be within bounds for this (invalid lidar timestamp 904)
img_fpath = loader.find_closest_target_fpath(
split="dummy",
log_id=log_id,
src_sensor_name="lidar",
target_sensor_name="ring_front_center",
src_timestamp_ns=int(904 * 1e6),
)
assert img_fpath is None
# ring front center 392 -> matches to LiDAR 402.
lidar_fpath = loader.find_closest_target_fpath(
split="dummy",
log_id=log_id,
src_sensor_name="ring_front_center",
target_sensor_name="lidar",
src_timestamp_ns=int(392 * 1e6),
)
assert isinstance(lidar_fpath, Path)
# result should be 402 milliseconds (and then a conversion to nanoseconds by adding 6 zeros)
assert lidar_fpath.name == "402" + "000000.feather"
# way outside of bounds
lidar_fpath = loader.find_closest_target_fpath(
split="dummy",
log_id=log_id,
src_sensor_name="ring_front_center",
target_sensor_name="lidar",
src_timestamp_ns=int(7000 * 1e6),
)
assert lidar_fpath is None
# use the non-pandas implementation as a "brute-force" (BF) check.
# read out the dataset root from the other dataloader's attributes.
bf_loader = AV2SensorDataLoader(data_dir=loader.dataset_dir / "dummy", labels_dir=loader.dataset_dir / "dummy")
# for every image, make sure query result matches the brute-force query result.
for ring_camera_enum in RingCameras:
ring_camera_name = ring_camera_enum.value
for cam_timestamp_ms in SENSOR_TIMESTAMPS_MS_DICT[ring_camera_name]:
cam_timestamp_ns = int(cam_timestamp_ms * 1e6)
result = loader.get_closest_lidar_fpath(
split="dummy", log_id=log_id, cam_name=ring_camera_name, cam_timestamp_ns=cam_timestamp_ns
)
bf_result = bf_loader.get_closest_lidar_fpath(log_id=log_id, cam_timestamp_ns=cam_timestamp_ns)
assert result == bf_result
# for every lidar sweep, make sure query result matches the brute-force query result.
for lidar_timestamp_ms in SENSOR_TIMESTAMPS_MS_DICT["lidar"]:
lidar_timestamp_ns = int(lidar_timestamp_ms * 1e6)
for ring_camera_enum in list(RingCameras):
ring_camera_name = ring_camera_enum.value
result = loader.get_closest_img_fpath(
split="dummy", log_id=log_id, cam_name=ring_camera_name, lidar_timestamp_ns=lidar_timestamp_ns
)
bf_result = bf_loader.get_closest_img_fpath(
log_id=log_id, cam_name=ring_camera_name, lidar_timestamp_ns=lidar_timestamp_ns
)
assert result == bf_result
if __name__ == "__main__":
test_sensor_data_loader_milliseconds()
```
#### File: evaluation/detection/test_eval.py
```python
import math
from pathlib import Path
from typing import Final, List
import numpy as np
import pandas as pd
from scipy.spatial.transform import Rotation
from av2.evaluation.detection.constants import AffinityType, DistanceType
from av2.evaluation.detection.eval import evaluate
from av2.evaluation.detection.utils import (
DetectionCfg,
accumulate,
assign,
compute_affinity_matrix,
compute_evaluated_dts_mask,
compute_evaluated_gts_mask,
compute_objects_in_roi_mask,
distance,
interpolate_precision,
)
from av2.geometry.geometry import wrap_angles
from av2.geometry.iou import iou_3d_axis_aligned
from av2.map.map_api import ArgoverseStaticMap
from av2.structures.cuboid import ORDERED_CUBOID_COL_NAMES
from av2.utils.constants import PI
from av2.utils.io import read_city_SE3_ego, read_feather
from av2.utils.typing import NDArrayBool, NDArrayFloat
TEST_DATA_DIR: Final[Path] = Path(__file__).parent.resolve() / "data"
TRANSLATION_COLS: Final[List[str]] = ["tx_m", "ty_m", "tz_m"]
DIMS_COLS: Final[List[str]] = ["length_m", "width_m", "height_m"]
QUAT_COLS: Final[List[str]] = ["qw", "qx", "qy", "qz"]
ANNO_COLS: Final[List[str]] = ["timestamp_ns", "category"] + DIMS_COLS + QUAT_COLS + TRANSLATION_COLS
CUBOID_COLS: Final[List[str]] = ["tx_m", "ty_m", "tz_m", "length_m", "width_m", "height_m", "qw", "qx", "qy", "qz"]
def _get_summary_identity() -> pd.DataFrame:
"""Define an evaluator that compares a set of results to itself."""
detection_cfg = DetectionCfg(categories=("REGULAR_VEHICLE",), eval_only_roi_instances=False)
dts: pd.DataFrame = pd.read_feather(TEST_DATA_DIR / "detections_identity.feather")
gts: pd.DataFrame = dts.copy()
gts.loc[:, "num_interior_pts"] = np.array([1, 1, 1, 1, 1, 1])
_, _, summary = evaluate(dts, gts, detection_cfg)
return summary
def _get_summary_assignment() -> pd.DataFrame:
"""Define an evaluator that compares a set of results to one with an extra detection to check assignment."""
detection_cfg = DetectionCfg(categories=("REGULAR_VEHICLE",), eval_only_roi_instances=False)
dts: pd.DataFrame = pd.read_feather(TEST_DATA_DIR / "detections_assignment.feather")
gts: pd.DataFrame = pd.read_feather(TEST_DATA_DIR / "labels.feather")
_, _, summary = evaluate(dts, gts, detection_cfg)
return summary
def _get_summary() -> pd.DataFrame:
"""Get a dummy summary."""
detection_cfg = DetectionCfg(categories=("REGULAR_VEHICLE",), eval_only_roi_instances=False)
dts: pd.DataFrame = pd.read_feather(TEST_DATA_DIR / "detections.feather")
gts: pd.DataFrame = pd.read_feather(TEST_DATA_DIR / "labels.feather")
_, _, summary = evaluate(dts, gts, detection_cfg)
return summary
def test_affinity_center() -> None:
"""Initialize a detection and a ground truth label.
Verify that calculated distance matches expected affinity under the specified `AffFnType`.
"""
dts: NDArrayFloat = np.array([[0.0, 0.0, 0.0, 5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0]])
gts: NDArrayFloat = np.array([[3.0, 4.0, 0.0, 5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0]])
expected_result = -5.0
assert compute_affinity_matrix(dts, gts, AffinityType.CENTER) == expected_result
def test_translation_distance() -> None:
"""Initialize a detection and a ground truth label with only translation parameters.
Verify that calculated distance matches expected distance under the specified `DistFnType`.
"""
dts: NDArrayFloat = np.array([[0.0, 0.0, 0.0]])
gts: NDArrayFloat = np.array([[5.0, 5.0, 5.0]])
expected_result: float = np.sqrt(25 + 25 + 25)
assert np.allclose(distance(dts, gts, DistanceType.TRANSLATION), expected_result)
def test_scale_distance() -> None:
"""Initialize a detection and a ground truth label with only shape parameters.
Verify that calculated scale error matches the expected value.
NOTE: We only provide shape parameters due to alignment assumption.
"""
dts: NDArrayFloat = np.array([[5.0, 5.0, 5.0]])
gts: NDArrayFloat = np.array([[10.0, 10.0, 10.0]])
expected_result: float = 1 - 0.125
assert np.allclose(distance(dts, gts, DistanceType.SCALE), expected_result)
def test_orientation_quarter_angles() -> None:
"""Initialize a detection and a ground truth label with only orientation parameters.
Verify that calculated orientation error matches the expected smallest angle ((2 * PI) / 4)
between the detection and ground truth label.
"""
# Check all of the 90 degree angles
expected_result: float = (2 * PI) / 4
quarter_angles: List[NDArrayFloat] = [np.array([0, 0, angle]) for angle in np.arange(0, 2 * PI, expected_result)]
for i in range(len(quarter_angles) - 1):
quat_xyzw_dts: NDArrayFloat = Rotation.from_rotvec(quarter_angles[i : i + 1]).as_quat()
quat_xyzw_gts: NDArrayFloat = Rotation.from_rotvec(quarter_angles[i + 1 : i + 2]).as_quat()
quat_wxyz_dts = quat_xyzw_dts[..., [3, 0, 1, 2]]
quat_wxyz_gts = quat_xyzw_gts[..., [3, 0, 1, 2]]
assert np.isclose(distance(quat_wxyz_dts, quat_wxyz_gts, DistanceType.ORIENTATION), expected_result)
assert np.isclose(distance(quat_wxyz_gts, quat_wxyz_dts, DistanceType.ORIENTATION), expected_result)
def test_orientation_eighth_angles() -> None:
"""Initialize a detection and a ground truth label with only orientation parameters.
Verify that calculated orientation error matches the expected smallest angle ((2 * PI) / 8)
between the detection and ground truth label.
"""
expected_result: float = (2 * PI) / 8
    eighth_angles: List[NDArrayFloat] = [np.array([0, 0, angle]) for angle in np.arange(0, 2 * PI, expected_result)]
    for i in range(len(eighth_angles) - 1):
        quat_xyzw_dts = Rotation.from_rotvec(eighth_angles[i : i + 1]).as_quat()
        quat_xyzw_gts = Rotation.from_rotvec(eighth_angles[i + 1 : i + 2]).as_quat()
quat_wxyz_dts = quat_xyzw_dts[..., [3, 0, 1, 2]]
quat_wxyz_gts = quat_xyzw_gts[..., [3, 0, 1, 2]]
assert np.isclose(distance(quat_wxyz_dts, quat_wxyz_gts, DistanceType.ORIENTATION), expected_result)
assert np.isclose(distance(quat_wxyz_gts, quat_wxyz_dts, DistanceType.ORIENTATION), expected_result)
def test_wrap_angle() -> None:
"""Test mapping angles to a fixed interval."""
theta: NDArrayFloat = np.array([-3 * PI / 2])
expected_result: NDArrayFloat = np.array([PI / 2])
assert np.isclose(wrap_angles(theta), expected_result)
def test_accumulate() -> None:
"""Verify that the accumulate function matches known output for a self-comparison."""
cfg = DetectionCfg(eval_only_roi_instances=False)
gts: pd.DataFrame = pd.read_feather(TEST_DATA_DIR / "labels.feather")
for _, group in gts.groupby(["log_id", "timestamp_ns"]):
job = (group.loc[:, CUBOID_COLS].to_numpy(), group.loc[:, CUBOID_COLS + ["num_interior_pts"]].to_numpy(), cfg)
dts, gts = accumulate(*job)
# Check that there's a true positive under every threshold.
assert np.all(dts[:, :4])
# Check that all error metrics are zero.
assert (dts[:, 4:7] == 0).all()
# # Check that there are 2 regular vehicles.
# assert gts["category"].value_counts()["REGULAR_VEHICLE"] == 2
# # Check that there are no other labels.
# assert gts["category"].value_counts().sum() == 2
def test_assign() -> None:
"""Verify that the assign functions as expected by checking ATE of assigned detections against known distance."""
cfg = DetectionCfg(eval_only_roi_instances=False)
dts: NDArrayFloat = np.array(
[
[0.0, 0.0, 0.0, 5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[10.0, 10.0, 10.0, 5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[20.0, 20.0, 20.0, 5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0, 1.0],
],
)
gts: NDArrayFloat = np.array(
[
[-10.0, -10.0, -10.0, 5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[0.1, 0.0, 0.0, 5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0, 1.0],
[10.1, 10.0, 10.0, 5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0, 1.0],
],
)
dts_assignments, _ = assign(dts, gts, cfg)
# if these assign correctly, we should get an ATE of 0.1 for the first two
expected_result: float = 0.1
ATE_COL_IDX = 4
assert math.isclose(dts_assignments[0, ATE_COL_IDX], expected_result) # instance 0
assert math.isclose(dts_assignments[1, ATE_COL_IDX], expected_result) # instance 1
assert math.isclose(dts_assignments[2, ATE_COL_IDX], 2.0) # instance 2
def test_interp() -> None:
"""Test non-decreasing `interpolation` constraint enforced on precision results."""
prec: NDArrayFloat = np.array([1.0, 0.5, 0.33, 0.5])
expected_result: NDArrayFloat = np.array([1.0, 0.5, 0.5, 0.5])
assert np.isclose(interpolate_precision(prec), expected_result).all()
def test_iou_aligned_3d() -> None:
"""Initialize a detection and a ground truth label with only shape parameters.
Verify that calculated intersection-over-union matches the expected
value between the detection and ground truth label.
NOTE: We only provide shape parameters due to alignment assumption.
"""
columns = DIMS_COLS
dt_dims: NDArrayFloat = pd.DataFrame([[4.0, 10.0, 3.0]], columns=columns).to_numpy()
gt_dims: NDArrayFloat = pd.DataFrame([[9.0, 5.0, 2.0]], columns=columns).to_numpy()
# Intersection is 40 = 4 * 5 * 2 (product of the per-axis minima).
# The union term here is 270 = 9 * 10 * 3 (product of the per-axis maxima), an approximation
# of the true union (inclusion-exclusion would give (4 * 10 * 3) + (9 * 5 * 2) - 40 = 170).
expected_result: float = 40 / 270.0
assert iou_3d_axis_aligned(dt_dims, gt_dims) == expected_result
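# A minimal sketch (assumption) consistent with the 40 / 270 expectation above: the per-axis
# minima give the intersection and the product of the per-axis maxima is used as the union
# term. Illustrative only, not the library implementation.
def _iou_3d_axis_aligned_sketch(dt_dims: NDArrayFloat, gt_dims: NDArrayFloat) -> NDArrayFloat:
    """Approximate IoU for aligned, concentric, axis-aligned boxes given their dimensions."""
    intersection = np.minimum(dt_dims, gt_dims).prod(axis=-1)
    union = np.maximum(dt_dims, gt_dims).prod(axis=-1)
    return intersection / union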
def test_assignment() -> None:
"""Verify that assignment works as expected; should have one duplicate in the provided results."""
expected_result: float = 0.999
assert _get_summary_assignment().loc["AVERAGE_METRICS", "AP"] == expected_result
def test_ap() -> None:
"""Test that AP is 1 for the self-compared results."""
expected_result: float = 1.0
assert _get_summary_identity().loc["AVERAGE_METRICS", "AP"] == expected_result
def test_translation_error() -> None:
"""Test that ATE is 0 for the self-compared results."""
expected_result_identity: float = 0.0
expected_result_det: float = 0.017 # 0.1 / 6, one of six dets is off by 0.1
assert _get_summary_identity().loc["AVERAGE_METRICS", "ATE"] == expected_result_identity
assert _get_summary().loc["AVERAGE_METRICS", "ATE"] == expected_result_det
def test_scale_error() -> None:
"""Test that ASE is 0 for the self-compared results."""
expected_result_identity: float = 0.0
expected_result_det: float = 0.033 # 0.2 / 6, one of six dets is off by 20% in IoU
assert _get_summary_identity().loc["AVERAGE_METRICS", "ASE"] == expected_result_identity
assert _get_summary().loc["AVERAGE_METRICS", "ASE"] == expected_result_det
def test_orientation_error() -> None:
"""Test that AOE is 0 for the self-compared results."""
expected_result_identity = 0.0
expected_result_det = 0.524 # pi / 6, since one of six dets is off by pi
assert _get_summary_identity().loc["AVERAGE_METRICS", "AOE"] == expected_result_identity
assert _get_summary().loc["AVERAGE_METRICS", "AOE"] == expected_result_det
def test_compute_evaluated_dts_mask() -> None:
"""Unit test for computing valid detections cuboids."""
dts: NDArrayFloat = np.array(
[
[5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0, 3.0, 4.0, 0.0], # In bounds with at least 1 point.
[175, 175.0, 5.0, 1.0, 0.0, 0.0, 0.0, 3.0, 4.0, 0.0], # Out of bounds with at least 1 point.
[-175.0, -175.0, 5.0, 1.0, 0.0, 0.0, 0.0, 3.0, 4.0, 0.0], # Out of bounds with at least 1 point.
[1.0, 1.0, 5.0, 1.0, 0.0, 0.0, 0.0, 3.0, 4.0, 0.0], # In bounds with at least 1 point.
],
)
detection_cfg = DetectionCfg(categories=("REGULAR_VEHICLE",), eval_only_roi_instances=False)
dts_mask = compute_evaluated_dts_mask(dts, detection_cfg)
dts_mask_: NDArrayBool = np.array([True, False, False, True])
np.testing.assert_array_equal(dts_mask, dts_mask_) # type: ignore
def test_compute_evaluated_dts_mask_2() -> None:
"""Randomly generate detections and ensure that they never exceed the maximum detection limit."""
detection_cfg = DetectionCfg(categories=("REGULAR_VEHICLE",), eval_only_roi_instances=False)
for i in range(1000):
dts: NDArrayFloat = np.random.randint(0, 250, size=(detection_cfg.max_num_dts_per_category + i, 10)).astype(
float
)
dts_mask = compute_evaluated_dts_mask(dts, detection_cfg)
assert dts_mask.sum() <= detection_cfg.max_num_dts_per_category
def test_compute_evaluated_gts_mask() -> None:
"""Unit test for computing valid ground truth cuboids."""
gts: NDArrayFloat = np.array(
[
[5.0, 5.0, 5.0, 1.0, 0.0, 0.0, 0.0, 3.0, 4.0, 0.0, 5], # In bounds with at least 1 point.
[175, 175.0, 5.0, 1.0, 0.0, 0.0, 0.0, 3.0, 4.0, 0.0, 5], # Out of bounds with at least 1 point.
[-175.0, -175.0, 5.0, 1.0, 0.0, 0.0, 0.0, 3.0, 4.0, 0.0, 5], # Out of bounds with at least 1 point.
[1.0, 1.0, 5.0, 1.0, 0.0, 0.0, 0.0, 3.0, 4.0, 0.0, 0], # In bounds but with 0 interior points (filtered).
],
)
detection_cfg = DetectionCfg(categories=("REGULAR_VEHICLE",), eval_only_roi_instances=False)
gts_xyz_ego = gts[..., :3]
num_interior_pts = gts[..., -1]
gts_mask = compute_evaluated_gts_mask(gts_xyz_ego, num_interior_pts, detection_cfg)
gts_mask_: NDArrayBool = np.array([True, False, False, False])
np.testing.assert_array_equal(gts_mask, gts_mask_) # type: ignore
def test_compute_objects_in_roi_mask() -> None:
"""Test filtering ground truth annotations by the ROI.
Three annotations are tested. The first and third are partially in the ROI --- these
are both considered VALID since they have at least one of their cuboid vertices within
the ROI.
The second annotation is _FULLY_ outside of the ROI, thus it is filtered.
"""
map_dir = TEST_DATA_DIR / "adcf7d18-0510-35b0-a2fa-b4cea13a6d76" / "map"
timestamp_ns = 315973157959879000
track_uuids = [
"f53639ef-794e-420e-bb2a-d0cde0203b3a", # Two vertices within ROI.
"6c198de2-cb7d-4c09-96aa-52547d9bbe37", # Completely outside of ROI.
"a7c8f6a2-26b6-4610-9eb3-294799f9846c", # Two vertices within ROI.
]
avm = ArgoverseStaticMap.from_map_dir(map_dir, build_raster=True)
annotations = read_feather(TEST_DATA_DIR / "adcf7d18-0510-35b0-a2fa-b4cea13a6d76" / "annotations.feather")
timestamped_city_SE3_egoposes = read_city_SE3_ego(TEST_DATA_DIR / "adcf7d18-0510-35b0-a2fa-b4cea13a6d76")
selected_cuboids_mask = np.logical_and(
annotations.timestamp_ns == timestamp_ns, annotations["track_uuid"].isin(track_uuids)
)
sweep_annotations = annotations.loc[selected_cuboids_mask]
mask = compute_objects_in_roi_mask(
sweep_annotations.loc[:, ORDERED_CUBOID_COL_NAMES].to_numpy(), timestamped_city_SE3_egoposes[timestamp_ns], avm
)
mask_: NDArrayBool = np.array([True, False, True])
np.testing.assert_array_equal(mask, mask_) # type: ignore
```
#### File: tests/geometry/test_pinhole_camera.py
```python
from pathlib import Path
from typing import Tuple
import numpy as np
import pytest
from av2.datasets.sensor.constants import RingCameras
from av2.geometry.camera.pinhole_camera import Intrinsics, PinholeCamera
from av2.geometry.se3 import SE3
from av2.utils.typing import NDArrayFloat, NDArrayInt
_TEST_DATA_ROOT = Path(__file__).resolve().parent.parent / "test_data"
def _create_pinhole_camera(
fx_px: float,
fy_px: float,
cx_px: float,
cy_px: float,
height_px: int,
width_px: int,
cam_name: str,
) -> PinholeCamera:
"""Create a pinhole camera."""
rotation: NDArrayFloat = np.eye(3)
translation: NDArrayFloat = np.zeros(3)
ego_SE3_cam = SE3(rotation=rotation, translation=translation)
intrinsics = Intrinsics(fx_px=fx_px, fy_px=fy_px, cx_px=cx_px, cy_px=cy_px, width_px=width_px, height_px=height_px)
pinhole_camera = PinholeCamera(ego_SE3_cam, intrinsics, cam_name)
return pinhole_camera
def _fit_plane_to_point_cloud(points_xyz: NDArrayFloat) -> Tuple[float, float, float, float]:
"""Use SVD with at least 3 points to fit a plane.
Args:
points_xyz: (N,3) array of points.
Returns:
(4,) Plane coefficients (a, b, c, d), defining the plane ax + by + cz + d = 0.
"""
center_xyz: NDArrayFloat = np.mean(points_xyz, axis=0)
out: Tuple[NDArrayFloat, NDArrayFloat, NDArrayFloat] = np.linalg.svd(points_xyz - center_xyz) # type: ignore
vh = out[2]
# Get the unitary normal vector
a, b, c = float(vh[2, 0]), float(vh[2, 1]), float(vh[2, 2])
d: float = -np.dot([a, b, c], center_xyz) # type: ignore
return (a, b, c, d)
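# Worked example (illustrative): for input points lying on the plane z = 5, the fitted normal
# is (0, 0, +/-1) and d = -/+5, so each input point satisfies a*x + b*y + c*z + d = 0.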
def test_intrinsics_constructor() -> None:
"""Ensure 3x3 intrinsics matrix is populated correctly."""
fx_px, fy_px = 1000, 1001
width_px = 2048
height_px = 1550
cx_px, cy_px = 1024, 775
intrinsics = Intrinsics(fx_px=fx_px, fy_px=fy_px, cx_px=cx_px, cy_px=cy_px, width_px=width_px, height_px=height_px)
K_expected: NDArrayFloat = np.array(([1000, 0, 1024], [0, 1001, 775], [0, 0, 1]), dtype=np.float64)
assert np.array_equal(intrinsics.K, K_expected)
def test_right_clipping_plane() -> None:
"""Test form_right_clipping_plane(). Use 4 points to fit the right clipping plane.
In the camera coordinate frame, y is down the imager, x is across the imager,
and z is along the optical axis. The focal length is the distance to the center
of the image plane. We know that a similar triangle is formed as follows:
(x,y,z)---(x,y,z)
| /
| / ->outside of frustum
| / ->outside of frustum
| (w/2)/
o-----o IMAGE PLANE
| /
fx| /
| /
| /
O PINHOLE
Normal must point into the frustum. The plane moves +fx in z-axis for
every +w/2 in x-axis, so normal will have negative inverse slope components.
"""
fx_px = 10.0
width_px = 30
pinhole_camera = _create_pinhole_camera(
fx_px=fx_px, fy_px=0, cx_px=0, cy_px=0, height_px=30, width_px=width_px, cam_name="ring_front_center"
)
right_plane = pinhole_camera.right_clipping_plane
Y_OFFSET = 10 # arbitrary extent down the imager
right: NDArrayFloat = np.array(
[
[0, 0, 0],
[width_px / 2.0, 0, fx_px],
[0, Y_OFFSET, 0],
[width_px / 2.0, Y_OFFSET, fx_px],
]
)
a, b, c, d = _fit_plane_to_point_cloud(right)
right_plane_expected: NDArrayFloat = np.array([a, b, c, d])
# enforce that plane normal points into the frustum
# x-component of normal should point in negative direction.
if right_plane_expected[0] > 0:
right_plane_expected *= -1
assert np.allclose(right_plane, right_plane_expected)
def test_left_clipping_plane() -> None:
r"""Test left_clipping_plane. Use 4 points to fit the left clipping plane.
(x,y,z)-----(x,y,z)
\\ |
outside of frustum <- \\ |
outside of frustum <- \\ |
\\ (-w/2)|
o------o IMAGE PLANE
\\ |
\\ |
\\ |fx
\\ |
\\ |
O PINHOLE
"""
fx_px = 10.0
width_px = 30
pinhole_camera = _create_pinhole_camera(
fx_px=fx_px, fy_px=0, cx_px=0, cy_px=0, height_px=30, width_px=width_px, cam_name="ring_front_center"
)
left_plane = pinhole_camera.left_clipping_plane
Y_OFFSET = 10
points_xyz: NDArrayFloat = np.array(
[
[0, 0, 0],
[-width_px / 2.0, 0, fx_px],
[0, Y_OFFSET, 0],
[-width_px / 2.0, Y_OFFSET, fx_px],
]
)
a, b, c, d = _fit_plane_to_point_cloud(points_xyz)
left_plane_expected = -np.array([a, b, c, d])
# enforce that plane normal points into the frustum
if left_plane_expected[0] < 0:
left_plane_expected *= -1
assert np.allclose(left_plane, left_plane_expected)
def test_top_clipping_plane() -> None:
r"""Test top_clipping_plane. Use 3 points to fit the TOP clipping plane.
(x,y,z) (x,y,z)
\\=================//
\\ //
(-w/2,-h/2,fx) (w/2,-h/2,fx)
o-------------o
|\\ //| IMAGE PLANE
| \\ // | IMAGE PLANE
o--\\-----//--o
\\ //
\\ //
O PINHOLE
"""
fx_px = 10.0
height_px = 45
pinhole_camera = _create_pinhole_camera(
fx_px=fx_px, fy_px=0, cx_px=0, cy_px=0, height_px=height_px, width_px=1000, cam_name="ring_front_center"
)
top_plane = pinhole_camera.top_clipping_plane
width_px = 1000.0
points_xyz: NDArrayFloat = np.array(
[
[0, 0, 0],
[-width_px / 2, -height_px / 2, fx_px],
[width_px / 2, -height_px / 2, fx_px],
]
)
a, b, c, d = _fit_plane_to_point_cloud(points_xyz)
top_plane_expected: NDArrayFloat = np.array([a, b, c, d])
# enforce that plane normal points into the frustum
if top_plane_expected[1] < 0:
# y-coord of normal should point in pos y-axis dir(down) on top-clipping plane
top_plane_expected *= -1
assert top_plane_expected[1] > 0 and top_plane_expected[2] > 0
assert np.allclose(top_plane, top_plane_expected)
def test_bottom_clipping_plane() -> None:
r"""Test bottom_clipping_plane. Use 3 points to fit the BOTTOM clipping plane.
(x,y,z) (x,y,z)
\\ //
\\ o-------------o //
\\| IMAGE PLANE |//
| |/
(-w/2,h/2,fx) o-------------o (w/2,h/2,fx)
\\ //
\\ //
\\ //
\\ //
\\ //
O PINHOLE
"""
fx_px = 12.0
height_px = 35
width_px = 10000
pinhole_camera = _create_pinhole_camera(
fx_px=fx_px, fy_px=1, cx_px=0, cy_px=0, height_px=height_px, width_px=width_px, cam_name="ring_front_center"
)
bottom_plane = pinhole_camera.bottom_clipping_plane
low_pts: NDArrayFloat = np.array(
[
[0, 0, 0],
[-width_px / 2, height_px / 2, fx_px],
[width_px / 2, height_px / 2, fx_px],
]
)
a, b, c, d = _fit_plane_to_point_cloud(low_pts)
bottom_plane_expected: NDArrayFloat = np.array([a, b, c, d])
# enforce that plane normal points into the frustum
# y-coord of normal should point in neg y-axis dir(up) on low-clipping plane
# z-coord should point in positive z-axis direction (away from camera)
if bottom_plane_expected[1] > 0:
bottom_plane_expected *= -1
assert bottom_plane_expected[1] < 0 and bottom_plane_expected[2] > 0
assert np.allclose(bottom_plane, bottom_plane_expected)
def test_form_near_clipping_plane() -> None:
"""Test near_clipping_plane(). Use 4 points to fit the near clipping plane."""
width_px = 10
height_px = 15
near_clip_dist = 30.0
pinhole_camera = _create_pinhole_camera(
fx_px=1, fy_px=0, cx_px=0, cy_px=0, height_px=30, width_px=width_px, cam_name="ring_front_center"
)
near_plane = pinhole_camera.near_clipping_plane(near_clip_dist)
points_xyz: NDArrayFloat = np.array(
[
[width_px / 2, 0, near_clip_dist],
[-width_px / 2, 0, near_clip_dist],
[width_px / 2, -height_px / 2.0, near_clip_dist],
[width_px / 2, height_px / 2.0, near_clip_dist],
]
)
a, b, c, d = _fit_plane_to_point_cloud(points_xyz)
near_plane_expected: NDArrayFloat = np.array([a, b, c, d])
assert np.allclose(near_plane, near_plane_expected)
def test_frustum_planes_ring_cam() -> None:
"""Test frustum_planes for a ring camera."""
near_clip_dist = 6.89 # arbitrary value
# Set "focal_length_x_px_"
fx_px = 1402.4993697398709
# Set "focal_length_y_px_"
fy_px = 1405.1207294310225
# Set "focal_center_x_px_"
cx_px = 957.8471720086527
# Set "focal_center_y_px_"
cy_px = 600.442948946496
camera_name = "ring_front_right"
height_px = 1550
width_px = 2048
pinhole_camera = _create_pinhole_camera(
fx_px=fx_px, fy_px=fy_px, cx_px=cx_px, cy_px=cy_px, height_px=height_px, width_px=width_px, cam_name=camera_name
)
left_plane, right_plane, near_plane, bottom_plane, top_plane = pinhole_camera.frustum_planes(near_clip_dist)
left_plane_expected: NDArrayFloat = np.array([fx_px, 0.0, width_px / 2.0, 0.0])
right_plane_expected: NDArrayFloat = np.array([-fx_px, 0.0, width_px / 2.0, 0.0])
near_plane_expected: NDArrayFloat = np.array([0.0, 0.0, 1.0, -near_clip_dist])
bottom_plane_expected: NDArrayFloat = np.array([0.0, -fx_px, height_px / 2.0, 0.0])
top_plane_expected: NDArrayFloat = np.array([0.0, fx_px, height_px / 2.0, 0.0])
assert np.allclose(left_plane, left_plane_expected / np.linalg.norm(left_plane_expected)) # type: ignore
assert np.allclose(right_plane, right_plane_expected / np.linalg.norm(right_plane_expected)) # type: ignore
assert np.allclose(bottom_plane, bottom_plane_expected / np.linalg.norm(bottom_plane_expected)) # type: ignore
assert np.allclose(top_plane, top_plane_expected / np.linalg.norm(top_plane_expected)) # type: ignore
assert np.allclose(near_plane, near_plane_expected)
def test_generate_frustum_planes_stereo() -> None:
"""Test generate_frustum_planes() for a stereo camera."""
near_clip_dist = 3.56 # arbitrary value
# Set "focal_length_x_px_"
fx_px = 3666.534329132812
# Set "focal_length_y_px_"
fy_px = 3673.5030423482513
# Set "focal_center_x_px_"
cx_px = 1235.0158218941356
# Set "focal_center_y_px_"
cy_px = 1008.4536901420888
camera_name = "stereo_front_left"
height_px = 1550
width_px = 2048
pinhole_camera = _create_pinhole_camera(
fx_px=fx_px, fy_px=fy_px, cx_px=cx_px, cy_px=cy_px, height_px=height_px, width_px=width_px, cam_name=camera_name
)
left_plane, right_plane, near_plane, bottom_plane, top_plane = pinhole_camera.frustum_planes(near_clip_dist)
left_plane_expected: NDArrayFloat = np.array([fx_px, 0.0, width_px / 2.0, 0.0])
right_plane_expected: NDArrayFloat = np.array([-fx_px, 0.0, width_px / 2.0, 0.0])
near_plane_expected: NDArrayFloat = np.array([0.0, 0.0, 1.0, -near_clip_dist])
bottom_plane_expected: NDArrayFloat = np.array([0.0, -fx_px, height_px / 2.0, 0.0])
top_plane_expected: NDArrayFloat = np.array([0.0, fx_px, height_px / 2.0, 0.0])
assert np.allclose(left_plane, left_plane_expected / np.linalg.norm(left_plane_expected)) # type: ignore
assert np.allclose(right_plane, right_plane_expected / np.linalg.norm(right_plane_expected)) # type: ignore
assert np.allclose(bottom_plane, bottom_plane_expected / np.linalg.norm(bottom_plane_expected)) # type: ignore
assert np.allclose(top_plane, top_plane_expected / np.linalg.norm(top_plane_expected)) # type: ignore
assert np.allclose(near_plane, near_plane_expected)
def test_compute_pixel_ray_directions_vectorized_invalid_focal_lengths() -> None:
"""If focal lengths in the x and y directions do not match, we throw an exception.
Tests vectorized variant (multiple ray directions).
"""
uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])
fx = 10
fy = 11
img_w = 20
img_h = 10
pinhole_camera = _create_pinhole_camera(
fx_px=fx,
fy_px=fy,
cx_px=img_w / 2,
cy_px=img_h / 2,
height_px=img_h,
width_px=img_w,
cam_name="ring_front_center", # dummy name
)
with pytest.raises(ValueError):
pinhole_camera.compute_pixel_ray_directions(uv)
def test_compute_pixel_ray_direction_invalid_focal_lengths() -> None:
"""If focal lengths in the x and y directions do not match, we throw an exception.
Tests non-vectorized variant (single ray direction).
"""
u = 12
v = 2
fx = 10
fy = 11
img_w = 20
img_h = 10
with pytest.raises(ValueError):
_compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)
def test_compute_pixel_ray_directions_vectorized() -> None:
"""Ensure that the ray direction (in camera coordinate frame) for each pixel is computed correctly.
Small scale test, for a single pixel position replicated four times in a 10 x 20 px image in (height, width).
"""
fx = 10
fy = 10
# dummy 2d coordinates in the image plane.
uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])
# principal point is at (10,5)
img_w = 20
img_h = 10
pinhole_camera = _create_pinhole_camera(
fx_px=fx,
fy_px=fy,
cx_px=img_w / 2,
cy_px=img_h / 2,
height_px=img_h,
width_px=img_w,
cam_name="ring_front_center", # dummy name
)
ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)
gt_ray_dir: NDArrayFloat = np.array([2, -3, 10.0])
gt_ray_dir /= np.linalg.norm(gt_ray_dir) # type: ignore
for i in range(4):
assert np.allclose(gt_ray_dir, ray_dirs[i])
def test_compute_pixel_ray_directions_vectorized_entireimage() -> None:
"""Ensure that the ray direction for each pixel (in camera coordinate frame) is computed correctly.
Compare all computed rays against non-vectorized variant, for correctness.
Larger scale test, for every pixel in a 50 x 100 px image in (height, width).
"""
fx = 10
fy = 10
img_w = 100
img_h = 50
pinhole_camera = _create_pinhole_camera(
fx_px=fx,
fy_px=fy,
cx_px=img_w / 2,
cy_px=img_h / 2,
height_px=img_h,
width_px=img_w,
cam_name="ring_front_center", # dummy name
)
uv_list = []
for u in range(img_w):
for v in range(img_h):
uv_list += [(u, v)]
uv: NDArrayInt = np.array(uv_list)
assert uv.shape == (img_w * img_h, 2)
ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)
# compare w/ vectorized, should be identical
for i, ray_dir_vec in enumerate(ray_dirs):
u, v = uv[i]
ray_dir_nonvec = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)
assert np.allclose(ray_dir_vec, ray_dir_nonvec)
def test_compute_pixel_rays() -> None:
"""Ensure that the ray direction (in camera coordinate frame) for a single pixel is computed correctly.
Small scale test, for just one selected position in a 10 x 20 px image in (height, width).
For row = 2, column = 12.
"""
u = 12
v = 2
img_w = 20
img_h = 10
fx = 10
fy = 10
ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)
gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])
gt_ray_dir /= np.linalg.norm(gt_ray_dir) # type: ignore
assert np.allclose(gt_ray_dir, ray_dir)
def _compute_pixel_ray_direction(u: float, v: float, fx: float, fy: float, img_w: int, img_h: int) -> NDArrayFloat:
r"""Generate rays in the camera coordinate frame.
Note: only used as a test utility.
Find point P on image plane.
(x,y,z)-----(x,y,z)
\\ |
outside of frustum <- \\ |
outside of frustum <- \\ |
\\ (-w/2)|
o------o IMAGE PLANE
\\ |
\\ |
\\ |fx
\\ |
\\ |
O PINHOLE
Args:
u: pixel's x-coordinate
v: pixel's y-coordinate
fx: focal length in x-direction, measured in pixels.
fy: focal length in y-direction, measured in pixels.
img_w: image width (in pixels)
img_h: image height (in pixels)
Returns:
Direction of 3d ray, provided in the camera frame.
Raises:
ValueError: If horizontal and vertical focal lengths are not close (within 1e-3).
"""
if not np.isclose(fx, fy, atol=1e-3):
raise ValueError(f"Focal lengths in the x and y directions must match: {fx} != {fy}")
# approximation for principal point
px = img_w / 2
py = img_h / 2
# the camera coordinate frame (where Z is out, x is right, y is down).
# compute offset from the center
x_center_offs = u - px
y_center_offs = v - py
ray_dir: NDArrayFloat = np.array([x_center_offs, y_center_offs, fx])
ray_dir /= np.linalg.norm(ray_dir) # type: ignore
return ray_dir
def test_get_frustum_parameters() -> None:
r"""Ensure we can compute field of view, and camera's yaw in the egovehicle frame.
w/2 = 1000
o----------o IMAGE PLANE
\\ | //
\\ | //
\\ |fx = 1000
\\ | //
\\ | //
O PINHOLE
"""
fx, fy = 1000, 1000
img_w = 2000
img_h = 1000
pinhole_camera = _create_pinhole_camera(
fx_px=fx,
fy_px=fy,
cx_px=img_w / 2,
cy_px=img_h / 2,
height_px=img_h,
width_px=img_w,
cam_name="ring_front_center", # dummy name
)
fov_theta_deg = np.rad2deg(pinhole_camera.fov_theta_rad)
assert np.isclose(fov_theta_deg, 90.0)
# for identity SE(3), the yaw angle is zero radians.
cam_yaw_ego = pinhole_camera.egovehicle_yaw_cam_rad
assert np.isclose(cam_yaw_ego, 0)
def test_get_egovehicle_yaw_cam() -> None:
"""Ensure we can compute the camera's yaw in the egovehicle frame."""
sample_log_dir = _TEST_DATA_ROOT / "sensor_dataset_logs" / "test_log"
# clockwise around the top of the car, in degrees.
expected_ego_yaw_cam_deg_dict = {
"ring_rear_left": 153.2,
"ring_side_left": 99.4,
"ring_front_left": 44.7,
"ring_front_center": 0.4,
"ring_front_right": -44.9,
"ring_side_right": -98.9,
"ring_rear_right": -152.9,
}
for cam_enum in list(RingCameras):
cam_name = cam_enum.value
pinhole_camera = PinholeCamera.from_feather(log_dir=sample_log_dir, cam_name=cam_name)
ego_yaw_cam_deg = np.rad2deg(pinhole_camera.egovehicle_yaw_cam_rad)
assert np.isclose(ego_yaw_cam_deg, expected_ego_yaw_cam_deg_dict[cam_name], atol=0.1)
np.rad2deg(pinhole_camera.fov_theta_rad)
```
#### File: tests/map/test_map_api.py
```python
from pathlib import Path
import numpy as np
import pytest
from av2.map.drivable_area import DrivableArea
from av2.map.lane_segment import LaneSegment
from av2.map.map_api import ArgoverseStaticMap
from av2.map.map_primitives import Point, Polyline
from av2.map.pedestrian_crossing import PedestrianCrossing
from av2.utils.typing import NDArrayBool, NDArrayFloat
@pytest.fixture()
def dummy_static_map(test_data_root_dir: Path) -> ArgoverseStaticMap:
"""Set up test by instantiating static map object from dummy test data.
Args:
test_data_root_dir: Path to the root dir for test data (provided via fixture).
Returns:
Static map instantiated from dummy test data.
"""
log_map_dirpath = (
test_data_root_dir / "static_maps" / "dummy_log_map_gs1B8ZCv7DMi8cMt5aN5rSYjQidJXvGP__2020-07-21-Z1F0076"
)
return ArgoverseStaticMap.from_map_dir(log_map_dirpath, build_raster=True)
@pytest.fixture(scope="module")
def full_static_map(test_data_root_dir: Path) -> ArgoverseStaticMap:
"""Set up test by instantiating static map object from full test data.
Args:
test_data_root_dir: Path to the root dir for test data (provided via fixture).
Returns:
Static map instantiated from full test data.
"""
log_map_dirpath = (
test_data_root_dir / "static_maps" / "full_log_map_gs1B8ZCv7DMi8cMt5aN5rSYjQidJXvGP__2020-07-21-Z1F0076"
)
return ArgoverseStaticMap.from_map_dir(log_map_dirpath, build_raster=True)
class TestPolyline:
"""Class for unit testing `PolyLine`."""
def test_from_list(self) -> None:
"""Ensure object is generated correctly from a list of dictionaries."""
points_dict_list = [{"x": 874.01, "y": -105.15, "z": -19.58}, {"x": 890.58, "y": -104.26, "z": -19.58}]
polyline = Polyline.from_json_data(points_dict_list)
assert isinstance(polyline, Polyline)
assert len(polyline.waypoints) == 2
assert polyline.waypoints[0] == Point(874.01, -105.15, -19.58)
assert polyline.waypoints[1] == Point(890.58, -104.26, -19.58)
def test_from_array(self) -> None:
"""Ensure object is generated correctly from a Numpy array of shape (N,3)."""
# fmt: off
array: NDArrayFloat = np.array([
[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.],
[9., 10., 11.]
])
# fmt: on
polyline = Polyline.from_array(array)
assert isinstance(polyline, Polyline)
assert len(polyline) == 4
assert polyline.waypoints[0].x == 1
assert polyline.waypoints[0].y == 2
assert polyline.waypoints[0].z == 3
class TestPedestrianCrossing:
"""Class for unit testing `PedestrianCrossing`."""
def test_from_dict(self) -> None:
"""Ensure object is generated correctly from a dictionary."""
json_data = {
"id": 6310421,
"edge1": [{"x": 899.17, "y": -91.52, "z": -19.58}, {"x": 915.68, "y": -93.93, "z": -19.53}],
"edge2": [{"x": 899.44, "y": -95.37, "z": -19.48}, {"x": 918.25, "y": -98.05, "z": -19.4}],
}
pedestrian_crossing = PedestrianCrossing.from_dict(json_data)
assert isinstance(pedestrian_crossing, PedestrianCrossing)
class TestArgoverseStaticMap:
"""Unit test for the Argoverse 2.0 per-log map."""
def test_get_lane_segment_successor_ids(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure lane segment successors are fetched properly."""
lane_segment_id = 93269421
successor_ids = dummy_static_map.get_lane_segment_successor_ids(lane_segment_id)
expected_successor_ids = [93269500]
assert successor_ids == expected_successor_ids
lane_segment_id = 93269500
successor_ids = dummy_static_map.get_lane_segment_successor_ids(lane_segment_id)
expected_successor_ids = [93269554]
assert successor_ids == expected_successor_ids
lane_segment_id = 93269520
successor_ids = dummy_static_map.get_lane_segment_successor_ids(lane_segment_id)
expected_successor_ids = [93269526]
assert successor_ids == expected_successor_ids
def test_lane_is_in_intersection(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure the attribute describing if a lane segment is located with an intersection is fetched properly."""
lane_segment_id = 93269421
in_intersection = dummy_static_map.lane_is_in_intersection(lane_segment_id)
assert isinstance(in_intersection, bool)
assert not in_intersection
lane_segment_id = 93269500
in_intersection = dummy_static_map.lane_is_in_intersection(lane_segment_id)
assert isinstance(in_intersection, bool)
assert in_intersection
lane_segment_id = 93269520
in_intersection = dummy_static_map.lane_is_in_intersection(lane_segment_id)
assert isinstance(in_intersection, bool)
assert not in_intersection
def test_get_lane_segment_left_neighbor_id(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Test getting a lane segment id from the left neighbor."""
# Ensure id of lane segment (if any) that is the left neighbor to the query lane segment can be fetched properly
lane_segment_id = 93269421
l_neighbor_id = dummy_static_map.get_lane_segment_left_neighbor_id(lane_segment_id)
assert l_neighbor_id is None
lane_segment_id = 93269500
l_neighbor_id = dummy_static_map.get_lane_segment_left_neighbor_id(lane_segment_id)
assert l_neighbor_id is None
lane_segment_id = 93269520
l_neighbor_id = dummy_static_map.get_lane_segment_left_neighbor_id(lane_segment_id)
assert l_neighbor_id == 93269421
def test_get_lane_segment_right_neighbor_id(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Test getting a lane segment id from the right neighbor."""
# Ensure id of lane segment (if any) that is the right neighbor to the query lane segment can be fetched
lane_segment_id = 93269421
r_neighbor_id = dummy_static_map.get_lane_segment_right_neighbor_id(lane_segment_id)
assert r_neighbor_id == 93269520
lane_segment_id = 93269500
r_neighbor_id = dummy_static_map.get_lane_segment_right_neighbor_id(lane_segment_id)
assert r_neighbor_id == 93269526
lane_segment_id = 93269520
r_neighbor_id = dummy_static_map.get_lane_segment_right_neighbor_id(lane_segment_id)
assert r_neighbor_id == 93269458
def test_get_scenario_lane_segment_ids(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure ids of all lane segments in the local map can be fetched properly."""
lane_segment_ids = dummy_static_map.get_scenario_lane_segment_ids()
expected_lane_segment_ids = [93269421, 93269500, 93269520]
assert lane_segment_ids == expected_lane_segment_ids
def test_get_lane_segment_polygon(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure lane segment polygons are fetched properly."""
lane_segment_id = 93269421
ls_polygon = dummy_static_map.get_lane_segment_polygon(lane_segment_id)
assert isinstance(ls_polygon, np.ndarray)
expected_ls_polygon: NDArrayFloat = np.array(
[
[874.01, -105.15, -19.58],
[890.58, -104.26, -19.58],
[890.29, -100.56, -19.66],
[880.31, -101.44, -19.7],
[873.97, -101.75, -19.7],
[874.01, -105.15, -19.58],
]
)
np.testing.assert_allclose(ls_polygon, expected_ls_polygon) # type: ignore
def test_get_lane_segment_centerline(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure lane segment centerlines can be inferred and fetched properly."""
lane_segment_id = 93269421
centerline = dummy_static_map.get_lane_segment_centerline(lane_segment_id)
assert isinstance(centerline, np.ndarray)
expected_centerline: NDArrayFloat = np.array(
[
[873.99, -103.45, -19.64],
[875.81871374, -103.35615034, -19.64],
[877.64742747, -103.26230069, -19.64],
[879.47614121, -103.16845103, -19.64],
[881.30361375, -103.0565384, -19.63815074],
[883.129891, -102.92723072, -19.63452059],
[884.95616825, -102.79792304, -19.63089044],
[886.7824455, -102.66861536, -19.62726029],
[888.60872275, -102.53930768, -19.62363015],
[890.435, -102.41, -19.62],
]
)
np.testing.assert_allclose(centerline, expected_centerline) # type: ignore
def test_get_scenario_lane_segments(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure that all LaneSegment objects in the local map can be returned as a list."""
vector_lane_segments = dummy_static_map.get_scenario_lane_segments()
assert isinstance(vector_lane_segments, list)
assert all([isinstance(vls, LaneSegment) for vls in vector_lane_segments])
assert len(vector_lane_segments) == 3
def test_get_scenario_ped_crossings(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure that all PedCrossing objects in the local map can be returned as a list."""
ped_crossings = dummy_static_map.get_scenario_ped_crossings()
assert isinstance(ped_crossings, list)
assert all([isinstance(pc, PedestrianCrossing) for pc in ped_crossings])
# fmt: off
expected_ped_crossings = [
PedestrianCrossing(
id=6310407,
edge1=Polyline.from_array(np.array(
[
[ 892.17, -99.44, -19.59],
[ 893.47, -115.4 , -19.45]
]
)), edge2=Polyline.from_array(np.array(
[
[ 896.06, -98.95, -19.52],
[ 897.43, -116.58, -19.42]
]
))
), PedestrianCrossing(
id=6310421,
edge1=Polyline.from_array(np.array(
[
[899.17, -91.52, -19.58],
[915.68, -93.93, -19.53]
]
)),
edge2=Polyline.from_array(np.array(
[
[899.44, -95.37, -19.48],
[918.25, -98.05, -19.4]
]
)),
)
]
# fmt: on
assert len(ped_crossings) == len(expected_ped_crossings)
assert all([pc == expected_pc for pc, expected_pc in zip(ped_crossings, expected_ped_crossings)])
def test_get_scenario_vector_drivable_areas(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure that drivable areas are loaded and formatted correctly."""
vector_das = dummy_static_map.get_scenario_vector_drivable_areas()
assert isinstance(vector_das, list)
assert len(vector_das) == 1
assert isinstance(vector_das[0], DrivableArea)
# examine just one sample
vector_da = vector_das[0]
assert vector_da.xyz.shape == (172, 3)
# compare first and last vertex, for equality
np.testing.assert_allclose(vector_da.xyz[0], vector_da.xyz[171]) # type: ignore
# fmt: off
# compare first 4 vertices
expected_first4_vertices: NDArrayFloat = np.array(
[[905.09, -148.95, -19.19],
[904.85, -141.95, -19.25],
[904.64, -137.25, -19.28],
[904.37, -132.55, -19.32]])
# fmt: on
np.testing.assert_allclose(vector_da.xyz[:4], expected_first4_vertices) # type: ignore
def test_get_ground_height_at_xy(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure that ground height at (x,y) locations can be retrieved properly."""
point_cloud: NDArrayFloat = np.array(
[
[770.6398, -105.8351, -19.4105], # ego-vehicle pose at one timestamp
[943.5386, -49.6295, -19.3291], # ego-vehicle pose at one timestamp
[918.0960, 82.5588, -20.5742], # ego-vehicle pose at one timestamp
[9999999, 999999, 0], # obviously out of bounds value for city coordinate system
[-999999, -999999, 0], # obviously out of bounds value for city coordinate system
]
)
assert dummy_static_map.raster_ground_height_layer is not None
ground_height_z = dummy_static_map.raster_ground_height_layer.get_ground_height_at_xy(point_cloud)
assert ground_height_z.shape[0] == point_cloud.shape[0]
assert ground_height_z.dtype == np.dtype(np.float64)
# last 2 indices should be filled with dummy values (NaN) because obviously out of bounds.
assert np.all(np.isnan(ground_height_z[-2:]))
# based on grid resolution, ground should be within 7 centimeters of 30cm under back axle.
expected_ground = point_cloud[:3, 2] - 0.30
assert np.allclose(np.absolute(expected_ground - ground_height_z[:3]), 0, atol=0.07)
def test_get_ground_points_boolean(self, dummy_static_map: ArgoverseStaticMap) -> None:
"""Ensure that points close to the ground surface are correctly classified as `ground` category."""
point_cloud: NDArrayFloat = np.array(
[
[770.6398, -105.8351, -19.4105], # ego-vehicle pose at one timestamp
[943.5386, -49.6295, -19.3291], # ego-vehicle pose at one timestamp
[918.0960, 82.5588, -20.5742], # ego-vehicle pose at one timestamp
[9999999, 999999, 0], # obviously out of bounds value for city coordinate system
[-999999, -999999, 0], # obviously out of bounds value for city coordinate system
]
)
# first 3 points correspond to city_SE3_ego, i.e. height of rear axle in city frame
# ~30 cm below the axle should be the ground surface.
point_cloud -= 0.30
assert dummy_static_map.raster_ground_height_layer is not None
is_ground_pt = dummy_static_map.raster_ground_height_layer.get_ground_points_boolean(point_cloud)
expected_is_ground_pt: NDArrayBool = np.array([True, True, True, False, False])
assert is_ground_pt.dtype == np.dtype(bool)
assert np.array_equal(is_ground_pt, expected_is_ground_pt)
def test_load_motion_forecasting_map(test_data_root_dir: Path) -> None:
"""Try to load a real map from the motion forecasting dataset."""
mf_scenario_id = "0a1e6f0a-1817-4a98-b02e-db8c9327d151"
mf_scenario_map_path = (
test_data_root_dir / "forecasting_scenarios" / mf_scenario_id / f"log_map_archive_{mf_scenario_id}.json"
)
mf_map = ArgoverseStaticMap.from_json(mf_scenario_map_path)
assert mf_map.log_id == mf_scenario_id
```
#### File: tests/rendering/test_color.py
```python
import numpy as np
import av2.rendering.color as color_utils
from av2.rendering.color import GREEN_HEX, RED_HEX
def test_create_colormap() -> None:
"""Ensure we can create a red-to-green RGB colormap with values in [0,1]."""
colors_arr_rgb = color_utils.create_colormap(color_list=[RED_HEX, GREEN_HEX], n_colors=10)
assert np.logical_and(0 <= colors_arr_rgb, colors_arr_rgb <= 1).all()
assert colors_arr_rgb.shape == (10, 3)
```
#### File: tests/rendering/test_video.py
```python
from pathlib import Path
from tempfile import NamedTemporaryFile
import numpy as np
import av2.rendering.video as video_utils
from av2.utils.typing import NDArrayByte, NDArrayFloat
def generate_dummy_rgb_video(N: int, H: int, W: int) -> NDArrayByte:
"""Generate dummy video data (increasing brightness from top-left to bottom-right, and as video progresses).
Args:
N: number of video frames to generate.
H: frame height, in pixels.
W: frame width, in pixels.
Returns:
tensor of shape (N,H,W,3)
Raises:
ValueError: if more than 55 frames are requested (to keep values in [0,200 + 55]).
"""
if N > 55:
raise ValueError("Will overflow")
video: NDArrayByte = np.zeros((N, H, W, 3), dtype=np.uint8)
for frame_idx in np.arange(N):
frame_f: NDArrayFloat = np.arange(H * W).reshape(H, W).astype(np.float32)
frame_f /= frame_f.max()
frame_f *= 200.0
frame_f += frame_idx
frame: NDArrayByte = frame_f.astype(np.uint8)
for c in range(3):
video[frame_idx, :, :, c] = frame
return video
def test_write_video_even_dims() -> None:
"""Ensure we can encode a video tensor (with even H/W dimensions) as a mp4 file with AV, and save it to disk.
Dummy data is 30 frames of (60,60) RGB video.
"""
video: NDArrayByte = generate_dummy_rgb_video(N=30, H=60, W=60)
save_fpath = Path(NamedTemporaryFile(suffix=".mp4").name)
assert not save_fpath.exists()
video_utils.write_video(
video=video,
dst=save_fpath,
)
assert save_fpath.exists()
def test_write_video_odd_dims() -> None:
"""Ensure we can encode a video tensor (with odd H/W dimensions) as a mp4 file with AV, and save it to disk.
Dummy data is 30 frames of (65,65) RGB video.
"""
video: NDArrayByte = generate_dummy_rgb_video(N=30, H=65, W=65)
save_fpath = Path(NamedTemporaryFile(suffix=".mp4").name)
assert not save_fpath.exists()
video_utils.write_video(
video=video,
dst=save_fpath,
)
assert save_fpath.exists()
def test_crop_video_to_even_dims() -> None:
"""Ensure we can crop a video tensor along the height and width dimensions to assure even dimensions.
Dummy data is 55 frames of (501,501) RGB video.
"""
video: NDArrayByte = generate_dummy_rgb_video(N=55, H=501, W=501)
cropped_video = video_utils.crop_video_to_even_dims(video)
assert cropped_video.shape == (55, 500, 500, 3)
assert cropped_video.dtype == np.dtype(np.uint8)
save_fpath = Path(NamedTemporaryFile(suffix=".mp4").name)
assert not save_fpath.exists()
video_utils.write_video(video=cropped_video, dst=save_fpath, fps=10, preset="medium")
assert save_fpath.exists()
```
#### File: tests/structures/test_sweep.py
```python
from pathlib import Path
import numpy as np
import pytest
from av2.structures.sweep import Sweep
from av2.utils.typing import NDArrayByte, NDArrayFloat, NDArrayInt
@pytest.fixture
def dummy_sweep(test_data_root_dir: Path) -> Sweep:
"""Get a fake sweep containing two points."""
path = test_data_root_dir / "sensor_dataset_logs" / "dummy" / "sensors" / "lidar" / "315968663259918000.feather"
return Sweep.from_feather(path)
def test_sweep_from_feather(dummy_sweep: Sweep) -> None:
"""Test loading a sweep from a feather file."""
xyz_expected: NDArrayFloat = np.array([[-22.1875, 20.484375, 0.55029296875], [-20.609375, 19.1875, 1.30078125]])
intensity_expected: NDArrayByte = np.array([38, 5], dtype=np.uint8)
laser_number_expected: NDArrayByte = np.array([19, 3], dtype=np.uint8)
offset_ns_expected: NDArrayInt = np.array([253440, 283392], dtype=np.int32)
timestamp_ns_expected: int = 315968663259918000
assert np.array_equal(dummy_sweep.xyz, xyz_expected)
assert np.array_equal(dummy_sweep.intensity, intensity_expected)
assert np.array_equal(dummy_sweep.laser_number, laser_number_expected)
assert np.array_equal(dummy_sweep.offset_ns, offset_ns_expected)
assert dummy_sweep.timestamp_ns == timestamp_ns_expected
```
#### File: tests/utils/test_mesh_grid.py
```python
from typing import List
import numpy as np
import av2.geometry.mesh_grid as mesh_grid_utils
from av2.utils.typing import NDArrayFloat
def test_get_mesh_grid_as_point_cloud_3x3square() -> None:
"""Ensure a sampled regular grid returns 9 grid points from 1 meter resolution on 2x2 meter area."""
min_x = -3 # integer, minimum x-coordinate of 2D grid
max_x = -1 # integer, maximum x-coordinate of 2D grid
min_y = 2 # integer, minimum y-coordinate of 2D grid
max_y = 4 # integer, maximum y-coordinate of 2D grid
# return pts, a Numpy array of shape (N,2)
pts = mesh_grid_utils.get_mesh_grid_as_point_cloud(min_x, max_x, min_y, max_y, downsample_factor=1.0)
assert pts.shape == (9, 2)
gt_pts: NDArrayFloat = np.array(
[
[-3.0, 2.0],
[-2.0, 2.0],
[-1.0, 2.0],
[-3.0, 3.0],
[-2.0, 3.0],
[-1.0, 3.0],
[-3.0, 4.0],
[-2.0, 4.0],
[-1.0, 4.0],
]
)
assert np.allclose(gt_pts, pts)
def test_get_mesh_grid_as_point_cloud_3x2rect() -> None:
"""Ensure a sampled regular grid returns 6 grid points from 1 meter resolution on 1x2 meter area."""
min_x = -3 # integer, minimum x-coordinate of 2D grid
max_x = -1 # integer, maximum x-coordinate of 2D grid
min_y = 2 # integer, minimum y-coordinate of 2D grid
max_y = 3 # integer, maximum y-coordinate of 2D grid
# return pts, a Numpy array of shape (N,2)
pts = mesh_grid_utils.get_mesh_grid_as_point_cloud(min_x, max_x, min_y, max_y, downsample_factor=1.0)
assert pts.shape == (6, 2)
# fmt: off
gt_pts: NDArrayFloat = np.array(
[
[-3.0, 2.0],
[-2.0, 2.0],
[-1.0, 2.0],
[-3.0, 3.0],
[-2.0, 3.0],
[-1.0, 3.0]
])
# fmt: on
assert np.allclose(gt_pts, pts)
def test_get_mesh_grid_as_point_cloud_single_pt() -> None:
"""Ensure a sampled regular grid returns only 1 point for a range of 0 meters in x and 0 meters in y."""
min_x = -3 # integer, minimum x-coordinate of 2D grid
max_x = -3 # integer, maximum x-coordinate of 2D grid
min_y = 2 # integer, minimum y-coordinate of 2D grid
max_y = 2 # integer, maximum y-coordinate of 2D grid
# return pts, a Numpy array of shape (N,2)
pts = mesh_grid_utils.get_mesh_grid_as_point_cloud(min_x, max_x, min_y, max_y, downsample_factor=1.0)
assert pts.shape == (1, 2)
gt_pts: NDArrayFloat = np.array([[-3.0, 2.0]])
assert np.allclose(gt_pts, pts)
def test_get_mesh_grid_as_point_cloud_downsample() -> None:
"""Given 3x3 area, ensure a sampled regular grid returns coordinates at 4 corners only."""
min_x = -3 # integer, minimum x-coordinate of 2D grid
max_x = 0 # integer, maximum x-coordinate of 2D grid
min_y = 2 # integer, minimum y-coordinate of 2D grid
max_y = 5 # integer, maximum y-coordinate of 2D grid
# return pts, a Numpy array of shape (N,2)
pts = mesh_grid_utils.get_mesh_grid_as_point_cloud(min_x, max_x, min_y, max_y, downsample_factor=3.0)
assert pts.shape == (4, 2)
# fmt: off
gt_pts: List[List[float]] = [
[-3.0, 2.0],
[0.0, 2.0],
[-3.0, 5.0],
[0.0, 5.0]
]
# fmt: on
assert np.allclose(gt_pts, pts)
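# A minimal sketch (assumption) of how such a grid could be produced with numpy, included only
# to make the downsample_factor semantics explicit; not the library implementation.
def _mesh_grid_sketch(min_x: float, max_x: float, min_y: float, max_y: float, downsample_factor: float = 1.0) -> NDArrayFloat:
    """Sample a regular 2D grid (x varies fastest) with spacing equal to `downsample_factor`."""
    xs = np.arange(min_x, max_x + 1e-9, downsample_factor)
    ys = np.arange(min_y, max_y + 1e-9, downsample_factor)
    xx, yy = np.meshgrid(xs, ys)
    return np.stack([xx.ravel(), yy.ravel()], axis=1)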
``` |
{
"source": "jhonykaesemodel/image2mesh",
"score": 2
} |
#### File: python/voxel/get_all_iou.py
```python
import os
import numpy as np
import scipy.io
import binvox_rw
path = dict()
path['model_gt'] = r'Z:\datasets\FreeFormDeformation'
path['model_hat'] = r'Z:\data\img2mesh'
class2uid = {
'bottle' : '02876657',
'bicycle' : '02834778',
'knife' : '03624134',
'chair' : '03001627',
'car' : '02958343',
'diningtable' : '04379243',
'sofa' : '04256520',
'bed' : '02818832',
'dresser' : '02933112',
'aeroplane' : '02691156',
'motorbike' : '03790512',
'bus' : '02924116',
}
def compute_iou(cls):
modeldir_gt = os.path.join(path['model_gt'], class2uid[cls], 'rendered')
modeldir_hat = os.path.join(path['model_hat'], class2uid[cls], 'obj_models')
setname = 'estimated_objs'
setfile = os.path.join(modeldir_hat, setname+'.list')
iou = []
print('Computing IoU...')
with open(setfile, 'r') as fp:
for line in fp:
muid = line[:-1]
muid_hat = muid.split('.')[0]
muid_gt = 'render'+muid_hat[5:]
bvfile_gt = os.path.join(modeldir_gt, muid_gt+'.binvox')
bvfile_hat = os.path.join(modeldir_hat, muid_hat+'.binvox')
with open(bvfile_gt, 'rb') as bvf:
vxl_gt = binvox_rw.read_as_3d_array(bvf)
with open(bvfile_hat, 'rb') as bvf:
vxl_hat = binvox_rw.read_as_3d_array(bvf)
# The prediction and gt are 3 dim voxels. Each voxel has values 1 or 0
intersection = np.sum(np.logical_and(vxl_hat.data, vxl_gt.data))
union = np.sum(np.logical_or(vxl_hat.data, vxl_gt.data))
IoU = intersection / union
print(IoU)
iou.append(IoU)
filename = os.path.join(modeldir_hat, 'all_iou_test.mat')
scipy.io.savemat(filename, {'all_iou_test': iou})
print(np.mean(iou))
print('Finally done!')
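# Hypothetical usage (assumes the network paths configured above are reachable on this machine):
if __name__ == '__main__':
    compute_iou('chair')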
``` |
{
"source": "jhoobergs/numbas-lti-provider",
"score": 2
} |
#### File: numbas-lti-provider/numbas_lti/groups.py
```python
def group_for_user(user):
return 'user-{}'.format(user.id)
def group_for_attempt(attempt):
return 'attempt-{}'.format(attempt.id)
def group_for_resource(resource):
return 'resource-{}'.format(resource.id)
def group_for_resource_stats(resource):
return 'resource-{}-stats'.format(resource.id)
```
#### File: management/commands/test_exam.py
```python
from django.core.management.base import BaseCommand
import os
import json
import subprocess
import datetime
from django.utils.timezone import now
from numbas_lti.models import Exam, Resource
from numbas_lti.test_exam import test_exam, ExamTestException
class Command(BaseCommand):
help = 'Test an exam package'
def add_arguments(self, parser):
parser.add_argument('exam_pk',nargs='?',type=int)
parser.add_argument('--resource',type=int,dest='resource', help='The ID of a resource whose current exam package should be tested')
def handle(self, *args, **options):
if options.get('resource'):
exam = Resource.objects.get(pk=options['resource']).exam
elif options.get('exam_pk'):
exam_pk = options['exam_pk']
exam = Exam.objects.get(pk=exam_pk)
else:
raise Exception("You must give either an exam ID or a resource ID.")
print("Testing exam {}, \"{}\".".format(exam.pk,exam.title))
try:
result = test_exam(exam)
print("This exam passed all the tests.")
except ExamTestException as e:
print(e)
```
#### File: numbas_lti/migrations/0061_exam_static_uuid.py
```python
from django.conf import settings
from django.db import migrations, models
import uuid
import os
import shutil
def move_to_uuid_folder(apps, schema_editor):
Exam = apps.get_model('numbas_lti', 'Exam')
for exam in Exam.objects.all():
exam.static_uuid = uuid.uuid4()
exam.save()
old_path = os.path.join(os.getcwd(), settings.MEDIA_ROOT,'extracted_zips',exam.__class__.__name__,str(exam.pk))
new_path = os.path.join(os.getcwd(), settings.MEDIA_ROOT,'extracted_zips',exam.__class__.__name__,str(exam.static_uuid))
if os.path.exists(old_path):
shutil.move(old_path, new_path)
def undo_move_to_uuid_folder(apps, schema_editor):
Exam = apps.get_model('numbas_lti', 'Exam')
for exam in Exam.objects.all():
old_path = os.path.join(os.getcwd(), settings.MEDIA_ROOT,'extracted_zips',exam.__class__.__name__,str(exam.pk))
new_path = os.path.join(os.getcwd(), settings.MEDIA_ROOT,'extracted_zips',exam.__class__.__name__,str(exam.static_uuid))
if os.path.exists(new_path):
shutil.move(new_path, old_path)
class Migration(migrations.Migration):
dependencies = [
('numbas_lti', '0060_auto_20201215_1345'),
]
operations = [
migrations.AddField(
model_name='exam',
name='static_uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False, verbose_name='UUID of exam package on disk'),
),
migrations.RunPython(move_to_uuid_folder, undo_move_to_uuid_folder),
migrations.AlterField(
model_name='exam',
name='static_uuid',
field=models.UUIDField(default=uuid.uuid4, editable=False, unique=True, verbose_name='UUID of exam package on disk'),
)
]
```
#### File: numbas_lti/migrations/0063_auto_20210211_1307.py
```python
from django.db import migrations, models
import django.db.models.deletion
def set_exam_resources(apps, schema_editor):
Resource = apps.get_model('numbas_lti', 'Resource')
Attempt = apps.get_model('numbas_lti', 'Attempt')
for r in Resource.objects.exclude(exam=None):
r.exam.resource = r
r.exam.save()
for a in Attempt.objects.exclude(exam=None):
if a.exam.resource is None:
a.exam.resource = a.resource
a.exam.save()
class Migration(migrations.Migration):
dependencies = [
('numbas_lti', '0062_scormelementdiff'),
]
operations = [
migrations.AddField(
model_name='exam',
name='resource',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='exams', to='numbas_lti.Resource'),
),
migrations.AlterField(
model_name='resource',
name='exam',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='main_exam_of', to='numbas_lti.Exam'),
),
migrations.RunPython(set_exam_resources,migrations.RunPython.noop),
]
``` |
{
"source": "J-Hooch/MIT6.0001",
"score": 4
} |
#### File: assignments/ps2/hangman.py
```python
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print("Loading word list from file...")
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r')
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = line.split()
print(" ", len(wordlist), "words loaded.")
return wordlist
def choose_word(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# Load the list of words into the variable wordlist
# so that it can be accessed from anywhere in the program
wordlist = load_words()
def is_word_guessed(secret_word, letters_guessed):
'''
secret_word: string, the word the user is guessing; assumes all letters are
lowercase
letters_guessed: list (of letters), which letters have been guessed so far;
assumes that all letters are lowercase
returns: boolean, True if all the letters of secret_word are in letters_guessed;
False otherwise
'''
#turn secret word into list
secret_word_list=list(secret_word)
letters_guessed_check = []
#make a list of only correctly guesed letters
for i in range(len(letters_guessed)):
for x in range(len(secret_word_list)):
if letters_guessed[i] == secret_word_list[x]:
letters_guessed_check.append(letters_guessed[i])
#compare correct guessed letters to secret word as sets
return set(letters_guessed_check) == set(secret_word_list)
def get_guessed_word(secret_word, letters_guessed):
'''
secret_word: string, the word the user is guessing
letters_guessed: list (of letters), which letters have been guessed so far
returns: string, comprised of letters, underscores (_), and spaces that represents
which letters in secret_word have been guessed so far.
'''
#turn secret word into list
secret_word_list = list(secret_word)
revealed_word_list = []
for i in range(len(secret_word_list)):
if str(secret_word_list[i]) in letters_guessed:
revealed_word_list.append(secret_word_list[i])
else:
revealed_word_list.append("_")
return ''.join(str(i) for i in revealed_word_list)
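# Illustrative example (assumed usage): get_guessed_word("apple", ["e", "l"]) returns "___le".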
def get_available_letters(letters_guessed):
'''
letters_guessed: list (of letters), which letters have been guessed so far
returns: string (of letters), comprised of letters that represents which letters have not
yet been guessed.
'''
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet_list = list(alphabet)
available_letters = []
for i in range(len(alphabet_list)):
if alphabet_list[i] not in letters_guessed:
available_letters.append(alphabet_list[i])
return ''.join(str(i) for i in available_letters)
def hangman(secret_word):
'''
secret_word: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secret_word contains and how many guesses s/he starts with.
* The user should start with 6 guesses
* Before each round, you should display to the user how many guesses
s/he has left and the letters that the user has not yet guessed.
* Ask the user to supply one guess per round. Remember to make
sure that the user puts in a letter!
* The user should receive feedback immediately after each guess
about whether their guess appears in the computer's word.
* After each guess, you should display to the user the
partially guessed word so far.
Follows the other limitations detailed in the problem write-up.
'''
secret_word_list = list(secret_word)
secret_word_length = len(secret_word)
letters_guessed = []
guesses_left = 6
warning = 3
print(f"Welcome to the game Hangman!\nI am thinking of a word that is {secret_word_length} letters long\n___")
while guesses_left > 0:
print(f"You have {guesses_left} guesses left")
print(f"Available letters {get_available_letters(letters_guessed)}")
guess = input("Please guess a letter:")
guess = guess.lower() # force lowercase
#check if user input is proper(a letter) WARNING
while guess.isalpha() is False:
warning -= 1
print(f"Oops! That is not a valid letter. You have {warning} warnings left "
f"{get_guessed_word(secret_word, letters_guessed)}")
if warning < 0:
print("You Lose!")
quit()
guess = input("Please guess a letter:")
guess = guess.lower() # force lowercase
#check if user has entered a previous guess WARNING
if guess in letters_guessed:
warning -= 1
print(f"Oops! This letter has already been guessed. You have {warning} warnings left "
f"{get_guessed_word(secret_word, letters_guessed)}")
if warning < 0:
print("You Lose!")
quit()
guess = input("Please guess a letter:")
guess = guess.lower() # force lowercase
letters_guessed.append(guess)
#check if guess is in secret word
if guess in secret_word_list:
print(f"Good guess: {get_guessed_word(secret_word, letters_guessed)}\n___")
else:
print(f"Oops! That letter is not in my word: {get_guessed_word(secret_word,letters_guessed)}\n___")
guesses_left -= 1
#check if word is guessed
if is_word_guessed(secret_word, letters_guessed) is True:
print(f"Congratulations, you won!\nYour total score "
f"for this game is : {guesses_left * secret_word_length}\n___")
quit()
#Game is lost when you run out of guesses
print(f"You have ran out of guesses.You are a loser. Better luck next time.\nThe answer was{secret_word}")
# When you've completed your hangman function, scroll down to the bottom
# of the file and uncomment the first two lines to test
#(hint: you might want to pick your own
# secret_word while you're doing your own testing)
# -----------------------------------
def match_with_gaps(my_word, other_word):
'''
my_word: string with _ characters, current guess of secret word
other_word: string, regular English word
returns: boolean, True if all the actual letters of my_word match the
corresponding letters of other_word, or the letter is the special symbol
_ , and my_word and other_word are of the same length;
False otherwise:
'''
my_word_list = list(my_word)
other_word_list = list(other_word)
check = True
#check if each letter, that is not "_" matches
for i in range(len(other_word_list)):
if my_word_list[i] != other_word_list[i] and my_word_list[i] != "_":
check *= False
#check if there is an "_" where there would be a duplicate guessed letter
if my_word_list[i] == "_":
if other_word_list[i] in my_word_list:
check *= False
return bool(check)
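# Illustrative examples of the rules above (assumed behaviour, not part of the original assignment):
# match_with_gaps("te_t", "tact") -> False (revealed 'e' does not match 'a')
# match_with_gaps("a_ple", "apple") -> False ('p' is already revealed, so '_' cannot stand for 'p')
# match_with_gaps("a_ple", "ample") -> True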
def show_possible_matches(my_word):
'''
my_word: string with _ characters, current guess of secret word
returns: nothing, but should print out every word in wordlist that matches my_word
Keep in mind that in hangman when a letter is guessed, all the positions
at which that letter occurs in the secret word are revealed.
Therefore, the hidden letter(_ ) cannot be one of the letters in the word
that has already been revealed.
'''
my_word_list = list(my_word)
temp = []
for i in range(len(wordlist)):
#add same word length and match with gaps to temp
if len(my_word) == len(wordlist[i]) and match_with_gaps(my_word, wordlist[i]):
temp.append(wordlist[i])
print(" ".join(temp))
def hangman_with_hints(secret_word):
'''
secret_word: string, the secret word to guess.
Starts up an interactive game of Hangman.
* At the start of the game, let the user know how many
letters the secret_word contains and how many guesses s/he starts with.
* The user should start with 6 guesses
* Before each round, you should display to the user how many guesses
s/he has left and the letters that the user has not yet guessed.
* Ask the user to supply one guess per round. Make sure to check that the user guesses a letter
* The user should receive feedback immediately after each guess
about whether their guess appears in the computer's word.
* After each guess, you should display to the user the
partially guessed word so far.
* If the guess is the symbol *, print out all words in wordlist that
match the current guessed word.
Follows the other limitations detailed in the problem write-up.
'''
secret_word_list = list(secret_word)
secret_word_length = len(secret_word)
letters_guessed = []
guesses_left = 6
warning = 3
vowel = ["a","e","i","o","u"]
print(f"Welcome to the game Hangman!\nI am thinking of a word that is {secret_word_length} letters long\n___")
while guesses_left > 0:
print(f"You have {guesses_left} guesses left")
print(f"Available letters {get_available_letters(letters_guessed)}")
guess = input("Please guess a letter:")
guess = guess.lower() # force lowercase
#allow user to guess a hint
if guess == "*":
print("Possible word matches are:")
show_possible_matches(get_guessed_word(secret_word,letters_guessed))
print("---")
continue
#check if user input is proper(a letter) WARNING
while guess.isalpha() is False and guess != "*":
warning -= 1
print(f"Oops! That is not a valid letter. You have {warning} warnings left "
f"{get_guessed_word(secret_word, letters_guessed)}")
if warning < 0:
print("You Lose!")
quit()
guess = input("Please guess a letter:")
guess = guess.lower() # force lowercase
#check if user has entered a previous guess WARNING
if guess in letters_guessed:
warning -= 1
print(f"Oops! This letter has already been guessed. You have {warning} warnings left "
f"{get_guessed_word(secret_word, letters_guessed)}")
if warning < 0:
print("You Lose!")
quit()
guess = input("Please guess a letter:")
guess = guess.lower() # force lowercase
letters_guessed.append(guess)
#check if guess is in secret word
if guess in secret_word_list:
print(f"Good guess: {get_guessed_word(secret_word, letters_guessed)}\n___")
elif guess in vowel:
print(f"Oops! That letter is not in my word: {get_guessed_word(secret_word,letters_guessed)}\n___")
guesses_left -= 2
else:
print(f"Oops! That letter is not in my word: {get_guessed_word(secret_word,letters_guessed)}\n___")
guesses_left -= 1
#check if word is guessed
if is_word_guessed(secret_word, letters_guessed) is True:
print(f"Congratulations, you won!\nYour total score "
f"for this game is : {guesses_left * secret_word_length}\n___")
quit()
#Game is lost when you run out of guesses
print(f"You have ran out of guesses.You are a loser. Better luck next time.\nThe answer was {secret_word}")
# When you've completed your hangman_with_hint function, comment the two similar
# lines above that were used to run the hangman function, and then uncomment
# these two lines and run this file to test!
# Hint: You might want to pick your own secret_word while you're testing.
if __name__ == "__main__":
# pass
# To test part 2, comment out the pass line above and
# uncomment the following two lines.
# secret_word = choose_word(wordlist)
# hangman(secret_word)
#my_word = "t__t"
#other_word = "tadt"
# show_possible_matches(my_word)
###############
# To test part 3 re-comment out the above lines and
# uncomment the following two lines.
secret_word = choose_word(wordlist)
hangman_with_hints(secret_word)
``` |
{
"source": "JhooClan/QSimOv",
"score": 2
} |
#### File: JhooClan/QSimOv/qalg.py
```python
from qlibcj import *
def DJAlg(size, U_f, **kwargs): # U_f is the oracle, which must take x1..xn and y as qubits. After applying it, the qubit y must hold f(x1..xn) XOR y. The size argument is n + 1, where n is the number of input bits of f.
rnd.seed(kwargs.get('seed', None)) # To ensure repeatability we fix the seed before the experiment.
r = QRegistry(([0 for i in range(size - 1)] + [1])) # The qubits are initialized to zero (x1..xn), except the last one (y), which is initialized to one
r.applyGate(H(size)) # A Hadamard gate is applied to every qubit
r.applyGate(U_f) # The oracle is applied
r.applyGate(H(size - 1), I(1)) # A Hadamard gate is applied to every qubit except the last one
return r.measure([1 for i in range(size - 1)] + [0]) # The x qubits are measured; if the result is 0 the function is constant, otherwise it is not.
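# Illustrative usage (sketch, not part of the original source): with the balanced
# oracle Bal defined below, DJAlg(3, Bal(3)) should not measure all zeros on the x
# qubits, whereas a constant oracle such as I(3) should.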
def ExampleDJCircuit(size, U_f, **kwargs):
rnd.seed(kwargs.get('seed', None)) # To ensure repeatability we fix the seed before the experiment.
c = DJAlgCircuit(size, U_f, save=kwargs.get('save', True))
res = c.execute([0 for i in range(size - 1)]) # The qubits are initialized to zero (x1..xn), except the last one (y), initialized to one by the circuit as specified in its construction
print(all(i == 0 for i in res[1][0][:-1]))
return res # The qubits are initialized to zero (x1..xn), except the last one (y), initialized to one by the circuit as specified in its construction
def DJAlgCircuit(size, U_f, save=True): # U_f is the oracle, which must take x1..xn and y as qubits. After applying it, the qubit y must hold f(x1..xn) XOR y. The size argument is n + 1, where n is the number of input bits of f.
c = QCircuit("Deutsch-Josza Algorithm", save=save, ancilla=[1]) # The last QuBit when executing the algorithm is an ancilla, with its value set to 1
c.addLine(H(size)) # A Hadamard gate is applied to every qubit
c.addLine(U_f) # The oracle is applied
c.addLine(H(size - 1), I(1)) # A Hadamard gate is applied to every qubit except the last one
# f = lambda _, l: print(all(i == 0 for i in l[:-1])) # Function that will print True after the measurement if the function is constant
# c.addLine(Measure([1 for i in range(size - 1)] + [0], tasks=[f])) # The x qubits are measured; if the result is 0 the function is constant, otherwise it is not.
c.addLine(Measure([1 for i in range(size - 1)] + [0])) # The x qubits are measured; if the result is 0 the function is constant, otherwise it is not.
return c
'''
Creates an oracle U_f as defined in the Deutsch-Josza algorithm for a balanced function f: {0,1}^n ---> {0,1}, f(x) = msb(x) (most significant bit of x).
The argument n is not the number of input bits of f, but that number plus 1 (for the "output" qubit).
'''
def Bal(n):
b = I(n)
'''
The value of the qubit y is inverted in the cases where the most significant bit is 1.
A C-NOT gate would serve as U_f with the given definition of f when n = 2. Bal(2) = CNOT().
'''
for i in range(int((2**n)/2), (2**n) - 1, 2):
t = np.copy(b[i,:])
b[i], b[i+1] = b[i+1, :], t
return b
'''
U_f generated with n = 3:
1 0 0 0 0 0 0 0
0 1 0 0 0 0 0 0
0 0 1 0 0 0 0 0
0 0 0 1 0 0 0 0
0 0 0 0 0 1 0 0
0 0 0 0 1 0 0 0
0 0 0 0 0 0 0 1
0 0 0 0 0 0 1 0
The inputs are, starting with the most significant qubit: x1, x2 and y.
When the oracle is applied, it swaps the probability associated with |100> with that of |101>, and that of |110> with that of |111>.
More generally, the function Bal always returns a gate that, when applied to a set of qubits x1, ..., xn, y,
applies a C-NOT on x1 (control) and y (target), leaving the rest of the qubits untouched.
In this way the oracle puts in the qubit y the value of x1 XOR y. Since x1 is 0 for half of the possible inputs
and 1 for the other half, the function f is balanced, as it returns 0 for half of the possible inputs and 1 for the other half.
The oracle U_f thus behaves as indicated in the algorithm, with y <- f(x) XOR y.
'''
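# Quick sanity check (illustrative, not part of the original source): with n = 2
# the balanced oracle reduces to a plain C-NOT, so both matrices should coincide.
#   np.allclose(Bal(2), CNOT().m)  # expected to be True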
def Teleportation(qbit, **kwargs): # The qubit to be teleported. Although on a real quantum computer it is not possible to see the value of a qubit without collapsing it, on a simulator it is. A seed can be specified with seed = <seed>.
rnd.seed(kwargs.get('seed', None)) # The seed is fixed before starting the experiment. In this case it is taken as a parameter.
r = QRegistry([qbit, 0, 0]) # A registry is created with the qubit that must be sent to Alice, Bob's qubit and Alice's qubit, hereafter Q, B and A. B and A are initialized to |0>.
print ("Original registry:\n", r.state) # The state of the qubit registry is shown.
r.applyGate(I(1), H(1), I(1)) # A Hadamard gate is applied to B, now in a superposition of the states |0> and |1>, both with exactly the same probability.
r.applyGate(I(1), CNOT()) # A C-NOT gate is applied to B (control) and A (target).
print ("With Bell+ state:\n", r.state) # After applying the previous two gates we have a Bell+ state; B and A are entangled. The value of the registry is shown.
# This is where we work with the qubit Q that we want to send later. In this example we apply a Hadamard gate and then a phase shift of pi/2
r.applyGate(H(1), I(2))
r.applyGate(PhaseShift(np.pi/2), I(2))
# Once everything we want to do to the QuBit is done, we prepare the transmission
r.applyGate(CNOT(), I(1)) # A C-NOT gate is applied to Q (control) and B (target).
r.applyGate(H(1), I(2)) # A Hadamard gate is applied to Q.
print ("\nBefore measurement:\n", r.state) # The value of the registry before the measurement is shown.
m = r.measure([1,1,0]) # The qubits Q and B are measured.
print ("q0 = ", m[0], "\nq1 = ", m[1]) # The measurement result is shown
q0 = 0 # Two qubits, q0 and q1, are created to check that the teleportation succeeds.
q1 = 0 # Using them we will build a registry with the values it must have if the teleportation has been performed successfully.
if (m[1] == 1):
q1 = 1
r.applyGate(I(2), PauliX()) # If we obtained a 1 when measuring B, we rotate A on the X axis (Pauli-X or NOT)
if (m[0] == 1):
q0 = 1
r.applyGate(I(2), PauliZ()) # If we obtained a 1 when measuring Q, we rotate A on the Z axis (Pauli-Z).
er = QRegistry([q0, q1, qbit]) # The test registry mentioned above is created.
# And we apply the same operations to see what should be received, in this case Hadamard and PhaseShift.
er.applyGate(I(2), H(1))
er.applyGate(I(2), PhaseShift(np.pi/2))
print ("\nExpected result:\n", er.state, "\nResult:\n", r.state) # The contents of both registries are shown, the expected result and the one obtained.
print ("Assert: " + str(r.state == er.state))
return r # The registry obtained after applying the algorithm is returned.
def TeleportationCircuit(gate, save=True): # Receives as argument the gate to be executed on the first QuBit after building the Bell state with the last two.
qc = QCircuit("Teleportation", save=save, ancilla=[0, 0])
qc.addLine(I(1), H(1), I(1))
qc.addLine(I(1), CNOT())
# This is where we work with the qubit Q that we want to send later. The gate passed as a parameter is applied to it
qc.addLine(gate, I(2))
# Once everything we want to do to the QuBit is done, we prepare the transmission
qc.addLine(CNOT(), I(1)) # A C-NOT gate is applied to Q (control) and B (target).
qc.addLine(H(1), I(2)) # A Hadamard gate is applied to Q.
c1 = Condition([None, 1, None], PauliX())
c2 = Condition([1, None, None], PauliZ())
m = Measure([1, 1, 0], conds=[c1, c2], remove=True)
qc.addLine(m)
return qc # The circuit is returned.
def ExampleTC(value, gate, **kwargs): # value must be 0 or 1, the initial value of the QuBit to teleport. gate is the gate that will be applied to the QuBit to teleport.
rnd.seed(kwargs.get('seed', None)) # To ensure repeatability we fix the seed before the experiment.
# We design the gate that will be applied to the QuBit
#g = QGate()
#g.addLine(H(1))
#g.addLine(PhaseShift(np.pi/2))
c = TeleportationCircuit(gate, save=kwargs.get('save', True))
r = c.execute([value]) # The circuit is executed
exr = QRegistry([value])
exr.applyGate(gate)
print ("Expected result:\n", exr.state, "\nResult:\n", r.state)
print ("Assert: " + str(all((r.state == exr.state)[0])))
def TwoBitSubstractor(nums, **kwargs): # The two binary numbers to subtract are passed as a parameter as [A0, A1, B0, B1]. It returns the result in the highest-weight qubits, and the third qubit indicates whether there has been overflow
rnd.seed(kwargs.get('seed', None)) # To ensure repeatability we fix the seed before the experiment.
r = QRegistry(nums + [0,0,0,0,0,0,0]) # 7 ancilla bits set to 0 are needed in this implementation
r.applyGate(I(1), SWAP(), SWAP(), I(6))
r.applyGate(I(2), SWAP(), SWAP(), I(5))
r.applyGate(I(3), SWAP(), SWAP(), I(4))
r.applyGate(I(4), SWAP(), I(5))
r.applyGate(I(5), Substractor())
r.applyGate(I(5), SWAP(), I(4))
r.applyGate(I(4), SWAP(), I(5))
r.applyGate(I(3), SWAP(), I(6))
r.applyGate(I(2), SWAP(), I(7))
r.applyGate(Substractor(), I(5))
r.applyGate(I(5), SWAP(), I(4))
r.applyGate(I(4), SWAP(), I(5))
r.applyGate(I(3), SWAP(), I(6))
r.applyGate(I(2), SWAP(), I(7))
r.applyGate(I(1), SWAP(), I(8))
return r.measure([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
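# Illustrative usage (sketch, not part of the original source):
#   TwoBitSubstractor([a0, a1, b0, b1])
# with a0, a1, b0, b1 being 0 or 1, returns the measured bits of A - B in the
# higher-weight positions plus the overflow indicator described in the comment above.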
```
#### File: JhooClan/QSimOv/qlibcj.py
```python
import cmath as cm
import numpy as np
from structures.qregistry import *
from structures.qgate import *
from structures.qcircuit import *
# np.zeros((h,w), dtype=complex) initializes a matrix of complex numbers with height h and width w
# Matrix addition is done with +. A + B
# Multiplication by a scalar is done with *. n * A
# To multiply the matrices A and B, use np.dot(A,B)
# The Kronecker product of A and B is defined with np.kron(A,B)
def _hMat(n): # Returns a matrix that, when multiplied by 1/sqrt(2^n), results in the Hadamard gate for n bits
H = np.ones((2,2), dtype=complex)
H[1,1] = -1
if n > 1:
H = np.kron(H, _hMat(n - 1))
return H
def H(n): # Returns a Hadamard gate for n QuBits
H = QGate("H")
H.addLine(_hMat(n))
H.setMult(1 / np.sqrt(2**n))
return H
def PauliX(): # Also known as NOT
px = QGate("NOT")
m = np.array([0,1,1,0], dtype=complex)
m.shape = (2,2)
px.addLine(m)
return px
def PauliY():
py = QGate("Y")
m = np.array([0,-1j,1j,0], dtype=complex)
m.shape = (2,2)
py.addLine(m)
return py
def PauliZ():
pz = QGate("Z")
m = np.array([1,0,0,-1], dtype=complex)
m.shape = (2,2)
pz.addLine(m)
return pz
def SqrtNOT(): # Square root of NOT gate, usually seen in its controlled form C-√NOT. Sometimes called C-√X gate.
v = QGate("√NOT")
m = np.array([1, -1j, -1j, 1], dtype=complex)
m.shape = (2,2)
v.addLine(m)
v.setMult((1 + 1j)/2)
return v
def ControlledU(gate): # Returns a controlled version of the given gate
g = gate
name = "U"
if type(gate) == QGate:
g = gate.m
name = gate.name
gdim = g.shape[0]
m = np.eye(gdim*2, dtype=complex)
m[gdim:,gdim:] = g
cu = QGate("C-" + name)
cu.addLine(m)
return cu
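# Example (illustrative): ControlledU(PauliX()) builds the same matrix as CNOT(),
# and ControlledU(SWAP()) yields the Fredkin gate defined below.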
def CNOT(): # Returns a CNOT gate for two QuBits, also called Feynman gate
#return ControlledU(PauliX())
cn = QGate("C-NOT")
m = np.zeros((4,4), dtype=complex)
m[0,0] = 1
m[1,1] = 1
m[2,3] = 1
m[3,2] = 1
cn.addLine(m)
return cn
def NOTC(): # Returns a CNOT gate for two QuBits, first QuBit objective and second one control
#return SWAP() @ CNOT() @ SWAP()
nc = QGate("NOT-C")
m = np.zeros((4,4), dtype=complex)
m[0,0] = 1
m[3,1] = 1
m[2,2] = 1
m[1,3] = 1
nc.addLine(m)
return nc
def SWAP(): # SWAP gate for 2 qubits
sw = QGate("SWAP")
#m = np.zeros((4,4), dtype=complex)
#m[0,0] = 1
#m[1,2] = 1
#m[2,1] = 1
#m[3,3] = 1
#sw.addLine(m)
sw.addLine(CNOT())
sw.addLine(NOTC())
sw.addLine(CNOT())
return sw
def SqrtSWAP(): # Square root of SWAP gate for 2 qubits
sw = QGate("√SWAP")
m = np.zeros((4,4), dtype=complex)
m[0,0] = 1
m[1,1] = 0.5 * (1+1j)
m[1,2] = 0.5 * (1-1j)
m[2,1] = 0.5 * (1-1j)
m[2,2] = 0.5 * (1+1j)
m[3,3] = 1
sw.addLine(m)
return sw
def Toffoli(): # Returns a CCNOT gate for three QuBits. A, B, C -> P = A, Q = B, R = AB XOR C.
''' # This does the same as the line below. Circuit with the implementation of Toffoli gate using SWAP, CNOT, Controlled-SNot and Controlled-SNot+
# Gates needed (without control SWAPs): 5
gate = np.kron(I(1), ControlledU(V()))
gate = np.dot(gate, np.kron(SWAP(), I(1)))
gate = np.dot(gate, np.kron(I(1), ControlledU(V())))
gate = np.dot(gate, np.kron(CNOT(), I(1)))
gate = np.dot(gate, np.kron(I(1), ControlledU(Dagger(V()))))
gate = np.dot(gate, np.kron(CNOT(), I(1)))
gate = np.dot(gate, np.kron(SWAP(), I(1)))
return gate
'''
return ControlledU(CNOT())
def Fredkin(): # Returns a CSWAP gate for three QuBits
return ControlledU(SWAP())
def Deutsch(angle): # Returns the Deutsch gate with the specified angle. D(pi/2) = Toffoli
d = np.eye(8, dtype=complex)
can = np.cos(angle)
san = np.sin(angle)
d[6,6] = can * 1j
d[6,7] = san
d[7,6] = san
d[7,7] = can * 1j
g = QGate("D-" + str(angle))
g.addLine(d)
return g
def getSC(number): # Gets the number of significant figures of a given number
return len(str(number).replace('.', ''))
def setSC(number, sc): # Returns the specified number rounded to the specified number of significant figures
res = 0
num = str(number).split('.')
i = len(num[0])
d = 0
if i >= sc:
diff = i - sc
res = int(num[0][0:sc]+"0"*diff)
elif len(num) == 2:
d = len(num[1])
tsc = min(sc - i, d)
diff = 0
if sc - i > d:
diff = sc - i - d
res = float(num[0] + '.' + num[1][0:tsc]+"0"*diff)
if d > tsc and num[1][tsc] >= '5':
res += 10**-tsc
return res
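# Examples (illustrative, derived from the implementation above):
#   setSC(3.14159, 3) -> 3.14
#   setSC(1234, 2)    -> 1200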
def toComp(angle, sc=None): # Returns a complex number with modulus 1 and the specified phase.
while angle >= 2*np.pi:
angle -= 2*np.pi
if sc == None:
sc = getSC(angle)
res = np.around(np.cos(angle), decimals=sc-1) + np.around(np.sin(angle), decimals=sc-1)*1j
return res
def PhaseShift(angle): # Phase shift (R) gate, rotates qubit with specified angle (in radians)
ps = np.array([1, 0, 0, toComp(angle, 16)], dtype=complex)
ps.shape = (2,2)
g = QGate("R(" + str(angle) + ")")
return ps
def Peres(): # A, B, C -> P = A, Q = A XOR B, R = AB XOR C. Peres gate.
''' # Implementation of Peres gate with smaller gates.
# Gates needed (without control SWAPs): 4
p = QGate("Peres")
p.addLine(SWAP(), I(1))
p.addLine(I(1), ControlledU(SqrtNOT()))
p.addLine(SWAP(), I(1))
p.addLine(I(1), ControlledU(SqrtNOT()))
p.addLine(CNOT(), I(1))
p.addLine(I(1), ControlledU(Dagger(SqrtNOT())))
return p
'''
p = QGate("Peres")
p.addLine(Toffoli())
p.addLine(CNOT(), I(1))
return p
def R(): # A, B, C -> P = A XOR B, Q = A, R = AB XOR ¬C. R gate.
# Optimized implementation with smaller gates
# Gates needed (without control SWAPs): 6
r = QGate("R")
r.addLine(SWAP(), PauliX())
r.addLine(I(1), ControlledU(SqrtNOT()))
r.addLine(SWAP(), I(1))
r.addLine(I(1), ControlledU(SqrtNOT()))
r.addLine(CNOT(), I(1))
r.addLine(I(1), ControlledU(Dagger(SqrtNOT())))
r.addLine(SWAP(), I(1))
return r
def TR(): # A, B, C -> P = A, Q = A XOR B, R = A¬B XOR C. TR gate.
# Implementation of TR gate with smaller gates.
# Gates needed (without control SWAPs): 6
tr = QGate("TR")
tr.addLine(I(1), PauliX(), I(1))
tr.addLine(SWAP(), I(1))
tr.addLine(I(1), ControlledU(SqrtNOT()))
tr.addLine(SWAP(), I(1))
tr.addLine(I(1), ControlledU(SqrtNOT()))
tr.addLine(CNOT(), I(1))
tr.addLine(I(1), ControlledU(Dagger(SqrtNOT())))
tr.addLine(I(1), PauliX(), I(1))
return tr
def URG(): # A, B, C -> P = (A+B) XOR C, Q = B, R = AB XOR C.
# Implementation of URG gate with smaller gates.
# Gates needed (without control SWAPs): 8
urg = QGate("URG")
urg.addLine(I(1), ControlledU(SqrtNOT()))
urg.addLine(SWAP(), I(1))
urg.addLine(I(1), ControlledU(SqrtNOT()))
urg.addLine(CNOT(), I(1))
urg.addLine(I(1), ControlledU(Dagger(SqrtNOT())))
urg.addLine(CNOT(), I(1))
urg.addLine(I(1), SWAP())
urg.addLine(CNOT(), I(1))
urg.addLine(I(1), CNOT())
urg.addLine(CNOT(), I(1))
urg.addLine(I(1), SWAP())
urg.addLine(SWAP(), I(1))
return urg
def BJN(): # A, B, C -> P = A, Q = B, R = (A+B) XOR C. BJN gate.
# Implementation of TR gate with smaller gates.
# Gates needed (without control SWAPs): 5
bjn = QGate("BJN")
bjn.addLine(SWAP(), I(1))
bjn.addLine(I(1), ControlledU(SqrtNOT()))
bjn.addLine(SWAP(), I(1))
bjn.addLine(I(1), ControlledU(SqrtNOT()))
bjn.addLine(CNOT(), I(1))
bjn.addLine(I(1), ControlledU(SqrtNOT()))
bjn.addLine(CNOT(), I(1))
return bjn
def BlochCoords(qbit):
alpha = qbit[0][0]
pcorr = cm.rect(1, -cm.phase(alpha))
alpha *= pcorr
alpha = alpha.real
beta = qbit[0][1] * pcorr
theta = np.arccos(alpha) * 2
s = np.sin(theta/2)
if (s != 0):
phi = np.log(beta/s)/1j
phi = phi.real
else:
phi = 0.
return (theta, phi)
def getTruthTable(gate, ancilla=None, garbage=0, iterations=1): # Prints the truth table of the given gate.
# You can set the ancilla bits to not include them in the table, with the list of values they must have.
# For example, if you have two 0s and one 1 as ancilla bits, ancilla[0,0,1]. It always takes the last bits as the ancilla ones!
# The garbage=n removes the last n bits from the truth table, considering them garbage.
# For example, if you have 6 outputs and the last 4 outputs are garbage, only the value of the first two would be printed.
# Always removes the last n bits!
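# Illustrative usage (sketch, assuming a compatible QRegistry constructor):
#   getTruthTable(Toffoli().m, iterations=100)
# prints, for every input combination, the estimated probability of measuring 1
# on each output qubit.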
num = int(np.log2(gate.shape[0]))
mesd = {}
for iteration in range(iterations):
for i in range(0, gate.shape[0]):
nbin = [int(x) for x in bin(i)[2:]]
qinit = [0 for j in range(num - len(nbin))]
qinit += nbin
if ancilla == None or qinit[-len(ancilla):] == ancilla:
qr = QRegistry(qinit)
qr.applyGate(gate)
mes = qr.measure([1 for j in range(num-garbage)])
if ancilla != None:
ini = qinit[:-len(ancilla)]
else:
ini = qinit
if str(ini) not in mesd:
mesd[str(ini)] = np.zeros(num)
mesd[str(ini)] = [x + y for x, y in zip(mesd[str(ini)], mes)]
for k in mesd:
print(k + " -> " + str(["P(1)=" + str(v/iterations) if v/iterations != 1.0 and v/iterations != 0.0 else int(v/iterations) for v in mesd[k]]))
def QEq(q1, q2):
return np.array_equal(q1,q2) and str(q1) == str(q2)
def HalfSubstractor(): # A, B, 0 -> P = A-B, Q = Borrow, R = B = Garbage
hs = QGate("Half Substractor")
hs.addLine(SWAP(), I(1))
hs.addLine(TR())
hs.addLine(SWAP(), I(1))
hs.addLine(I(1), SWAP())
return hs
def Substractor(): # A, B, Bin, 0, 0, 0 -> P = A-B, Q = Borrow, R = B1 = Garbage, S = B1B2 = Garbage, T = Bin = Garbage, U = B = Garbage
# Can be used as a comparator. Q will be 0 if A>=B, 1 otherwise.
fs = QGate("Substractor")
fs.addLine(I(2), SWAP(), I(2))
fs.addLine(HalfSubstractor(), I(3))
fs.addLine(I(2), SWAP(), I(2))
fs.addLine(I(1), SWAP(), SWAP(), I(1))
fs.addLine(I(2), SWAP(), SWAP())
fs.addLine(HalfSubstractor(), I(3))
fs.addLine(I(2), SWAP(), I(2))
fs.addLine(I(3), SWAP(), I(1))
fs.addLine(I(1), URG(), I(2))
return fs
# Function that returns the 2^nth root of the unity
def nroot(n, rc = 14): # Rounds to 14 decimal places by default
r = cm.exp(2j * cm.pi / pow(2, n))
return round(r.real, rc) + round(r.imag, rc) * 1j
def RUnity(m, rc = 14):
ru = np.eye(2, dtype=complex)
ru[1,1] = nroot(m, rc)
g = QGate("RU" + str(m))
g.addLine(ru)
return g
def QFT(size, rc = 14):
'''
size = 4
uft = np.kron(Hadamard(1), I(size - 1))
uft = np.dot(uft, np.kron(np.dot(SWAP(), ControlledU(RUnity(2, rc))), I(size - 2)))
uft = np.dot(uft, np.kron(Hadamard(1), np.kron(np.dot(SWAP(), ControlledU(RUnity(3, rc))), I(size - 3))))
uft = np.dot(uft, np.kron(np.dot(SWAP(), ControlledU(RUnity(2, rc))), np.dot(SWAP(), ControlledU(RUnity(4, rc)))))
uft = np.dot(uft, np.kron(Hadamard(1), np.kron(np.dot(SWAP(), ControlledU(RUnity(3, rc))), I(size - 3))))
uft = np.dot(uft, np.kron(np.dot(SWAP(), ControlledU(RUnity(2, rc))), I(size - 2)))
uft = np.dot(uft, np.kron(Hadamard(1), I(size - 1)))
uft = np.dot(uft, np.kron(SWAP(), I(size - 2)))
uft = np.dot(uft, np.kron(I(size - 3), np.kron(SWAP(), I(size - 3))))
uft = np.dot(uft, np.kron(SWAP(), SWAP()))
uft = np.dot(uft, np.kron(I(size - 3), np.kron(SWAP(), I(size - 3))))
uft = np.dot(uft, np.kron(SWAP(), I(size - 2)))
return uft
'''
from tests.shor import DFT
return DFT(pow(2, size))
```
#### File: QSimOv/structures/qgate.py
```python
import numpy as np
import gc
class QGate(object):
def __init__(self, name="UNNAMED"):
self.m = 1
self.mult = 1
self.simple = True
self.lines = []
self.name = name
def __getitem__(self, key):
return self.m[key]
def __setitem__(self, key, value):
self.m[key] = value
def __delitem__(self, key):
del self.m[key]
def __repr__(self):
return self.name
def __str__(self):
return self.name
def __lt__(self, other):
m = other
if type(other) == QGate:
m = other.m
return self.m.__lt__(m)
def __le__(self, other):
m = other
if type(other) == QGate:
m = other.m
return self.m.__le__(m)
def __eq__(self, other):
m = other
if type(other) == QGate:
m = other.m
return self.m.__eq__(m)
def __ne__(self, other):
m = other
if type(other) == QGate:
m = other.m
return self.m.__ne__(m)
def __gt__(self, other):
m = other
if type(other) == QGate:
m = other.m
return self.m.__gt__(m)
def __ge__(self, other):
m = other
if type(other) == QGate:
m = other.m
return self.m.__ge__(m)
def __add__(self, other):
m = other
if type(other) == QGate:
m = other.m
sol = QGate()
sol.addLine(self.m.__add__(m))
return sol
def __sub__(self, other):
m = other
if type(other) == QGate:
m = other.m
sol = QGate()
sol.addLine(self.m.__sub__(m))
return sol
def __mod__(self, other):
m = other
if type(other) == QGate:
m = other.m
sol = QGate()
sol.addLine(self.m.__mod__(m))
return sol
def __mul__(self, other):
m = other
if type(other) == QGate:
m = other.m
sol = QGate()
sol.addLine(self.m.__mul__(m))
return sol
def __rmul__(self, other):
m = other
if type(other) == QGate:
m = other.m
sol = QGate()
sol.addLine(self.m.__rmul__(m))
return sol
def __imul__(self, other):
m = other
if type(other) == QGate:
m = other.m
sol = QGate()
sol.addLine(self.m.__rmul__(m))
return sol
def __matmul__(self, other):
m = other
if type(other) == QGate:
m = other.m
sol = QGate()
sol.addLine(np.dot(self.m, m))
return sol
def __pow__(self, other):
m = other
if type(other) == QGate:
m = other.m
sol = QGate()
sol.addLine(np.kron(self.m, m))
return sol
def addLine(self, *args):
self.lines.append(list(args))
if self.simple and (len(list(args)) > 1 or len(self.lines) > 1):
self.simple = False
aux = 1
for gate in args:
g = gate
if type(gate) == QGate:
g = gate.m
aux = np.kron(aux, g)
gc.collect()
self.m = np.dot(aux, self.m)
gc.collect()
def setMult(self, mult):
self.m *= mult/self.mult
self.mult = mult
def addMult(self, mult):
self.m *= mult
self.mult *= mult
def setName(self, name):
self.name = name
def I(n): # Returns Identity Matrix for the specified number of QuBits
#IM = np.array([[1,0],[0,1]], dtype=complex)
#if n > 1:
# IM = np.kron(IM, I(n - 1))
return np.eye(2**n, dtype=complex)
def _getMatrix(gate):
m = gate
if type(gate) == QGate:
m = gate.m
return m
def unitaryMatrix(mat, decimals=10):
mustbei = np.around(np.dot(_getMatrix(mat), _getMatrix(dagger(mat))), decimals=decimals)
return (mustbei == I(int(np.log2(mustbei.shape[0])))).all()
def normalizeGate(mat):
det = np.linalg.det(mat)
if det != 0:
return mat/det
else:
return None
def transpose(gate): # Returns the Transpose of the given matrix
if type(gate) == QGate:
t = QGate(gate.name + "T")
t.addLine(np.matrix.transpose(gate.m))
else:
t = QGate("UT")
t.addLine(np.matrix.transpose(gate))
return t
def dagger(gate): # Returns the Hermitian Conjugate or Conjugate Transpose of the given matrix
if type(gate) == QGate:
t = QGate(gate.name + "†")
if gate.simple:
t.addLine(dagger(gate.m))
else:
lines = gate.lines[::-1]
for line in lines:
t.addLine(*[dagger(g).m for g in line])
t.setMult(gate.mult)
else:
t = QGate("U†")
t.addLine(np.matrix.getH(gate))
return t
def invert(gate): # Returns the inverse of the given matrix
if type(gate) == QGate:
t = QGate(gate.name + "-¹")
t.addLine(np.linalg.inv(gate.m))
else:
t = QGate("U-¹")
t.addLine(np.linalg.inv(gate))
return t
```
#### File: QSimOv/structures/qregistry.py
```python
import cmath as cm
import numpy as np
import random as rnd
from structures.qgate import _getMatrix
class QRegistry:
def __init__(self, nqbits, **kwargs):
# nqbits -> number of QuBits in the registry.
# Seed for the Pseudo Random Number Generation can be specified with seed = <seed> as an argument.
self.state = np.zeros(2**nqbits, dtype=complex)
self.state[0] = 1
self.state.shape = (1, 2**nqbits)
def measure(self, msk, remove = False): # List of numbers with the QuBits that should be measured. 0 means not measuring that qubit, 1 otherwise. remove = True if you want to remove a QuBit from the registry after measuring
if (type(msk) != list or len(msk) != int(np.log2(self.state.size)) or \
not all(type(num) == int and (num == 0 or num == 1) for num in msk)):
raise ValueError('Not valid mask')
mask = []
for i in range(len(msk)):
if msk[i] == 1:
mask.append(i)
tq = int(np.log2(self.state.size))
if (not all(num < tq and num > -1 for num in mask)):
raise ValueError('Out of range')
mes = []
for qbit in mask:
r = rnd.random()
p = 0
max = 2**(tq - (qbit + 1))
cnt = 0
rdy = True
for i in range(0, self.state.size):
if (cnt == max):
rdy = not rdy
cnt = 0
if (rdy):
p += cm.polar(self.state[0,i])[0]**2
cnt += 1
if (r < p):
me = 0
else:
me = 1
mes.append(me)
self.collapse((tq - (qbit + 1)), me, remove)
return mes
def applyGate(self, *gates): # Applies a quantum gate to the registry.
gate = _getMatrix(gates[0])
for g in list(gates)[1:]:
gate = np.kron(gate, _getMatrix(g))
self.state = np.transpose(np.dot(gate, ket(self.state)))
def collapse(self, qbit, mes, remove): # Collapses a qubit from the registry. qbit is the id of the qubit, numerated as q0..qn in the registry. mes is the value obtained when measuring it. remove indicates whether it should be removed from the registry.
max = 2**qbit
cnt = 0
rdy = mes == 1
mfd = []
for i in range(0, self.state.size):
if (cnt == max):
rdy = not rdy
cnt = 0
if (rdy):
self.state[0, i] = 0
mfd.append(i)
cnt += 1
if (remove):
for qbit in mfd[::-1]:
self.state = np.delete(self.state, qbit, 1)
normalize(self.state)
def densityMatrix(self):
return np.dot(ket(self.state), bra(self.state))
def vnEntropy(self, **kwargs):
base = kwargs.get('base', "e")
#dm = self.densityMatrix()
#evalues, m = np.linalg.eig(dm)
entropy = 0
#for e in evalues:
# if e != 0:
# entropy += e * np.log(e)
for amp in self.state[0]:
p = cm.polar(amp)[0]**2
if p > 0:
if base == "e":
entropy += p * np.log(p)
elif type(base) == int or type(base) == float:
entropy += p * np.log(p)/np.log(base)
return -entropy
def prob(q, x): # Returns the probability of obtaining x when measuring the qubit q
p = 0
if (x < q.size):
p = cm.polar(q[0,x])[0]**2
return p
def bra(v): # Returns the vector passed as a parameter in conjugated row form. <v|
b = v[:]
s = v.shape
if s[0] != 1:
b = np.matrix.getH(b)
else:
b = np.conjugate(b)
return b
def ket(v): # Returns the vector passed as a parameter in column form. |v>
k = v[:]
s = v.shape
if s[1] != 1:
k = np.transpose(k)
return k
def superposition(x, y): # Returns the state composed of the two QuBits.
z = np.kron(x, y)
normalize(z)
return z
def normalize(state): # Ensures that the property |a|^2 + |b|^2 = 1 holds for any QuBit. If it does not hold, the QuBit is modified so that it does, when possible.
sqs = 0
for i in range(0, state.size):
sqs += cm.polar(state[0, i])[0]**2
sqs = np.sqrt(sqs)
if (sqs == 0):
raise ValueError('Impossible QuBit')
if (sqs != 1):
for bs in state:
bs /= sqs
def QBit(a,b): # Returns a QuBit built from a and b. q = a|0> + b|1>, with a and b complex
q = np.array([a,b], dtype=complex)
q.shape = (1,2)
normalize(q)
return q
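# Example (illustrative): QBit(1, 1) yields the balanced superposition
# (|0> + |1>)/sqrt(2), since normalize() rescales the amplitudes.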
def QZero(): # Returns a QuBit in state |0>
q = np.array([complex(1,0),complex(0,0)])
q.shape = (1,2)
return q
def QOne(): # Returns a QuBit in state |1>
q = np.array([complex(0,0),complex(1,0)])
q.shape = (1,2)
return q
``` |
{
"source": "jhoofwijk/adaptive",
"score": 2
} |
#### File: adaptive/learner/integrator_learner.py
```python
import sys
from collections import defaultdict
from math import sqrt
from operator import attrgetter
import numpy as np
from scipy.linalg import norm
from sortedcontainers import SortedSet
from adaptive.learner.base_learner import BaseLearner
from adaptive.notebook_integration import ensure_holoviews
from adaptive.utils import cache_latest, restore
from .integrator_coeffs import (T_left, T_right, V_inv, Vcond, alpha, b_def,
eps, gamma, hint, min_sep, ndiv_max, ns, xi)
def _downdate(c, nans, depth):
# This is algorithm 5 from the thesis of <NAME>.
b = b_def[depth].copy()
m = ns[depth] - 1
for i in nans:
b[m + 1] /= alpha[m]
xii = xi[depth][i]
b[m] = (b[m] + xii * b[m + 1]) / alpha[m - 1]
for j in range(m - 1, 0, -1):
b[j] = ((b[j] + xii * b[j + 1] - gamma[j + 1] * b[j + 2])
/ alpha[j - 1])
b = b[1:]
c[:m] -= c[m] / b[m] * b[:m]
c[m] = 0
m -= 1
return c
def _zero_nans(fx):
"""Caution: this function modifies fx."""
nans = []
for i in range(len(fx)):
if not np.isfinite(fx[i]):
nans.append(i)
fx[i] = 0.0
return nans
def _calc_coeffs(fx, depth):
"""Caution: this function modifies fx."""
nans = _zero_nans(fx)
c_new = V_inv[depth] @ fx
if nans:
fx[nans] = np.nan
c_new = _downdate(c_new, nans, depth)
return c_new
class DivergentIntegralError(ValueError):
pass
class _Interval:
"""
Attributes
----------
(a, b) : (float, float)
The left and right boundary of the interval.
c : numpy array of shape (4, 33)
Coefficients of the fit.
depth : int
The level of refinement, `depth=0` means that it has 5 (the minimal
number of) points and `depth=3` means it has 33 (the maximal number
of) points.
fx : numpy array of size `(5, 9, 17, 33)[self.depth]`.
The function values at the points `self.points(self.depth)`.
igral : float
The integral value of the interval.
err : float
The error associated with the integral value.
rdepth : int
The number of splits that the interval has gone through, starting at 1.
ndiv : int
A number that is used to determine whether the interval is divergent.
parent : _Interval
The parent interval.
children : list of `_Interval`s
The intervals resulting from a split.
done_points : dict
A dictionary with the x-values and y-values: `{x1: y1, x2: y2 ...}`.
done : bool
The integral and the error for the interval has been calculated.
done_leaves : set or None
Leaves used for the error and the integral estimation of this
interval. None means that this information was already propagated to
the ancestors of this interval.
depth_complete : int or None
The level of refinement at which the interval has the integral value
evaluated. If None there is no level at which the integral value is
known yet.
Methods
-------
refinement_complete : depth, optional
If true, all the function values in the interval are known at `depth`.
By default the depth is the depth of the interval.
"""
__slots__ = [
'a', 'b', 'c', 'c00', 'depth', 'igral', 'err', 'fx', 'rdepth',
'ndiv', 'parent', 'children', 'done_points', 'done_leaves',
'depth_complete', 'removed',
]
def __init__(self, a, b, depth, rdepth):
self.children = []
self.done_points = {}
self.a = a
self.b = b
self.depth = depth
self.rdepth = rdepth
self.done_leaves = set()
self.depth_complete = None
self.removed = False
@classmethod
def make_first(cls, a, b, depth=2):
ival = _Interval(a, b, depth, rdepth=1)
ival.ndiv = 0
ival.parent = None
ival.err = sys.float_info.max # needed because inf/2 == inf
return ival
@property
def T(self):
"""Get the correct shift matrix.
Should only be called on children of a split interval.
"""
assert self.parent is not None
left = self.a == self.parent.a
right = self.b == self.parent.b
assert left != right
return T_left if left else T_right
def refinement_complete(self, depth):
"""The interval has all the y-values to calculate the intergral."""
if len(self.done_points) < ns[depth]:
return False
return all(p in self.done_points for p in self.points(depth))
def points(self, depth=None):
if depth is None:
depth = self.depth
a = self.a
b = self.b
return (a + b) / 2 + (b - a) * xi[depth] / 2
def refine(self):
self.depth += 1
return self
def split(self):
points = self.points()
m = points[len(points) // 2]
ivals = [_Interval(self.a, m, 0, self.rdepth + 1),
_Interval(m, self.b, 0, self.rdepth + 1)]
self.children = ivals
for ival in ivals:
ival.parent = self
ival.ndiv = self.ndiv
ival.err = self.err / 2
return ivals
def calc_igral(self):
self.igral = (self.b - self.a) * self.c[0] / sqrt(2)
def update_heuristic_err(self, value):
"""Sets the error of an interval using a heuristic (half the error of
the parent) when the actual error cannot be calculated due to its
parents not being finished yet. This error is propagated down to its
children."""
self.err = value
for child in self.children:
if child.depth_complete or (child.depth_complete == 0
and self.depth_complete is not None):
continue
child.update_heuristic_err(value / 2)
def calc_err(self, c_old):
c_new = self.c
c_diff = np.zeros(max(len(c_old), len(c_new)))
c_diff[:len(c_old)] = c_old
c_diff[:len(c_new)] -= c_new
c_diff = norm(c_diff)
self.err = (self.b - self.a) * c_diff
for child in self.children:
if child.depth_complete is None:
child.update_heuristic_err(self.err / 2)
return c_diff
def calc_ndiv(self):
div = (self.parent.c00 and self.c00 / self.parent.c00 > 2)
self.ndiv += div
if self.ndiv > ndiv_max and 2 * self.ndiv > self.rdepth:
raise DivergentIntegralError
if div:
for child in self.children:
child.update_ndiv_recursively()
def update_ndiv_recursively(self):
self.ndiv += 1
if self.ndiv > ndiv_max and 2 * self.ndiv > self.rdepth:
raise DivergentIntegralError
for child in self.children:
child.update_ndiv_recursively()
def complete_process(self, depth):
"""Calculate the integral contribution and error from this interval,
and update the done leaves of all ancestor intervals."""
assert self.depth_complete is None or self.depth_complete == depth - 1
self.depth_complete = depth
fx = [self.done_points[k] for k in self.points(depth)]
self.fx = np.array(fx)
force_split = False # This may change when refining
first_ival = self.parent is None and depth == 2
if depth and not first_ival:
# Store for usage in refine
c_old = self.c
self.c = _calc_coeffs(self.fx, depth)
if first_ival:
self.c00 = 0.0
return False, False
self.calc_igral()
if depth:
# Refine
c_diff = self.calc_err(c_old)
force_split = c_diff > hint * norm(self.c)
else:
# Split
self.c00 = self.c[0]
if self.parent.depth_complete is not None:
c_old = self.T[:, :ns[self.parent.depth_complete]] @ self.parent.c
self.calc_err(c_old)
self.calc_ndiv()
for child in self.children:
if child.depth_complete is not None:
child.calc_ndiv()
if child.depth_complete == 0:
c_old = child.T[:, :ns[self.depth_complete]] @ self.c
child.calc_err(c_old)
if self.done_leaves is not None and not len(self.done_leaves):
# This interval contributes to the integral estimate.
self.done_leaves = {self}
# Use this interval in the integral estimates of the ancestors
# while possible.
ival = self.parent
old_leaves = set()
while ival is not None:
unused_children = [child for child in ival.children
if child.done_leaves is not None]
if not all(len(child.done_leaves) for child in unused_children):
break
if ival.done_leaves is None:
ival.done_leaves = set()
old_leaves.add(ival)
for child in ival.children:
if child.done_leaves is None:
continue
ival.done_leaves.update(child.done_leaves)
child.done_leaves = None
ival.done_leaves -= old_leaves
ival = ival.parent
remove = self.err < (abs(self.igral) * eps * Vcond[depth])
return force_split, remove
def __repr__(self):
lst = [
f'(a, b)=({self.a:.5f}, {self.b:.5f})',
f'depth={self.depth}',
f'rdepth={self.rdepth}',
f'err={self.err:.5E}',
'igral={:.5E}'.format(self.igral if hasattr(self, 'igral') else np.inf),
]
return ' '.join(lst)
class IntegratorLearner(BaseLearner):
def __init__(self, function, bounds, tol):
"""
Parameters
----------
function : callable: X → Y
The function to learn.
bounds : pair of reals
The bounds of the interval on which to learn 'function'.
tol : float
Relative tolerance of the error to the integral, this means that
the learner is done when: `tol > err / abs(igral)`.
Attributes
----------
approximating_intervals : set of intervals
The intervals that can be used in the determination of the integral.
n : int
The total number of evaluated points.
igral : float
The integral value in `self.bounds`.
err : float
The absolute error associated with `self.igral`.
max_ivals : int, default: 1000
Maximum number of intervals that can be present in the calculation
of the integral. If this amount exceeds max_ivals, the interval
with the smallest error will be discarded.
Methods
-------
done : bool
Returns whether the `tol` has been reached.
plot : hv.Scatter
Plots all the points that are evaluated.
"""
self.function = function
self.bounds = bounds
self.tol = tol
self.max_ivals = 1000
self.priority_split = []
self.done_points = {}
self.pending_points = set()
self._stack = []
self.x_mapping = defaultdict(lambda: SortedSet([], key=attrgetter('rdepth')))
self.ivals = set()
ival = _Interval.make_first(*self.bounds)
self.add_ival(ival)
self.first_ival = ival
@property
def approximating_intervals(self):
return self.first_ival.done_leaves
def tell(self, point, value):
if point not in self.x_mapping:
raise ValueError("Point {} doesn't belong to any interval"
.format(point))
self.done_points[point] = value
self.pending_points.discard(point)
# Select the intervals that have this point
ivals = self.x_mapping[point]
for ival in ivals:
ival.done_points[point] = value
if ival.depth_complete is None:
from_depth = 0 if ival.parent is not None else 2
else:
from_depth = ival.depth_complete + 1
for depth in range(from_depth, ival.depth + 1):
if ival.refinement_complete(depth):
force_split, remove = ival.complete_process(depth)
if remove:
# Remove the interval (while remembering the excess
# integral and error), since it is either too narrow,
# or the estimated relative error is already at the
# limit of numerical accuracy and cannot be reduced
# further.
self.propagate_removed(ival)
elif force_split and not ival.children:
# If it already has children it has already been split
assert ival in self.ivals
self.priority_split.append(ival)
def tell_pending(self):
pass
def propagate_removed(self, ival):
def _propagate_removed_down(ival):
ival.removed = True
self.ivals.discard(ival)
for child in ival.children:
_propagate_removed_down(child)
_propagate_removed_down(ival)
def add_ival(self, ival):
for x in ival.points():
# Update the mappings
self.x_mapping[x].add(ival)
if x in self.done_points:
self.tell(x, self.done_points[x])
elif x not in self.pending_points:
self.pending_points.add(x)
self._stack.append(x)
self.ivals.add(ival)
def ask(self, n, tell_pending=True):
"""Choose points for learners."""
if not tell_pending:
with restore(self):
return self._ask_and_tell_pending(n)
else:
return self._ask_and_tell_pending(n)
def _ask_and_tell_pending(self, n):
points, loss_improvements = self.pop_from_stack(n)
n_left = n - len(points)
while n_left > 0:
assert n_left >= 0
try:
self._fill_stack()
except ValueError:
raise RuntimeError("No way to improve the integral estimate.")
new_points, new_loss_improvements = self.pop_from_stack(n_left)
points += new_points
loss_improvements += new_loss_improvements
n_left -= len(new_points)
return points, loss_improvements
def pop_from_stack(self, n):
points = self._stack[:n]
self._stack = self._stack[n:]
loss_improvements = [max(ival.err for ival in self.x_mapping[x])
for x in points]
return points, loss_improvements
def remove_unfinished(self):
pass
def _fill_stack(self):
# XXX: to-do if all the ivals have err=inf, take the interval
# with the lowest rdepth and no children.
force_split = bool(self.priority_split)
if force_split:
ival = self.priority_split.pop()
else:
ival = max(self.ivals, key=lambda x: (x.err, x.a))
assert not ival.children
# If the interval points are smaller than machine precision, then
# don't continue with splitting or refining.
points = ival.points()
if (points[1] - points[0] < points[0] * min_sep
or points[-1] - points[-2] < points[-2] * min_sep):
self.ivals.remove(ival)
elif ival.depth == 3 or force_split:
# Always split when depth is maximal or if refining didn't help
self.ivals.remove(ival)
for ival in ival.split():
self.add_ival(ival)
else:
self.add_ival(ival.refine())
# Remove the interval with the smallest error
# if number of intervals is larger than max_ivals
if len(self.ivals) > self.max_ivals:
self.ivals.remove(min(self.ivals, key=lambda x: (x.err, x.a)))
return self._stack
@property
def npoints(self):
"""Number of evaluated points."""
return len(self.done_points)
@property
def igral(self):
return sum(i.igral for i in self.approximating_intervals)
@property
def err(self):
if self.approximating_intervals:
err = sum(i.err for i in self.approximating_intervals)
if err > sys.float_info.max:
err = np.inf
else:
err = np.inf
return err
def done(self):
err = self.err
igral = self.igral
err_excess = sum(i.err for i in self.approximating_intervals
if i.removed)
return (err == 0
or err < abs(igral) * self.tol
or (err - err_excess < abs(igral) * self.tol < err_excess)
or not self.ivals)
@cache_latest
def loss(self, real=True):
return abs(abs(self.igral) * self.tol - self.err)
def plot(self):
hv = ensure_holoviews()
ivals = sorted(self.ivals, key=attrgetter('a'))
if not self.done_points:
return hv.Path([])
xs, ys = zip(*[(x, y) for ival in ivals
for x, y in sorted(ival.done_points.items())])
return hv.Path((xs, ys))
def _get_data(self):
# Change the defaultdict of SortedSets to a normal dict of sets.
x_mapping = {k: set(v) for k, v in self.x_mapping.items()}
return (self.priority_split,
self.done_points,
self.pending_points,
self._stack,
x_mapping,
self.ivals,
self.first_ival)
def _set_data(self, data):
self.priority_split, self.done_points, self.pending_points, \
self._stack, x_mapping, self.ivals, self.first_ival = data
# Add the pending_points to the _stack such that they are evaluated again
for x in self.pending_points:
if x not in self._stack:
self._stack.append(x)
# x_mapping is a data structure that can't easily be saved
# so we recreate it here
self.x_mapping = defaultdict(lambda: SortedSet([], key=attrgetter('rdepth')))
for k, _set in x_mapping.items():
self.x_mapping[k].update(_set)
```
#### File: adaptive/docs/logo.py
```python
import os
import sys
sys.path.insert(0, os.path.abspath('..')) # to get adaptive on the path
import adaptive
import holoviews
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
from PIL import Image, ImageDraw
holoviews.notebook_extension('matplotlib')
def create_and_run_learner():
def ring(xy):
import numpy as np
x, y = xy
a = 0.2
return x + np.exp(-(x**2 + y**2 - 0.75**2)**2/a**4)
learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
adaptive.runner.simple(learner, goal=lambda l: l.loss() < 0.01)
return learner
def plot_learner_and_save(learner, fname):
fig, ax = plt.subplots()
tri = learner.ip().tri
triang = mtri.Triangulation(*tri.points.T, triangles=tri.vertices)
ax.triplot(triang, c='k', lw=0.8)
ax.imshow(learner.plot().Image.I.data, extent=(-0.5, 0.5, -0.5, 0.5))
ax.set_xticks([])
ax.set_yticks([])
plt.savefig(fname, bbox_inches="tight", transparent=True, dpi=300, pad_inches=-0.1)
def add_rounded_corners(fname, rad):
im = Image.open(fname)
circle = Image.new('L', (rad * 2, rad * 2), 0)
draw = ImageDraw.Draw(circle)
draw.ellipse((0, 0, rad * 2, rad * 2), fill=255)
alpha = Image.new('L', im.size, 255)
w, h = im.size
alpha.paste(circle.crop((0, 0, rad, rad)), (0, 0))
alpha.paste(circle.crop((0, rad, rad, rad * 2)), (0, h - rad))
alpha.paste(circle.crop((rad, 0, rad * 2, rad)), (w - rad, 0))
alpha.paste(circle.crop((rad, rad, rad * 2, rad * 2)), (w - rad, h - rad))
im.putalpha(alpha)
return im
if __name__ == '__main__':
learner = create_and_run_learner()
fname = 'source/_static/logo_docs.png'
plot_learner_and_save(learner, fname)
im = add_rounded_corners(fname, rad=200)
im.thumbnail((200, 200), Image.ANTIALIAS) # resize
im.save(fname)
``` |
{
"source": "JHoogendijk/Differences-in-reaction-speed-when-reacting-to-changes-in-rotation-and-changes-in-contrast",
"score": 3
} |
#### File: JHoogendijk/Differences-in-reaction-speed-when-reacting-to-changes-in-rotation-and-changes-in-contrast/__init__.py
```python
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from scipy import stats
import logging, sys, json
logging.basicConfig(stream=sys.stderr)
db = SQLAlchemy()
def serialize_list(e):
d = dict()
i = 0
for item in e:
d[i] = item.serialize
i = i + 1
return d
class Data(db.Model):
id = db.Column(db.Integer, primary_key=True)
participant_id = db.Column(db.Integer, db.ForeignKey('participant.id'), primary_key=True)
participant = db.relationship('Participant', foreign_keys=[participant_id], backref="data")
reaction_time = db.Column(db.Integer)
def __init__(self, id, participant_id, reaction_time):
self.participant_id = participant_id
self.reaction_time = reaction_time
self.id = id
@property
def serialize(self):
return {
"id":self.id,
"reaction_time":self.reaction_time
}
class Participant(db.Model):
id = db.Column(db.Integer, primary_key=True)
gender = db.Column(db.Text)
age = db.Column(db.Integer)
monitor = db.Column(db.Text)
average_time = db.Column(db.Integer)
def __init__(self, gender, age, monitor, average_time):
self.gender = gender
self.age = age
self.monitor = monitor
self.average_time = average_time
@property
def serialize(self):
return {
"id":self.id,
"gender":self.gender,
"age":self.age,
"monitor":self.monitor,
"average_time":self.average_time,
"data":serialize_list(self.data)
}
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./experiment.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
app.app_context().push()
@app.route("/")
def hello():
return app.send_static_file('index.html')
@app.route("/js/app.js")
def appfile():
return app.send_static_file('js/app.js')
@app.route("/save", methods=['POST'])
def save():
data = request.json
results = data.get("results")
participant = Participant(data.get("gender"), data.get("age"), data.get("monitor"), data.get("averageTime"))
db.session.add(participant)
db.session.commit()
i=0
for result in results:
db.session.add(Data(i, participant.id, result))
i = i + 1
db.session.commit()
all_participants = Participant.query.all()
averages = []
for p in all_participants:
averages.append(p.average_time)
percentile = 100-stats.stats.percentileofscore(averages, participant.average_time, kind="mean")
return jsonify({'percentile':percentile})
@app.route("/getData")
def getData():
return jsonify(serialize_list(Participant.query.all()))
@app.route("/performTTest")
def perform_ttest():
orientation_data = []
contrast_data = []
all_participants = Participant.query.all()
for participant in all_participants:
for x in range(0, 5):
if participant.data[x].reaction_time != 0 and participant.data[x+5].reaction_time != 0:
orientation_data.append(participant.data[x].reaction_time)
contrast_data.append(participant.data[x+5].reaction_time)
statistic, pvalue = stats.ttest_rel(orientation_data, contrast_data)
return jsonify({
"statistic":statistic,
"pvalue":pvalue
})
@app.route("/getNormalizedData")
def get_normalized_data():
orientation_data = []
contrast_data = []
all_participants = Participant.query.all()
for participant in all_participants:
for x in range(0, 5):
if participant.data[x].reaction_time != 0 and participant.data[x+5].reaction_time != 0:
orientation_data.append(participant.data[x].reaction_time)
contrast_data.append(participant.data[x+5].reaction_time)
return jsonify({
"orientation":json.dumps(orientation_data),
"contrast":json.dumps(contrast_data)
})
@app.route("/getDataExcel")
def get_excel_data():
orientation_data = []
contrast_data = []
all_participants = Participant.query.all()
for participant in all_participants:
for x in range(0, 5):
orientation_data.append(participant.data[x].reaction_time)
contrast_data.append(participant.data[x+5].reaction_time)
return jsonify({
"orientation":json.dumps(orientation_data),
"contrast":json.dumps(contrast_data)
})
if __name__ == "__main__":
app.run()
``` |
{
"source": "jhoogmoed/HumanThreatPerception",
"score": 3
} |
#### File: htpmKitti/model/compare.py
```python
from math import isnan
import os
import shutil
import numpy as np
import random
from numpy.lib.function_base import average
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
import cv2
# import simpledorff
from pandas.core.frame import DataFrame
from sklearn import decomposition, datasets
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import cohen_kappa_score
class analyse:
def __init__(self, dataPath, drive, mergedDataFile, modelDataFile, results_folder):
# Docstring
"Class container for analysing functions."
self.modelDataFile = modelDataFile
# Set paths
self.dataPath = dataPath
self.drive = drive
self.results_folder = results_folder
# Load data
self.merge_data = pd.read_csv(mergedDataFile)
print('Created analyse class')
def get_responses(self):
# Get response per frame
indices = []
all_responses = []
for key in self.merge_data.keys():
for stimulus_index in range(0, len(self.merge_data.keys())):
response_column = 'Stimulus %s: response' % stimulus_index
if response_column in key:
indices.append(int(stimulus_index))
all_responses.append(self.merge_data[response_column])
# Create response DataFrame
self.response_data = pd.DataFrame(all_responses, index=indices)
self.response_data.sort_index(inplace=True)
self.response_data.columns = self.merge_data['Meta:worker_code']
self.response_data = self.response_data.transpose()
# Get mean and std of responses
self.response_mean = self.response_data.mean(skipna=True)
self.response_std = self.response_data.std(skipna=True)
# Get normalized response
self.response_normal = (
self.response_data-self.response_mean.mean())/self.response_std.mean()
# Save responses
response_data = pd.DataFrame(self.response_data)
response_data.to_csv(self.results_folder +
'filtered_responses/' + 'response_data.csv')
# Get anonymous data
survey_data = pd.DataFrame(
self.merge_data.loc[:, 'about_how_many_kilometers_miles_did_you_drive_in_the_last_12_months':'which_input_device_are_you_using_now'])
survey_data.index = survey_data['Meta:worker_code']
survey_data.pop('Meta:worker_code')
self.survey_data = survey_data.join(response_data)
self.survey_data.to_csv(self.results_folder +
'filtered_responses/' + 'survey_data.csv')
# print(survey_data)
print('Got responses')
def find_outliers(self, thresh):
# Find outliers
self.bad_indices = []
for index in self.response_data.index:
if(self.response_data.loc[index].std(skipna=True) < thresh):
self.bad_indices.append(index)
self.response_data = self.response_data.drop(index)
print('Found outliers')
def info(self):
# Get mean and std of responses
self.response_mean = self.response_data.mean(skipna=True)
self.response_std = self.response_data.std(skipna=True)
# Get general info on data
self.data_description = self.response_data.describe()
self.data_description.to_csv(
self.results_folder + 'filtered_responses/' + 'self.data_description.csv')
# print(self.data_description)
print('Got info')
def split(self, useNorm=False):
# Chose usage of normalized data
if useNorm == True:
data = self.response_normal
else:
data = self.response_data
# Get mean and std of responses
self.response_mean = data.mean(skipna=True)
self.response_std = data.std(skipna=True)
# Find middle
middle = int(round(len(self.response_data)/2))
# Get first half of data
self.response_data_first = data[0:middle]
self.response_mean_first = self.response_data_first.mean(skipna=True)
self.response_std_first = self.response_data_first.std(skipna=True)
# Get last half of data
self.response_data_last = data[(middle+1):len(data)]
self.response_mean_last = self.response_data_last.mean(skipna=True)
self.response_std_last = self.response_data_last.std(skipna=True)
# Get correlation of first and last half
r_fl = self.response_mean_first.corr(self.response_mean_last)
r2_fl = r_fl*r_fl
# print('{:<25}'.format('autocorrelation') + ': R^2 =',f'{r2_fl:.5f}')
# print('{:<25}'.format('autocorrelation') + ': R^2 =',f'{r2_fl:.5f}')
# Plot correlation of first and last half
plt.figure(figsize=(10,4))
self.plot_correlation(self.response_mean_first, self.response_mean_last,
None, None,
'Group 1', 'Group 2', 'Autocorrelation', r=round(r_fl, 5))
# self.training_data = self.response_data
middle_frame = int(round(self.response_data.shape[1])/2)
self.data_training = self.response_data.iloc[:, 0:middle_frame]
self.data_testing = self.response_data.iloc[:,
middle_frame:self.response_data.shape[1]]
plt.tight_layout()
# plt.show()
print('Split data')
def random(self, useNorm=False, seed=100):
# Choose whether to use normalized data
if useNorm == True:
data = self.response_normal
else:
data = self.response_data
# Get mean and std of responses
data_response_mean = data.mean(skipna=True)
data_response_std = data.std(skipna=True)
# Choose a random person
random.seed(seed)
random_person = random.randrange(0, len(data), 1)
self.single_response = data.iloc[random_person]
# Get correlation of data and random person
correlations = []
for i in range(0, len(data)):
single_response = data.iloc[i]
r = single_response.corr(data_response_mean)
correlations.append(r)
r_sm = self.single_response.corr(data_response_mean)
r2_sm = r_sm*r_sm
print('{:<30}'.format('Single random') + ': N' +
'{:<4}'.format(str(random_person)) + " vs Human R^2 = " + str(r_sm))
# Plot correlation of data and random person
# self.plot_correlation(self.single_response, data_response_mean,
# data_response_std, [],
# ('Person n'+str(random_person)), 'Response mean', 'random', r2_sm)
print('Got random correlation')
pd_corr = pd.DataFrame(correlations)
pd_corr.to_csv(self.results_folder + 'filtered_responses/' + 'individual_corr.csv')
# plt.figure()
# plt.boxplot(pd_corr)
pd_corr.plot.box(vert=False, figsize=(10,2))
parts = plt.vlines(0.5,1,1)
model = plt.vlines(0.58568,0.8,1.2,colors='orange')
# plt.title("Participant correlation")
# plt.ylabel("Participants")
plt.xlabel("Correlation")
plt.legend([parts, model],['Participants', 'Model'])
plt.grid()
plt.yticks([])
plt.tight_layout()
plt.savefig(self.results_folder + 'filtered_responses/' +
'individual_corr' + '.png')
# print(correlations)
print("Average over correlation: {}, stdev: ".format(pd_corr.median()))
def model(self, plotBool=True):
self.model_data = pd.read_csv(
self.results_folder + 'model_responses/' + self.modelDataFile)
# Get keys
self.parameter_keys = list(self.model_data)
self.parameter_keys.remove('general_frame_number')
self.model_data.pop('general_frame_number')
for parameter in self.parameter_keys:
# Get correlation
r = self.model_data[parameter].corr(self.response_mean)
# Print correlation
print('{:<25}'.format(parameter) + ': r =',f'{r:.5f}')
# Save figure correlation
if plotBool == True:
self.plot_correlation(self.model_data[parameter], self.response_mean,
None, self.response_std,
str(parameter), 'response_mean', parameter, r=round(r, 5))
# Check model cronbach alpha
# self.cronbach_alpha(self.model_data[['model_type', 'model_imminence', 'model_probability']])
# Add mean response to correlation matrix
self.model_data['response_mean_last'] = self.response_mean_last
# Get correlation matrix
corrMatrix = self.model_data.corr(method='pearson')
# corrMatrix = corrMatrix.sort_values(by='response_mean')
        # Remove upper triangle
mask = np.zeros_like(corrMatrix)
mask[np.triu_indices_from(mask, k=1)] = True
# Get eigenvalues and vectors
# Number of params
n = len(self.parameter_keys)
v = np.linalg.eig(corrMatrix.iloc[4:n, 4:n])
v_sum = np.sum(v[0])
v_csum = np.cumsum(v[0])
v_ccurve = v_csum/v_sum
v_cutoff = len(v_ccurve[(v_ccurve <= 0.8)])+1
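        # v_cutoff: number of principal components needed to reach 80% of the
        # cumulative eigenvalue sum (explained variance)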
# print(v_cutoff)
plt.clf()
plt.plot(v[0], marker="o")
plt.plot(np.ones(len(v[0])))
# plt.title('Scree plot')
plt.xlabel('Component')
plt.ylabel('Eigenvalue')
plt.grid()
# Save figure
plt.savefig(self.results_folder +
'regression/' + 'scree_plot' + '.png')
plt.clf()
plt.plot(v_ccurve, marker ="o")
# plt.title('Cumulative eigenvalue curve')
plt.xlabel('Component')
plt.ylabel('Cumulative eigenvalue')
plt.grid()
# Save figure
plt.savefig(self.results_folder +
'regression/' + 'ccum_eigen_plot' + '.png')
# Get significant params
p_keys = self.model_data.keys()[4:n]
# print(p_keys)
significant_parameters = set([])
# print(v_cutoff)
loading = v[1] * [v**0.5 for v in v[0]]
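        # Loadings = eigenvectors scaled by sqrt(eigenvalue); the loop below keeps a
        # parameter as significant when its absolute eigenvector weight on a retained
        # component is at least 0.4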
for column in range(0, v_cutoff):
for row in range(0, len(v[1])):
if (abs(v[1][row, column]) >= 0.4):
# if (abs(loading[row, column]) >= 0.8):
# if (row <= 3):
# pass
# else:
significant_parameters.add(p_keys[row])
self.sig_params = list(significant_parameters)
# Plot corr of sigs
# plt.clf()
# sn.heatmap(self.model_data[self.].corr(method='pearson'),vmax = 1,vmin = -1,cmap = 'RdBu_r', linewidths=.5, annot=True,yticklabels=self.)
# plt.title('Correlations of significant parameters')
# plt.show()
# Get eigenvector heatmap
# plt.figure()
# sn.heatmap(loading, vmax = 1,vmin = -1,cmap = 'RdBu_r', linewidths=.5, annot=True,yticklabels=p_keys,fmt='.2f')
# # plt.title('Eigenvectors')
# plt.xlabel('Loading of principle component')
# plt.ylabel('Values')
# plt.show()
# Save figure
# plt.savefig(self.results_folder + 'regression/' + 'eigenvector_matrix' + '.png')
# Plot correlation matrix
if (plotBool == True):
plt.clf()
sn.heatmap(corrMatrix, vmax=1, vmin=-1,
cmap='RdBu_r', linewidths=.5, annot=True)
# plt.show()
print('Got model data and correlations')
else:
pass
r = self.model_data['model_combination'].corr(self.response_mean)
return r**2
def risky_images(self, model=False):
# Get most risky and least risky images
if (model == True):
response_model_sorted = self.model_data['model_combination'].sort_values(
)
least_risky = response_model_sorted.index[0:5]
most_risky = response_model_sorted.tail(5).index[::-1]
else:
response_model_sorted = self.response_mean.sort_values()
least_risky = response_model_sorted.index[0:5]
most_risky = response_model_sorted.tail(5).index[::-1]
# Save most and least risky images
i = 1
for image in least_risky:
# os.path.join(self.dataPath + self.drive + '/image_02/data')
shutil.copyfile(self.dataPath + self.drive + '/image_02/data/' + str(image) + '.png',
self.results_folder + 'most_least_risky_images/' + 'least_risky_%s.png' % i)
i += 1
i = 1
for image in most_risky:
# os.path.join(self.dataPath + self.drive + '/image_02/data')
shutil.copyfile(self.dataPath + self.drive + '/image_02/data/' + str(image) + '.png',
self.results_folder + 'most_least_risky_images/' + 'most_risky_%s.png' % i)
i += 1
print('Got risky images')
def risk_ranking(self):
# Sort list of mean response values
response_mean_sorted = self.response_mean.sort_values()
# i = 0
# for image in response_mean_sorted.index:
# shutil.copyfile(self.dataPath + self.drive + '/image_02/data/' + str(
# image) + '.png', self.results_folder + 'risk_sorted_images/' + '%s.png' % i)
# i += 1
# Sort list of model combination values
response_model_sorted = pd.Series(
self.model_data['model_combination']).sort_values()
# i = 0
# for image in response_model_sorted.index:
# shutil.copyfile(self.dataPath + self.drive + '/image_02/data/' + str(image) +
# '.png', self.results_folder + 'risk_sorted_images/model' + '%s.png' % i)
# i += 1
r = round(np.corrcoef(self.response_mean, self.model_data['model_combination'])[1][0],4)
self.plot_correlation(self.response_mean, self.model_data['model_combination'], name1="Experiment result", name2="Model result", parameter="model_experiment", r=r)
print(np.corrcoef(response_mean_sorted.index,response_model_sorted.index))
print('Ranked images on risk')
def PCA(self):
print("Starting PCA analysis")
images = sorted(os.listdir(
self.dataPath + self.drive + '/image_02/data/'))
images_features_gray = []
images_features_blue = []
images_features_green = []
images_features_red = []
for image in images:
image_features_gray = []
image_features_blue = []
image_features_green = []
image_features_red = []
full_path = self.dataPath + self.drive + '/image_02/data/' + image
loaded_image = cv2.imread(full_path)
gray = cv2.cvtColor(loaded_image, cv2.COLOR_BGR2GRAY)
blue = loaded_image[:, :, 0]
green = loaded_image[:, :, 1]
red = loaded_image[:, :, 2]
scaling = 1./2
gray_scaled = cv2.resize(gray, (0, 0), fx=(scaling), fy=(scaling))
blue_scaled = cv2.resize(blue, (0, 0), fx=(scaling), fy=(scaling))
green_scaled = cv2.resize(
green, (0, 0), fx=(scaling), fy=(scaling))
red_scaled = cv2.resize(red, (0, 0), fx=(scaling), fy=(scaling))
scaled_shape = gray_scaled.shape
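            # Flatten each down-scaled channel row by row into one feature vector per image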
for horizontal in gray_scaled:
image_features_gray = image_features_gray + list(horizontal)
images_features_gray.append(image_features_gray)
for horizontal in blue_scaled:
image_features_blue = image_features_blue + list(horizontal)
images_features_blue.append(image_features_blue)
for horizontal in green_scaled:
image_features_green = image_features_green + list(horizontal)
images_features_green.append(image_features_green)
for horizontal in red_scaled:
image_features_red = image_features_red + list(horizontal)
images_features_red.append(image_features_red)
# PCA decomposition
print("Running decomposition")
nc = 50 # number of model variables
pca = decomposition.PCA(n_components=nc)
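        # For each channel: standardize the pixel features, project onto the first nc
        # principal components, and reshape the components back into eigen-images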
std_gray = StandardScaler()
gray_std = std_gray.fit_transform(images_features_gray)
gray_pca = pca.fit_transform(gray_std)
eigen_frames_gray = np.array(pca.components_.reshape(
(nc, scaled_shape[0], scaled_shape[1])))
std_blue = StandardScaler()
blue_std = std_blue.fit_transform(images_features_blue)
blue_pca = pca.fit_transform(blue_std)
eigen_frames_blue = np.array(pca.components_.reshape(
(nc, scaled_shape[0], scaled_shape[1])))
std_green = StandardScaler()
green_std = std_green.fit_transform(images_features_green)
green_pca = pca.fit_transform(green_std)
eigen_frames_green = np.array(pca.components_.reshape(
(nc, scaled_shape[0], scaled_shape[1])))
std_red = StandardScaler()
red_std = std_red.fit_transform(images_features_red)
red_pca = pca.fit_transform(red_std)
eigen_frames_red = np.array(pca.components_.reshape(
(nc, scaled_shape[0], scaled_shape[1])))
# # Back tranform for check
# back_transform = pca.inverse_transform(gray_pca)
# back_transform_renormalize = std_gray.inverse_transform(back_transform)
# # Show before and after
# first_image = np.array(images_features[0]).reshape(scaled_shape)
# cv2.imshow('Before PCA',first_image)
# cv2.waitKey(0)
# # second_image = np.array(back_transform_renormalize[0]).reshape(scaled_shape)
# cv2.imshow('After PCA',second_image)
# cv2.waitKey(0)
gray_pca_df = pd.DataFrame(gray_pca)
blue_pca_df = pd.DataFrame(blue_pca)
green_pca_df = pd.DataFrame(green_pca)
red_pca_df = pd.DataFrame(red_pca)
self.pca = gray_pca_df
r = round(gray_pca_df[2].corr(self.response_mean),5)
self.plot_correlation(gray_pca_df[2],self.response_mean_last,name1='Gray pca component 2',name2='response_mean_last',r=r)
print("Saving images")
for i in range(0, nc):
print('Feature: ', i)
print('Gray correlation: ',
gray_pca_df[i].corr(self.response_mean))
print('Blue correlation: ',
blue_pca_df[i].corr(self.response_mean))
print('Green correlation: ',
green_pca_df[i].corr(self.response_mean))
print('Red correlation: ', red_pca_df[i].corr(self.response_mean))
max_pixel_gray = np.max(abs(eigen_frames_gray[i]))
max_pixel_blue = np.max(abs(eigen_frames_blue[i]))
max_pixel_green = np.max(abs(eigen_frames_green[i]))
max_pixel_red = np.max(abs(eigen_frames_red[i]))
gray_channel = eigen_frames_gray[i]*1/max_pixel_gray*255
blue_channel = eigen_frames_blue[i]*1/max_pixel_blue*255
green_channel = eigen_frames_green[i]*1/max_pixel_green*255
red_channel = eigen_frames_red[i]*1/max_pixel_red*255
bgr_image = np.zeros((scaled_shape[0], scaled_shape[1], 3))
bgr_image[:, :, 0] = blue_channel
bgr_image[:, :, 1] = green_channel
bgr_image[:, :, 2] = red_channel
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('color ' + str(i)+'.png')), bgr_image)
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('gray ' + str(i)+'.png')), gray_channel)
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('blue ' + str(i)+'.png')), blue_channel)
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('green' + str(i)+'.png')), green_channel)
cv2.imwrite(os.path.join(self.results_folder, 'pca',
('red ' + str(i)+'.png')), red_channel)
print('Performed PCA')
def multivariate_regression(self, pred='default'):
# train = pd.DataFrame(self.pca, columns= ['0','1','2','3','4','5','6','7','8','9','10','11','12','13'])
# train = self.pca.iloc[0:middle]
# test = self.pca.iloc[middle:len(self.pca)]
lr = LinearRegression(normalize=False, copy_X=True)
# lr = LinearRegression()
if (pred == 'default'):
predictor_keys = ['general_velocity', 'general_distance_mean',
'general_number_bjects', 'manual_breaklight', 'occluded_mean']
elif(pred == 'sig'):
predictor_keys = self.sig_params
elif(pred == 'all'):
predictor_keys = self.model_data.keys()
else:
print('Wrong input, changing to default')
predictor_keys = ['general_velocity', 'general_distance_mean',
'general_number_bjects', 'manual_breaklight', 'occluded_mean']
predictors = self.model_data[predictor_keys]
sc = StandardScaler()
predictors_stand = sc.fit_transform(predictors)
middle = int(round(predictors.shape[0]/2))
print(predictors_stand[middle])
# Fit regression
print("Fitting regression model")
print(self.sig_params)
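        # Fit on the first half of the images and evaluate predictions on the held-out second half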
lr.fit(predictors_stand[0:middle], self.response_mean[0:middle])
predictions = lr.predict(predictors_stand[middle:predictors.shape[0]])
# print(predictors[0:middle])
# print(lr.predict(predictors[0:middle]))
data = predictors_stand[0:middle] * lr.coef_
results = pd.DataFrame(data, columns=predictor_keys)
results.insert(0, "intercept", lr.intercept_)
results.to_csv(self.results_folder +
'regression/' + 'regression.csv')
data2 = predictors_stand[middle:predictors.shape[0]] * lr.coef_
results2 = pd.DataFrame(data2, columns=predictor_keys)
results2.insert(0, "intercept", lr.intercept_)
results2.to_csv(self.results_folder +
'regression/' + 'regression2.csv')
# Analyse result
r = np.corrcoef(
self.response_mean[middle:predictors.shape[0]], predictions)[0, 1]
print('Correlation = {}'.format(r))
self.plot_correlation(predictions, self.response_mean[middle:len(
self.response_mean)], name1="Multivariate regression", name2="Response test", parameter="regression_multivariate", r=round(r, 5))
print('Lr coef: {}'.format(lr.coef_))
print('Lr coef deep: {}'.format(lr.coef_[0]))
# self.cronbach_alpha(self.model_data[predictor_keys])
print('Performed multivariate regression')
def risk_accidents(self, plotBool=False):
# Get accident answers
accident_occurence = self.merge_data['how_many_accidents_were_you_involved_in_when_driving_a_car_in_the_last_3_years_please_include_all_accidents_regardless_of_how_they_were_caused_how_slight_they_were_or_where_they_happened']
# Filter no responses
accident_occurence = [-1 if value ==
'i_prefer_not_to_respond' else value for value in accident_occurence]
accident_occurence = [
6 if value == 'more_than_5' else value for value in accident_occurence]
accident_occurence = [value if value == np.nan else float(
value) for value in accident_occurence]
# Group by accidents
n_bins = 20
bins = np.linspace(0, 100, n_bins+1)
binned = []
for value in self.response_data.mean(axis=1):
for b in bins:
if (value <= b):
binned.append(b)
# print("Value:{} < bin:{}".format(value,b))
break
# Get accident occurence
average_score = list(self.response_data.mean(axis=1))
risk_accidents = pd.DataFrame(
{'Accidents': accident_occurence, 'Average_score': average_score})
r = risk_accidents.corr().values[0, 1]
self.plot_correlation(pd.Series(accident_occurence), pd.Series(
average_score), name2='Accidents', name1='Average score', parameter='accident_score', r=round(r, 5))
risk_accidents_grouped = []
for i in range(8):
risk_accidents_grouped.append([])
for i in range(len(accident_occurence)):
# print('i = {}, and value = {}'.format(i,close_occurence[i]))
risk_accidents_grouped[int(accident_occurence[i])].append(
average_score[i])
# Risk close riding
close_occurence = self.merge_data['how_often_do_you_do_the_following_driving_so_close_to_the_car_in_front_that_it_would_be_difficult_to_stop_in_an_emergency']
# Filter no responses
close_occurence = [
0 if value == 'i_prefer_not_to_respond' else value for value in close_occurence]
close_occurence = [
1 if value == '0_times_per_month' else value for value in close_occurence]
close_occurence = [
2 if value == '1_to_3_times_per_month' else value for value in close_occurence]
close_occurence = [
3 if value == '4_to_6_times_per_month' else value for value in close_occurence]
close_occurence = [
4 if value == '7_to_9_times_per_month' else value for value in close_occurence]
close_occurence = [
5 if value == '10_or_more_times_per_month' else value for value in close_occurence]
close_occurence = [value if value == np.nan else float(
value) for value in close_occurence]
close_occurence_grouped = []
for i in range(6):
close_occurence_grouped.append([])
for i in range(len(close_occurence)):
close_occurence_grouped[int(close_occurence[i])].append(
average_score[i])
r = np.corrcoef(close_occurence, average_score)[0, 1]
self.plot_correlation(pd.Series(close_occurence), pd.Series(
average_score), name2='Close driving', name1='Average score', parameter='close_score', r=round(r, 5))
# Disregard speedlimit
speed_occurence = self.merge_data['how_often_do_you_do_the_following_disregarding_the_speed_limit_on_a_residential_road']
# Filter no response
speed_occurence = [
0 if value == 'i_prefer_not_to_respond' else value for value in speed_occurence]
speed_occurence = [
1 if value == '0_times_per_month' else value for value in speed_occurence]
speed_occurence = [
2 if value == '1_to_3_times_per_month' else value for value in speed_occurence]
speed_occurence = [
3 if value == '4_to_6_times_per_month' else value for value in speed_occurence]
speed_occurence = [
4 if value == '7_to_9_times_per_month' else value for value in speed_occurence]
speed_occurence = [
5 if value == '10_or_more_times_per_month' else value for value in speed_occurence]
speed_occurence = [value if value == np.nan else float(
value) for value in speed_occurence]
speed_occurence_grouped = []
for i in range(6):
speed_occurence_grouped.append([])
for i in range(len(speed_occurence)):
speed_occurence_grouped[int(speed_occurence[i])].append(
average_score[i])
r = np.corrcoef(speed_occurence, average_score)[0, 1]
self.plot_correlation(pd.Series(speed_occurence), pd.Series(
average_score), name2='Speeding', name1='Average score', parameter='speed_score', r=round(r, 5))
# Disregard phone
phone_occurence = self.merge_data['how_often_do_you_do_the_following_using_a_mobile_phone_without_a_hands_free_kit']
# Filter no response
phone_occurence = [
0 if value == 'i_prefer_not_to_respond' else value for value in phone_occurence]
phone_occurence = [
1 if value == '0_times_per_month' else value for value in phone_occurence]
phone_occurence = [
2 if value == '1_to_3_times_per_month' else value for value in phone_occurence]
phone_occurence = [
3 if value == '4_to_6_times_per_month' else value for value in phone_occurence]
phone_occurence = [
4 if value == '7_to_9_times_per_month' else value for value in phone_occurence]
phone_occurence = [
5 if value == '10_or_more_times_per_month' else value for value in phone_occurence]
phone_occurence = [value if value == np.nan else float(
value) for value in phone_occurence]
phone_occurence_grouped = []
for i in range(6):
phone_occurence_grouped.append([])
for i in range(len(phone_occurence)):
phone_occurence_grouped[int(phone_occurence[i])].append(
average_score[i])
r = np.corrcoef(phone_occurence, average_score)[0, 1]
self.plot_correlation(pd.Series(phone_occurence), pd.Series(
average_score), name2='Phone driving', name1='Average score', parameter='phone_score', r=round(r, 5))
# Result correlation matrix
inter_group_df = pd.DataFrame([speed_occurence, phone_occurence, accident_occurence, close_occurence])
inter_group_df = inter_group_df.transpose()
inter_group_df.columns = ['speed_occurence', 'phone_occurence', 'accident_occurence', 'close_occurence']
inter_group_corr = inter_group_df.corr()
plt.clf()
sn.heatmap(inter_group_corr, vmax=1, vmin=-1,
cmap='RdBu_r', linewidths=.5, annot=True)
# plt.show()
r = np.corrcoef(speed_occurence, accident_occurence)[0, 1]
self.plot_correlation(pd.Series(speed_occurence), pd.Series(
accident_occurence), name2='Speed driving', name1='Accidents', parameter='speed_accident', r=round(r, 5))
r = np.corrcoef(phone_occurence, accident_occurence)[0, 1]
self.plot_correlation(pd.Series(phone_occurence), pd.Series(
average_score), name2='Phone driving', name1='Accidents', parameter='phone_accident', r=round(r, 5))
r = np.corrcoef(close_occurence, accident_occurence)[0, 1]
self.plot_correlation(pd.Series(close_occurence), pd.Series(
accident_occurence), name2='close driving', name1='Accidents', parameter='close_accident', r=round(r, 5))
# print(survey_results)
if plotBool:
plt.figure()
plt.boxplot(risk_accidents_grouped, labels=[
'no reply', '0', '1', '2', '3', '4', '5', 'more than 5'])
plt.title("Accident risk")
plt.xlabel("Accident occurence")
plt.ylabel("Risk score")
plt.savefig(self.results_folder + 'survey_images/' +
'risk_accidents_box' + '.png')
plt.figure()
plt.boxplot(close_occurence_grouped, labels=[
'no reply', '0', '1-3', '4-6', '7-9', '10 or more'])
plt.title("Keeping distance driving risk")
plt.xlabel("Disregard occurence")
plt.ylabel("Risk score")
plt.savefig(self.results_folder + 'survey_images/' +
'close_occurence_box' + '.png')
plt.figure()
plt.boxplot(speed_occurence_grouped, labels=[
'no reply', '0', '1-3', '4-6', '7-9', '10 or more'])
plt.title("Speed disregard driving risk")
plt.xlabel("Disregard occurence")
plt.ylabel("Risk score")
plt.savefig(self.results_folder + 'survey_images/' +
'speed_occurence_box' + '.png')
plt.figure()
plt.boxplot(phone_occurence_grouped, labels=[
'no reply', '0', '1-3', '4-6', '7-9', '10 or more'])
plt.title("Handsfree disregard driving risk")
plt.xlabel("Disregard occurence")
plt.ylabel("Risk score")
plt.savefig(self.results_folder + 'survey_images/' +
'phone_occurence_box' + '.png')
plt.figure()
plt.hist(average_score, bins=n_bins, rwidth=0.9)
plt.title("Average score responses")
plt.xlabel("Average risk score")
plt.ylabel("Occurences")
plt.savefig(self.results_folder + 'survey_images/' +
'avg_response' + '.png')
# plt.show()
# plt.figure()
# plt.hist2d(accident_occurence,average_score,alpha=0.9)
# plt.title("Accident risk")
# plt.xlabel("Accident occurence")
# plt.ylabel("Risk score")
print('Saved survey images')
def road_velocity(self):
road_data = self.model_data[self.model_data['road_road']==1]
residential_data = self.model_data[self.model_data['road_residential']==1]
city_data = self.model_data[self.model_data['road_city']==1]
road_mean = self.response_mean[road_data.index]
residential_mean = self.response_mean[residential_data.index]
city_mean = self.response_mean[city_data.index]
r_road = road_data['general_velocity'].corr(road_mean)
r_residential = residential_data['general_velocity'].corr(residential_mean)
r_city = city_data['general_velocity'].corr(city_mean)
self.plot_correlation(road_data['general_velocity'],road_mean,name1="velocity_road",name2="response_mean", r=round(r_road,5), parameter="road_velocity")
self.plot_correlation(residential_data['general_velocity'],residential_mean,name1="velocity_residential",name2="response_mean", r=round(r_residential,5), parameter="residential_velocity")
self.plot_correlation(city_data['general_velocity'],city_mean,name1="velocity_city",name2="response_mean", r=round(r_city,5), parameter="city_velocity")
def sorted_mu_sig(self):
mu = self.response_mean
std = self.response_std
sorting = mu.sort_values()
# Plot linear fit
plt.figure(figsize=(10,3))
plt.plot(range(0,210), mu[sorting.index])
plt.plot(range(0,210), std[sorting.index])
plt.grid()
plt.legend(["mean", "std"])
plt.xlabel("Image")
plt.ylabel("Perceived risk")
plt.tight_layout()
# plt.show()
# Save figure
plt.savefig(self.results_folder +
'survey_analysis/' + "mu_std" + '.png')
def cronbach_alpha(self, df):
# 1. Transform the df into a correlation matrix
df_corr = df.corr()
# 2.1 Calculate N
# The number of variables equals the number of columns in the df
N = df.shape[1]
# 2.2 Calculate R
rs = np.array([])
for i, col in enumerate(df_corr.columns):
sum_ = df_corr[col][i+1:].values
rs = np.append(sum_, rs)
mean_r = np.mean(rs)
# 3. Use the formula to calculate Cronbach's Alpha
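        # Standardized alpha = (N * mean_r) / (1 + (N - 1) * mean_r), where N is the
        # number of items and mean_r the mean inter-item correlation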
cronbach_alpha = (N * mean_r) / (1 + (N - 1) * mean_r)
# print(rs)
print('Cronbach alpha: {}'.format(cronbach_alpha))
print('Calculated cronbach alpha')
def krippendorffs_alpha(self):
# print(self.response_data.transpose())
# annotation_triples = []
# for i in range(0, len(self.response_data)):
# annotator = self.response_data.index[i]
# for j in range(0,len(self.response_data.columns)):
# image = str(j)
# response = round(self.response_data.values[i][j],1)
# annotation_triples.append((annotator, image, response))
# input_data = pd.DataFrame(annotation_triples,columns=["annotator_id", "document_id", "annotation"])
# print(input_data)
# alpha = simpledorff.calculate_krippendorffs_alpha_for_df(input_data,experiment_col='document_id',
# annotator_col='annotator_id',
# class_col='annotation')
# alpha = simpledorff.calculate_krippendorffs_alpha(self.response_data)
# print(alpha)
cohen_kappa_score(self.response_data)
# ratingtask = agreement.AnnotationTask(data=annotation_triples)
# print('Krippendorff\'s alpha:',ratingtask.alpha())
# print('Scott\'s pi:',ratingtask.pi())
def plot_correlation(self, series1, series2,
std1=None,
std2=None,
name1='Series 1', name2='Series 2',
parameter='Parameter', r2=np.nan, r = np.nan):
# Plot errobar figure
plt.figure(figsize=(10,4))
# plt.errorbar(series1, series2, std2, std1, linestyle='None',
# marker='.', markeredgecolor='green')
# plt.errorbar(series1, series2, linestyle='None',
# marker='.', markeredgecolor='green')
plt.errorbar(series1, series2, linestyle='None',
marker='.')
# if(not isnan(r2)):
# plt.title(name1 + ' vs. ' + name2 + " | R^2 = %s" % r2)
# elif(not isnan(r)):
# plt.title(name1 + ' vs. ' + name2 + " | r = %s" % r)
# else:
# plt.title(name1 + ' vs. ' + name2)
# Create linear fit of model and responses
linear_model = np.polyfit(series1, series2, 1)
linear_model_fn = np.poly1d(linear_model)
x_s = np.arange(series1.min(), series1.max(),
((series1.max()-series1.min())/1000))
# Plot linear fit
plt.plot(x_s, linear_model_fn(x_s))
plt.legend(["r = {}".format(r), 'images'])
plt.grid()
plt.xlabel(name1)
plt.ylabel(name2)
plt.tight_layout()
# Save figure
plt.savefig(self.results_folder +
'correlation_images/' + parameter + '.png')
if __name__ == "__main__":
dataPath = '/dataset'
drive = '/test_images'
resultsFolder = '/media/jim/HDD/university/master/thesis/results/'
mergedDataFile = '/media/jim/HDD/university/master/thesis/results/filtered_responses/merged_data.csv'
modelDataFile = 'model_results_small_bnds.csv'
# modelDataFile = 'model_results_no_opt.csv'
analyse = analyse(dataPath, drive, mergedDataFile,
modelDataFile, resultsFolder)
analyse.get_responses()
analyse.info()
# analyse.find_outliers(10)
analyse.split()
analyse.random()
# analyse.risk_accidents()
analyse.model(plotBool=False)
# analyse.risky_images(model=False)
# analyse.risk_accidents(plotBool=False)
analyse.risk_ranking()
# analyse.PCA()
analyse.multivariate_regression(pred='sig')
# analyse.multivariate_regression()
# analyse.plot_correlation(analyse.model_data['road_road'],analyse.model_data['general_velocity'])
# analyse.road_velocity()
# analyse.sorted_mu_sig()
# analyse.cronbach_alpha(analyse.response_data)
# analyse.krippendorffs_alpha()
# analyse.plot_correlation(series1=analyse.model_data['general_velocity'],
# series2=analyse.model_data['general_number_bjects'],
# parameter="velocit_nobjects",
# name1='general_velocity',
# name2='general_number_objects',
# r=round(analyse.model_data["general_velocity"].corr(analyse.model_data["general_number_bjects"]),4))
```
#### File: htpmKitti/model/services.py
```python
import os
import numpy as np
# import kitti.tracklet_parser as tracklet_parser
import collections
import pandas as pd
KittiObject = collections.namedtuple('KittiObject', ['type',
'truncated',
'occluded',
'alpha',
'bbox',
'dimensions',
'location',
'location_y'])
KittiImu = collections.namedtuple(
'KittiImu', ['location', 'linear_velocity', 'linear_acceleration'])
class kitti_parser:
def __init__(self,dataPath,drive,resultsFolder):
# Set base paths
self.dataPath = dataPath
self.drive = drive
self.resultsFolder = resultsFolder
# Check if exists
if(not os.path.exists(self.dataPath+self.drive)):
print("Drive does not exist")
raise SystemExit
        # Image paths
try:
self.left_color_image_list = sorted(os.listdir(
self.dataPath + self.drive + '/image_02/data'), key=self.sorter)
except:
print("No image data")
raise SystemExit
# Imu paths
try:
self.imuFileList = sorted(os.listdir(
self.dataPath + self.drive + '/oxts/data/'), key=self.sorter)
except:
print("No oxts data")
raise SystemExit
# Object paths
try:
self.objectFileList = sorted(os.listdir(
self.dataPath + self.drive + '/label_2'), key=self.sorter)
except:
print("No object data, create from xml...")
try:
tracklet_parser.main(self.dataPath, self.drive)
self.objects_list = sorted(os.listdir(
self.dataPath + self.drive + '/label_2'), key=self.sorter)
except:
print("No object xml")
raise SystemExit
# Check variables
self.frame = 0
self.done = 0
# Setup data acquisition
try:
os.remove(os.path.join(self.resultsFolder,
'model_responses/model_results.csv'))
except:
pass
# Get information
self.get_road()
self.get_objects()
self.get_imu()
self.get_manual()
def get_road(self):
self.par_city = []
self.par_residential = []
self.par_road = []
road_file = open(self.dataPath + self.drive +
'/uniform_image_list.txt', "r")
lines = road_file.readlines()
self.road_types = []
for i in range(len(lines)):
road = lines[i].split('/')[0]
self.road_types.append(road)
self.par_city.append((road == 'city')*1)
self.par_residential.append((road == 'residential')*1)
self.par_road.append((road == 'road')*1)
def get_objects(self):
self.objectsList = []
for i in range(len(self.objectFileList)):
# Open file
self.object_file = open(
self.dataPath + self.drive + '/label_2/' + self.objectFileList[i], "r")
# Setup object per frame
objects = []
# Read next line
lines = self.object_file.readlines()
for object in lines:
oArgs = object.split(' ')
type = oArgs[0]
truncated = float(oArgs[1])
occluded = int(oArgs[2])
alpha = float(oArgs[3])
bbox = [float(oArgs[4]),
float(oArgs[5]),
float(oArgs[6]),
float(oArgs[7])]
dimensions = [float(oArgs[8]),
float(oArgs[9]),
float(oArgs[10])]
location = [float(oArgs[11]),
float(oArgs[12]),
float(oArgs[13])]
location_y = float(oArgs[14])
# Append object list of frame
objects.append(KittiObject(type,
truncated,
occluded,
alpha,
bbox,
dimensions,
location,
location_y))
# Close file
            self.object_file.close()
self.objectsList.append(objects)
def get_imu(self):
self.imuList = []
for file in self.imuFileList:
# Open file
imu_file = open(
self.dataPath + self.drive + '/oxts/data/' + file, "r")
# Create new imu msg
# imuObject = KittiImu
# Get imu data from file
line = imu_file.readline()
imuArgs = line.split(' ')
# Fill new object
location = [
float(imuArgs[0]),
float(imuArgs[1]),
float(imuArgs[2]),
float(imuArgs[5])]
linear_velocity = [
float(imuArgs[8]),
float(imuArgs[9]),
float(imuArgs[10])]
linear_acceleration = [
float(imuArgs[11]),
float(imuArgs[12]),
float(imuArgs[13])]
self.imuList.append(
KittiImu(location, linear_velocity, linear_acceleration))
# Close file
            imu_file.close()
def get_manual(self):
self.manual_data = pd.read_csv(
self.dataPath + self.drive + '/manual_data.csv')
def sorter(self, name):
frame = int(name.split('.')[0])
return frame
def typeSwitch(self, objType, parameters):
# Switch to type to assign weight based on...
typeSwitch = {
'Car': parameters[0],
'Van': parameters[1],
'Truck': parameters[2],
'Pedestrian': parameters[3],
'Person_sitting': parameters[4],
'Cyclist': parameters[5],
'Tram': parameters[6],
'Misc': parameters[7],
'DontCare': parameters[8],
}
return typeSwitch.get(objType, "Invalid object type")
def roadSwitch(self, roadType, parameters):
# Switch to type to assign weight based on...
roadSwitch = {
'city': parameters[9],
'residential': parameters[10],
'road': parameters[11],
}
        return roadSwitch.get(roadType, "Invalid road type")
def fast_type(self, x):
par_type = []
par_alpha = []
par_occluded = []
par_truncated = []
par_size = []
for frame_objects in self.objectsList:
types = []
alpha = []
occluded = []
truncated = []
size = []
for object in frame_objects:
types.append(self.typeSwitch(object.type, x))
alpha.append(abs(object.alpha))
occluded.append(object.occluded)
truncated.append(object.truncated)
size.append(np.prod(object.dimensions))
par_alpha.append(alpha)
par_type.append(sum(types))
par_occluded.append(occluded)
par_truncated.append(truncated)
par_size.append(size)
return par_type, par_alpha, par_occluded, par_truncated,par_size
def fast_imm(self, x):
# Get variables from arguments
a = x[12]
b = x[13]
# Create empty return lists
par_total_distance = []
par_velocity = []
par_imm = []
# Get object and ego vehicle data per frame
for frame in range(len(self.imuFileList)):
# Get ego velocity
velocity = np.linalg.norm(self.imuList[frame].linear_velocity, 2)
# Construct save variables
all_imminence = []
all_distance = []
# Get object data per object in frame
for object in self.objectsList[frame]:
distance = np.linalg.norm(object.location, 2)
# Linear imminence parameter
# imm = a * distance/velocity + b
# Quadratic imminence parameter
if b == 0:
imm = np.nan
else:
imm = a*(distance/velocity)**(1/b)
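                # Cap the per-object imminence contribution at 50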
if imm>50:
imm = 50
                # Save parameter per object
all_imminence.append(imm)
all_distance.append(distance)
# Save parameter values per frame
par_imm.append(sum(all_imminence))
par_velocity.append(velocity)
par_total_distance.append(all_distance)
frame += 1
return par_imm, par_velocity, par_total_distance
def fast_prob(self, x):
probability_par = []
for road in self.road_types:
probability_par.append(self.roadSwitch(road, x))
return probability_par
def get_model(self, x):
# Get individual model results
par_all_imminence, par_velocity, par_all_distance = self.fast_imm(x)
par_type, par_alpha, par_occluded, par_truncated,par_size = self.fast_type(x)
par_probability = self.fast_prob(x)
        # Construct empty lists for iteration
par_combi = []
number_objects = []
sum_distance = []
min_distance = []
mean_distance = []
min_alpha = []
mean_alpha = []
max_alpha = []
mean_par_occluded = []
sum_par_occluded = []
mean_par_truncated = []
sum_par_truncated = []
mean_par_size = []
max_par_size = []
min_par_size = []
sum_par_size = []
# Get combined model results
for frame in range(len(par_all_imminence)):
sum_distance.append(sum(par_all_distance[frame]))
min_distance.append(min(par_all_distance[frame], default=0))
# Check for objects present
if len(par_all_distance[frame]) != 0:
number_objects.append(len(par_all_distance[frame]))
mean_distance.append(
sum(par_all_distance[frame])/len(par_all_distance[frame]))
min_alpha.append(min(par_alpha[frame]))
mean_alpha.append(
sum(par_alpha[frame])/len(par_alpha[frame]))
max_alpha.append(max(par_alpha[frame]))
mean_par_occluded.append(sum(par_occluded[frame])/len(par_occluded[frame]))
sum_par_occluded.append(sum(par_occluded[frame]))
mean_par_truncated.append(sum(par_truncated[frame])/len(par_truncated[frame]))
sum_par_truncated.append(sum(par_truncated[frame]))
mean_par_size.append(sum(par_size[frame])/len(par_size[frame]))
max_par_size.append(max(par_size[frame]))
min_par_size.append(min(par_size[frame]))
sum_par_size.append(sum(par_size[frame]))
else:
number_objects.append(0.0)
mean_distance.append(0.0)
min_alpha.append(0.0)
mean_alpha.append(0.0)
max_alpha.append(0.0)
mean_par_occluded.append(0.0)
sum_par_occluded.append(0.0)
mean_par_truncated.append(0.0)
sum_par_truncated.append(0.0)
mean_par_size.append(0.0)
max_par_size.append(0.0)
min_par_size.append(0.0)
sum_par_size.append(0.0)
par_combi.append(par_all_imminence[frame] +
par_type[frame] + par_probability[frame])
# Create empty dict
results = {}
# Add items to dict
results['general_frame_number'] = range(
len(self.left_color_image_list))
results['model_combination'] = par_combi
results['model_type'] = par_type
results['model_imminence'] = par_all_imminence
results['model_probability'] = par_probability
results['general_velocity'] = par_velocity
results['general_distance_sum'] = sum_distance
results['general_distance_min'] = min_distance
results['general_distance_mean'] = mean_distance
results['general_number_bjects'] = number_objects
results['manual_car_toward'] = self.manual_data.CarToward
results['manual_car_away'] = self.manual_data.CarAway
results['manual_breaklight'] = self.manual_data.Breaklight
results['alpha_min'] = min_alpha
results['alpha_mean'] = mean_alpha
results['alpha_max'] = max_alpha
results['occluded_mean'] = mean_par_occluded
results['occluded_sum'] = sum_par_occluded
results['truncated_mean'] = mean_par_truncated
results['truncated_sum'] = sum_par_truncated
results['size_mean'] = mean_par_size
results['size_max'] = max_par_size
results['size_min'] = min_par_size
results['size_sum'] = sum_par_size
results['road_road']= self.par_road
results['road_residential'] = self.par_residential
results['road_city'] = self.par_city
return results
def save_model(self, x,modelFile = 'model_results.csv'):
# Get model response
results = self.get_model(x)
# Create dataframe from dict
resultsDF = pd.DataFrame.from_dict(results)
# save dataframe as csv file
resultsDF.to_csv(os.path.join(self.resultsFolder,
'model_responses',modelFile), index=False)
if __name__ == "__main__":
# Example input
dataPath = '/dataset'
drive = '/test_images'
resultsFolder = '/home/jim/HDDocuments/university/master/thesis/results'
# Construct parser class
kp = kitti_parser(dataPath,drive,resultsFolder)
# Example parameters
# x = [0., 1.458974, 2.63547244, 0.96564807, 2.21222542, 1.65225034, 0., 0., 1.,
# 2.20176468, 2.40070779, 0.1750559,
# 0.20347586, 6.54656438]
x = [0.2, 0.4, 0.6, 1., 0.2, 1., 0.6, 0.2, 0.,
3., 1.5, 0.,
1., 0.1]
# Get model results
results = kp.get_model(x)
# Save model results
kp.save_model(x)
```
#### File: htpmKitti/online/appenFunctions.py
```python
import pandas as pd
class appen:
def __init__(self, csvFile, results_folder):
self.csvFile = csvFile
self.results_folder = results_folder
self.appen_data = pd.read_csv(
self.results_folder + 'online_data/' + csvFile)
def find_cheaters(self, column_type='standard'):
code_name = 'type_the_code_that_you_received_at_the_end_of_the_experiment'
self.unique_appen_data = self.appen_data.drop_duplicates(subset=[
code_name])
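        # Workers who submitted the same completion code more than once are flagged as cheaters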
if column_type == 'standard':
self.cheater_appen_data = self.appen_data.drop(
self.unique_appen_data.index)
elif column_type == 'daniel':
self.cheater_appen_data = self.appen_data.drop(
self.unique_appen_data.index)
self.cheater_appen_data = self.cheater_appen_data[['_id', '_worker_id', code_name, '_ip', '_started_at', '_created_at', '_country', 'what_is_your_gender', 'what_is_your_age', 'have_you_read_and_understood_the_above_instructions', 'at_which_age_did_you_obtain_your_first_license_for_driving_a_car_or_motorcycle', 'what_is_your_primary_mode_of_transportation',
'on_average_how_often_did_you_drive_a_vehicle_in_the_last_12_months', 'about_how_many_kilometers_miles_did_you_drive_in_the_last_12_months', 'how_many_accidents_were_you_involved_in_when_driving_a_car_in_the_last_3_years_please_include_all_accidents_regardless_of_how_they_were_caused_how_slight_they_were_or_where_they_happened']]
daniels_headers = ['ID', 'worker_id', 'worker_code', 'ip_address', 'start_time', 'end_time', 'country', 'gender',
'age', 'read_instructions', 'license_age', 'primary_transport', 'avg_vehicle_time', 'avg_mileage', 'accidents']
self.cheater_appen_data.columns = daniels_headers
else:
pass
print('There are %s cheaters detected, giving %s unreliable results.' % (len(
self.cheater_appen_data['worker_code'].unique()), len(self.cheater_appen_data['worker_code'])))
def makeCSV(self):
appen_name = self.csvFile.split('.')[0]
self.unique_appen_data.to_csv(self.results_folder +
'filtered_responses/'+
appen_name+
'_unique.csv')
self.cheater_appen_data.to_csv(
self.results_folder + 'filtered_responses/' + appen_name + '_cheaters.csv')
return self.results_folder + 'filtered_responses/' + appen_name + '_unique.csv', appen_name + '_cheaters.csv'
if __name__ == "__main__":
results_folder = '/home/jim/HDDocuments/university/master/thesis/results/'
appenFile = 'f1669822.csv'
a = appen(appenFile, results_folder)
a.find_cheaters('daniel')
a.makeCSV()
``` |
{
"source": "jhoogstraat/mc-mod-getter",
"score": 2
} |
#### File: mc_mod_getter/utils/ApiHandler.py
```python
from __future__ import annotations
from pathlib import Path
from typing import Union
import requests as req
import logging
import hashlib
import os
class ApiHandler:
class NoAccess(Exception): pass
class Unknown(Exception): pass
_api_handler_hosts = {}
def __init_subclass__(cls, **kwargs: str) -> None:
"""Registers the the different ApiHandler subclasses to be used for __new__ """
super().__init_subclass__(**kwargs)
cls._api_handler_hosts[cls._host] = cls
def __new__(cls, host: str, **kwargs: str) -> Union[ModrinthApiHandler, CurseforgeApiHandler, Unknown]:
"""Creates the correct ApiHandler subclass given the host arg """
api_handler_host = cls._api_handler_hosts.get(host, None)
if api_handler_host:
return object.__new__(api_handler_host)
else:
# Host provided in the yaml is not supported
raise cls.Unknown(f'Mod host: {host} is not supported')
def __init__(self, *args: str, **kwargs: str) -> None:
self.version = str(kwargs.pop('version'))
self.loader = kwargs.pop('loader').lower()
self.mod_dir = kwargs.pop('mod_dir', str(Path.home() / 'Downloads'))
self.downloaded = self._get_downloaded_mods()
def __repr__(self) -> str:
return str(self.__dict__)
@classmethod
def _file_checksum(cls, file_path: str, host_hash: Union[list,str]) -> bool:
hash_algorithms = {
'modrinth': 'sha512',
'curseforge': 'md5'
}
# Handle Curseforge api's 0 or many provided hashes
if not host_hash:
logging.info(f' > [WARNING] : Cannot verify {file_path} was downloaded correctly')
return True
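        # Accept either a single hash string or a list of candidate hashes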
host_hash = [host_hash] if type(host_hash) is str else host_hash
with open(file_path, 'rb') as f:
file_hash = hashlib.new(hash_algorithms[cls._host])
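            # Hash the file in 8 KiB chunks so large jars are never fully read into memory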
while chunk := f.read(8192):
file_hash.update(chunk)
return any([file_hash.hexdigest() == h for h in host_hash])
@staticmethod
def _strip_non_alpha(string: str):
return ''.join([char for char in string if char.isalpha() or char == "'"])
def _get_downloaded_mods(self):
files = [p.name for p in Path(self.mod_dir).rglob('*.jar')]
downloaded = {}
for f in files:
downloaded[self._strip_non_alpha(f)] = f
return downloaded
def _get_mod_id(self) -> None:
raise NotImplementedError
def _filter_mod_version(self) -> None:
raise NotImplementedError
def download_mod(self, mod_name: str) -> None:
logging.info(f'Downloading mod {mod_name}')
mod_id = self._get_mod_id(mod_name)
if not mod_id:
return
mod = self._filter_mod_version(mod_id)
if not mod:
logging.info(f' > [WARNING] : Mod not found for version. skipping...')
return
mod_file_path = os.path.join(self.mod_dir, mod['filename'])
old_version = self.downloaded.get(self._strip_non_alpha(mod['filename']), None)
# Already have latest version
if Path(mod_file_path).is_file():
logging.info(f' > Skipping {mod_name}...already latest')
return
# If theres an update, delete the older mod version
elif old_version:
logging.info(f' > Updating {mod_name} & removing old version: {old_version}')
Path(os.path.join(self.mod_dir, old_version)).unlink()
logging.info(f' > {mod_name} ({mod_id}) File: {mod["filename"]}')
# Download the mod, if the file hashes dont match, redownload the mod and check again
while True:
with open(mod_file_path, 'wb') as f:
f.write(req.get(mod['url'], stream=True).content)
if self._file_checksum(mod_file_path, mod['hashes']):
break
class ModrinthApiHandler(ApiHandler):
_host = 'modrinth'
_host_api = 'https://api.modrinth.com/api/v1/mod'
def __init__(self, *args: str, **kwargs: str) -> None:
super().__init__(*args, **kwargs)
def __repr__(self) -> str:
return super().__repr__()
def _get_mod_id(self, mod_name: str) -> str:
last_seen = None
search_query = f'{self._host_api}?query={mod_name.lower()}'
for mod in req.get(search_query).json()['hits']:
last_seen = mod
if mod_name in mod['title'] and self.loader in mod['categories']:
return mod['mod_id'].split('-')[1]
else:
warning = f' > [WARNING] Skipping {mod_name}, '
if not last_seen or mod_name != last_seen['title']:
warning += f'check if mod exists on {self._host} or mod name spelling'
            elif self.loader not in str(last_seen['categories']).lower():
                warning += f'No {self.loader} version found, only {",".join(last_seen["categories"])}'
logging.info(warning)
return None
def _filter_mod_version(self, mod_id: str) -> dict:
# Send the id, and get back all version available for the mod
versions_query = f'{self._host_api}/{mod_id}/version'
mod_versions = req.get(versions_query).json()
# Get all versions that match the mc version found in yaml file
mod_versions = [v for v in mod_versions if self.version == v['game_versions'][-1]]
# Return first mod in mod_versions, it's the latest matching mc version in yaml
mod = mod_versions[0]['files'][0] if mod_versions else None
        if mod:
            mod['hashes'] = mod['hashes']['sha512']
        return mod
def download_mod(self, mod_name: str) -> None:
super().download_mod(mod_name)
class CurseforgeApiHandler(ApiHandler):
# NOTE: The Curseforge api is dogwater >:(
_host = 'curseforge'
_host_api = 'https://addons-ecs.forgesvc.net/api/v2/addon'
_user_agent = (
'user-agent=Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36'
)
_headers = {'User-Agent': _user_agent}
def __init__(self, *args: str, **kwargs: str) -> None:
super().__init__(*args, **kwargs)
def __repr__(self) -> str:
return super().__repr__()
def _get_mod_id(self, mod_name: str) -> str:
# Search only 1 word from mod name b/c api is dumb and uses OR conditions for each word
mod_query_name = mod_name.lower().split(' ')[0]
mod_query_name = self._strip_non_alpha(mod_query_name)
last_seen = None
search_query = (
            f'{self._host_api}/search?gameId=432&sectionId=6'
f'&searchFilter={mod_query_name}'
f'&gameVersion={self.version}'
)
for mod in req.get(search_query,headers=self._headers).json():
last_seen = mod
# Yet another curse issue, assume if no loader is specified for mod, its universal
if not mod.get('modLoaders'):
mod['modLoaders'] = self.loader
if mod_name == mod['name'] and self.loader in str(mod['modLoaders']).lower():
return mod['id']
else:
warning = f' > [WARNING] Skipping {mod_name}, '
if not last_seen or mod_name != last_seen['name']:
warning += f'check if mod exists on {self._host} or mod name spelling'
            elif self.loader not in str(last_seen['modLoaders']).lower():
warning += f'No {self.loader} version found, only {",".join(last_seen["modLoaders"])}'
logging.info(warning)
return None
def _filter_mod_version(self, mod_id: str) -> dict:
logging.info(f' > [INFO] : Filtering mod versions {self.version}/{self.loader.capitalize()} for mod {mod_id}')
try:
search_query = f'{self._host_api}/{mod_id}'
logging.info(f' > [INFO] : Requesting mod version: {search_query}')
mod_versions = req.get(search_query,headers=self._headers).json()['latestFiles']
mod_versions = [v for v in mod_versions if any(self.version in x for x in v['gameVersion']) and self.loader.capitalize() in v['gameVersion']]
except:
# TODO: If searching on curseforge version not in Latest files
search_query = f'{self._host_api}/{mod_id}'
mod_versions = req.get(search_query,headers=self._headers).json()
else:
# Version not found. Skipping.
if not mod_versions:
return
logging.info(f' > [INFO] : Found mod for version')
# {curseapi key : renamed key}
mod_details = {
'fileName':'filename',
'downloadUrl': 'url',
'hashes': 'hashes'
}
# Modify keys to to make download method generic
mod = {mod_details[key]: value for key, value in mod_versions[0].items() if key in mod_details}
mod['hashes'] = [h['value'] for h in mod['hashes']]
return mod
def download_mod(self, mod_name: str) -> None:
super().download_mod(mod_name)
``` |
{
"source": "jhoogstraat/tao-runner",
"score": 2
} |
#### File: tao-runner/tao-runner/context.py
```python
from pathlib import Path
from typing import Any, Dict, Optional
class TaoConfig:
def __init__(self, config: Dict[str, Any]):
self.gpus = str(config['gpus'])
self.gpu_indices = ','.join([str(i) for i in config['gpu_indices']])
class ExperimentConfig:
def __init__(self, config: Dict[str, Any]):
self.head: str = config['head']
self.backbone: str = config['backbone']
self.repository: str = config['repository']
self.model_key: str = config['model_key']
self.dataset: str = config['dataset']
self.export_model: str = config['export_model']
self.export_type: str = config['export_type']
class ExperimentPaths:
def __init__(self, base: Path, project: str, experiment: str, config: ExperimentConfig, pretrained_model_filename: Optional[str] = None):
# Base dirs
self.project_dir = base.joinpath('projects', project)
data_dir = self.project_dir.joinpath('data')
models_dir = self.project_dir.joinpath('models')
specs_dir = self.project_dir.joinpath('specs')
# Specialised dirs
self.model_dir = models_dir.joinpath(experiment)
self.specs_dir = specs_dir.joinpath(experiment)
self.dataset_dir = data_dir.joinpath(config.dataset)
self.subset_full_dir = self.dataset_dir.joinpath("full")
self.subset_train_dir = self.dataset_dir.joinpath("train")
self.subset_val_dir = self.dataset_dir.joinpath("val")
self.subset_tfrecords_dir = self.dataset_dir.joinpath(
"tfrecords_" + experiment)
self.pretrained_model_dir = base.joinpath(
'repositories', config.repository, config.repository + '_v' + config.backbone)
# Files
self.convert_spec_file = self.specs_dir.joinpath('convert.txt')
self.train_spec_file = self.specs_dir.joinpath('train.txt')
self.compiled_convert_spec_file = self.subset_tfrecords_dir.joinpath(
self.convert_spec_file.name)
self.compiled_train_spec_file = self.model_dir.joinpath(
self.convert_spec_file.name)
self.pretrained_model_file = self.pretrained_model_dir.joinpath(pretrained_model_filename) if pretrained_model_filename else next(
self.pretrained_model_dir.glob('*.hdf5'), None)
class ExperimentContext:
def __init__(self, project: str, experiment: str, config: Dict[str, Any], tao: Dict[str, Any]):
self.project = project
self.experiment = experiment
self.tao = TaoConfig(tao)
self.config = ExperimentConfig(config)
self.local_paths = ExperimentPaths(
project=project, experiment=experiment, config=self.config, base=Path.cwd())
self.docker_paths = ExperimentPaths(
project=project, experiment=experiment, config=self.config, base=Path('/workspace'), pretrained_model_filename=self.local_paths.pretrained_model_file.name)
```
#### File: tao-runner/tasks/convert.py
```python
import subprocess
from pathlib import Path
from shutil import rmtree
from ..context import ExperimentContext
# Datumaro does not export label files for 'background' images, which do not contain any object to be detected.
# TAO expects kitti label files to have 15 columns, but kitti has 16 originally (https://github.com/NVIDIA/DIGITS/issues/992)
def check_kitti(kitti_dir: Path):
assert kitti_dir.exists(), f"The directory {kitti_dir} does not exist"
images_dir = kitti_dir.joinpath("image_2")
labels_dir = kitti_dir.joinpath("label_2")
images = list(
images_dir.glob('*.jpg')) + list(images_dir.glob('*.png')) + list(images_dir.glob("*.jpeg"))
labels = list(labels_dir.glob('*.txt'))
assert len(images) > 0, f"No samples found in dataset {kitti_dir}"
    if len(labels) > len(images):
        image_stems = {image.stem for image in images}
        missing_files = [label.stem for label in labels if label.stem not in image_stems]
        assert not missing_files, (
            f"Found {len(labels)} label files, but only {len(images)} images. "
            "Missing images for:\n" + '\n'.join(missing_files))
print(f"Images: {len(images)}, labels: {len(labels)}")
for image in images:
label_file = labels_dir.joinpath(image.with_suffix('.txt').name)
if not label_file.exists():
with open(label_file, 'w') as r:
r.write('')
print(f"Created empty label file {label_file.name}")
else:
with open(label_file, 'r') as r:
for line in r.readlines():
row = line.strip().split(" ")
if len(row) > 15:
print(
f"The file {label_file.name} has {len(row)} fields. Saving first 15")
new_label = str.join(" ", row[:15])
with open(label_file, 'w') as w:
w.write(new_label)
print(f"Checked labels for {len(images)} images. All good.")
def run(context: ExperimentContext, overwrite: bool = False, **kwargs):
assert context.local_paths.convert_spec_file.is_file(
), f"Converter spec file does not exist at location '{context.local_paths.convert_spec_file}'"
# TODO: We currently don't know which subset (full, train, val, ...) is being converted, so we cannot check if the dataset is ok.
# check_kitti(dataset)
if context.local_paths.subset_tfrecords_dir.exists():
assert overwrite, f"The directory '{context.local_paths.subset_tfrecords_dir.name}' already exists at 'data/'. Use --overwrite to replace the existing data."
rmtree(context.local_paths.subset_tfrecords_dir)
context.local_paths.subset_tfrecords_dir.mkdir()
with open(context.local_paths.convert_spec_file, 'r') as infile, open(context.local_paths.compiled_convert_spec_file, 'w') as outfile:
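        # Substitute the $-placeholders in the spec template with container-side paths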
spec = infile.read()
spec = spec.replace("$project", context.project)
spec = spec.replace(
"$dataset", context.docker_paths.dataset_dir.as_posix())
spec = spec.replace(
"$tfrecords", context.docker_paths.subset_tfrecords_dir.as_posix())
spec = spec.replace("$pretrained_model",
context.docker_paths.pretrained_model_file.as_posix())
outfile.write(spec)
print("Converting dataset to TFRecords...\n")
completed = subprocess.run(["tao", context.config.head, "dataset_convert",
"-d", context.docker_paths.compiled_convert_spec_file.as_posix(),
"-o", context.docker_paths.subset_tfrecords_dir.joinpath("tfrecord").as_posix()], check=False, text=True, capture_output=True)
print("STDOUT:", completed.stdout)
print("STDERR:", completed.stderr)
```
#### File: tao-runner/tasks/train.py
```python
import subprocess
from shutil import rmtree
from ..context import ExperimentContext
def run(context: ExperimentContext, overwrite: bool = False, stop: bool = False, **kwargs):
# Checks to make sure all files are present and we don't override anything
assert context.local_paths.train_spec_file.is_file(
), f"Spec file is not present at location '{context.local_paths.train_spec_file}'"
if not overwrite:
assert not context.local_paths.model_dir.exists(
), f"The model directory '{context.local_paths.model_dir.name}' already exists."
elif context.local_paths.model_dir.exists():
rmtree(context.local_paths.model_dir)
# Prepare the model directory
context.local_paths.model_dir.mkdir(exist_ok=True)
with open(context.local_paths.train_spec_file, 'r') as infile, open(context.local_paths.model_dir.joinpath(context.local_paths.train_spec_file.name), 'w') as outfile:
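        # Substitute the $-placeholders in the spec template with container-side paths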
spec = infile.read()
spec = spec.replace("$project", context.project)
spec = spec.replace(
"$dataset", context.docker_paths.dataset_dir.as_posix())
spec = spec.replace(
"$tfrecords", context.docker_paths.subset_tfrecords_dir.as_posix())
spec = spec.replace("$pretrained_model",
context.docker_paths.pretrained_model_file.as_posix())
outfile.write(spec)
# Stop running training, if requested
if stop:
print("Stopping running tao tasks...")
subprocess.run(["tao", "stop", "--all"], check=True,
text=True, capture_output=True)
print("Starting training...")
print(
f"Using pretrained model: {context.config.repository}/{context.docker_paths.pretrained_model_file.name}")
log_file = context.local_paths.model_dir.joinpath("train.log").as_posix()
print(f"See {log_file} for training progress")
completed = subprocess.run(["tao", context.config.head, "train",
"--gpus", context.tao.gpus,
"--gpu_index", context.tao.gpu_indices,
"-e", context.docker_paths.model_dir.joinpath(
context.local_paths.train_spec_file.name).as_posix(),
"-r", context.docker_paths.model_dir.as_posix(),
"-k", context.config.model_key,
"--log_file", context.docker_paths.model_dir.joinpath("train.log").as_posix()], check=False, text=True, capture_output=True)
print("STDOUT:", completed.stdout)
print("STDERR:", completed.stderr)
``` |
{
"source": "jhoolmans/shifter",
"score": 2
} |
#### File: mgear/shifter/relative_guide_placement.py
```python
import json
import math
# dcc
import maya.cmds as mc
import pymel.core as pm
import maya.OpenMaya as om
# mgear
from mgear.core import utils
from mgear.core import vector
from mgear.core import transform
from mgear.core import meshNavigation
# constants -------------------------------------------------------------------
# Designate the root of the hierarchy to crawl
GUIDE_ROOT = "guide"
# Nodes to avoid checking the hierarchy
DEFAULT_SKIP_CRAWL_NODES = ("controllers_org",
"spineUI_C0_root",
"faceUI_C0_root",
"legUI_R0_root",
"armUI_L0_root",
"legUI_L0_root",
"armUI_R0_root")
# nodes that will not have their positions updated
DEFAULT_SKIP_PLACEMENT_NODES = ("controllers_org",
"global_C0_root",
"spineUI_C0_root",
"faceUI_C0_root",
"legUI_R0_root",
"armUI_L0_root",
"legUI_L0_root",
"armUI_R0_root")
try:
SKIP_CRAWL_NODES
SKIP_PLACEMENT_NODES
except NameError:
SKIP_CRAWL_NODES = list(DEFAULT_SKIP_CRAWL_NODES)
SKIP_PLACEMENT_NODES = list(DEFAULT_SKIP_PLACEMENT_NODES)
# skip the node if it even contains the characters in the list
# eg SKIP_CONTAINS = ["hair"]
SKIP_CONTAINS = []
# Avoid nodes of a specified suffix
SKIP_SUFFIX = ["sizeRef", "crv", "crvRef", "blade"]
# Types of nodes to avoid
SKIP_NODETYPES = ["aimConstraint", "pointConstraint", "parentConstraint"]
UNIVERSAL_MESH_NAME = "skin_geo_setup"
# general functions -----------------------------------------------------------
def crawlHierarchy(parentNode,
ordered_hierarchy,
skip_crawl_nodes,
skip_strings=None):
"""recursive function to crawl a hierarchy of nodes to return decendents
Args:
parentNode (str): node to query
        ordered_hierarchy (list): list that is continuously passed through recursive calls
skip_crawl_nodes (list): nodes to skip crawl
"""
if not skip_strings:
skip_strings = []
for node in mc.listRelatives(parentNode, type="transform") or []:
if node in skip_crawl_nodes or node in ordered_hierarchy:
continue
if node.endswith(tuple(SKIP_SUFFIX)):
continue
if mc.objectType(node) in SKIP_NODETYPES:
continue
if [True for skip_str in skip_strings
if skip_str.lower() in node.lower()]:
continue
ordered_hierarchy.append(node)
crawlHierarchy(node,
ordered_hierarchy,
skip_crawl_nodes,
skip_strings=skip_strings)
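# Hedged usage sketch for crawlHierarchy (the "hierarchy" name is illustrative):
# the list is filled in place, depth first.
#
#   hierarchy = []
#   crawlHierarchy(GUIDE_ROOT, hierarchy, SKIP_CRAWL_NODES,
#                  skip_strings=SKIP_CONTAINS)
#   # hierarchy now holds the crawlable guide transforms in traversal order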
def getPostionFromLoop(vertList):
"""Get the center position from the list of edge ids provided
Args:
vertList (list): list of edge ids
Returns:
list: of translate XYZ, world space
"""
bb = mc.exactWorldBoundingBox(vertList)
pos = ((bb[0] + bb[3]) / 2, (bb[1] + bb[4]) / 2, (bb[2] + bb[5]) / 2)
return pos
def getVertMatrix(closestVert):
"""create a matrix from the closestVert and the normals of the surrounding
faces for later comparison
Args:
node (str): guide node to query
closestVert (str): closest vert to guide
Returns:
list: of matrices
"""
closestVert = pm.PyNode(closestVert)
faces = closestVert.connectedFaces()
normalVector = faces.getNormal("world")
pm.select(faces)
faces_str = mc.ls(sl=True, fl=True)
pm.select(cl=True)
face_pos = pm.dt.Vector(getPostionFromLoop(faces_str))
normal_rot = getOrient([normalVector.x, normalVector.y, normalVector.z],
[0, 1, 0],
ro=0)
orig_ref_matrix = pm.dt.TransformationMatrix()
orig_ref_matrix.setTranslation(face_pos, pm.dt.Space.kWorld)
orig_ref_matrix.setRotation(normal_rot)
return orig_ref_matrix
def getOrient(normal, tangent, ro=0):
"""convert normal direction into euler rotations
Args:
normal (list): of nomel values
ro (int, optional): rotate order
Returns:
list: of euler rotations
"""
kRotateOrders = [om.MEulerRotation.kXYZ, om.MEulerRotation.kYZX,
om.MEulerRotation.kZXY, om.MEulerRotation.kXZY,
om.MEulerRotation.kYXZ, om.MEulerRotation.kZYX, ]
cross = [normal[1] * tangent[2] - normal[2] * tangent[1],
normal[2] * tangent[0] - normal[0] * tangent[2],
normal[0] * tangent[1] - normal[1] * tangent[0]]
tMatrix = normal + [0] + tangent + [0] + cross + [0, 0, 0, 0, 1]
mMatrix = om.MMatrix()
om.MScriptUtil.createMatrixFromList(tMatrix, mMatrix)
tmMatrix = om.MTransformationMatrix(mMatrix)
rotate = tmMatrix.eulerRotation().reorder(kRotateOrders[ro])
RAD_to_DEG = (180 / math.pi)
return [rotate[0] * RAD_to_DEG,
rotate[1] * RAD_to_DEG,
rotate[2] * RAD_to_DEG]
def getRepositionMatrix(node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix,
closestVerts):
"""Get the delta matrix from the original position and multiply by the
new vert position. Add the rotations from the face normals.
    Args:
        node_matrix (pm.dt.Matrix): matrix of the guide
        orig_ref_matrix (pm.dt.Matrix): matrix from the original vert position
        mr_orig_ref_matrix (pm.dt.Matrix): mirrored original reference matrix
        closestVerts (list): names of the closest and mirrored closest verts
    Returns:
        pm.dt.Matrix: matrix of the new offset position, worldSpace
    """
current_vert = pm.PyNode(closestVerts[0])
mr_current_vert = pm.PyNode(closestVerts[1])
current_length = vector.getDistance(current_vert.getPosition("world"),
mr_current_vert.getPosition("world"))
orig_length = vector.getDistance(orig_ref_matrix.translate,
mr_orig_ref_matrix.translate)
orig_center = vector.linearlyInterpolate(orig_ref_matrix.translate,
mr_orig_ref_matrix.translate)
orig_center_matrix = pm.dt.Matrix()
# orig_center_matrix.setTranslation(orig_center, pm.dt.Space.kWorld)
orig_center_matrix = transform.setMatrixPosition(
orig_center_matrix, orig_center)
current_center = vector.linearlyInterpolate(
current_vert.getPosition("world"),
mr_current_vert.getPosition("world"))
length_percentage = 1
    if current_length != 0 and orig_length != 0:  # avoid division by zero
length_percentage = current_length / orig_length
# refPosition_matrix = pm.dt.TransformationMatrix()
refPosition_matrix = pm.dt.Matrix()
# refPosition_matrix.setTranslation(current_center, pm.dt.Space.kWorld)
refPosition_matrix = transform.setMatrixPosition(
refPosition_matrix, current_center)
deltaMatrix = node_matrix * orig_center_matrix.inverse()
deltaMatrix = deltaMatrix * length_percentage
deltaMatrix = transform.setMatrixScale(deltaMatrix)
refPosition_matrix = deltaMatrix * refPosition_matrix
return refPosition_matrix
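# Roughly, getRepositionMatrix treats the midpoint of the two reference verts as
# a local frame, expresses the guide as a delta from that frame
# (node_matrix * orig_center_matrix.inverse()), scales the delta's translation by
# how much the vert-to-vert distance changed, and re-applies it at the new
# midpoint (deltaMatrix * refPosition_matrix).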
def getRepositionMatrixSingleRef(node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix,
closestVerts):
"""Get the delta matrix from the original position and multiply by the
new vert position. Add the rotations from the face normals.
    Args:
        node_matrix (pm.dt.Matrix): matrix of the guide
        orig_ref_matrix (pm.dt.Matrix): matrix from the original vert position
        mr_orig_ref_matrix (pm.dt.Matrix): mirrored original reference matrix (unused here)
        closestVerts (list): names of the closest verts; only the first is used
    Returns:
        pm.dt.TransformationMatrix: matrix of the new offset position, worldSpace
    """
closestVerts = pm.PyNode(closestVerts[0])
faces = closestVerts.connectedFaces()
normalVector = faces.getNormal("world")
pm.select(faces)
faces_str = mc.ls(sl=True, fl=True)
pm.select(cl=True)
face_pos = pm.dt.Vector(getPostionFromLoop(faces_str))
normal_rot = getOrient([normalVector.x, normalVector.y, normalVector.z],
[0, 1, 0],
ro=0)
refPosition_matrix = pm.dt.TransformationMatrix()
refPosition_matrix.setTranslation(face_pos, pm.dt.Space.kWorld)
refPosition_matrix.setRotation(normal_rot)
deltaMatrix = node_matrix * orig_ref_matrix.inverse()
refPosition_matrix = deltaMatrix * refPosition_matrix
return refPosition_matrix
@utils.viewport_off
@utils.one_undo
def getGuideRelativeDictionaryLegacy(mesh, guideOrder):
"""create a dictionary of guide:[[shape.vtx[int]], relativeMatrix]
Args:
mesh (string): name of the mesh
guideOrder (list): the order to query the guide hierarchy
Returns:
        dict: dictionary of guide:[[vertexIds], matrices]
"""
relativeGuide_dict = {}
mesh = pm.PyNode(mesh)
for guide in guideOrder:
guide = pm.PyNode(guide)
# slow function A
clst_vert = meshNavigation.getClosestVertexFromTransform(mesh, guide)
vertexIds = [clst_vert.name()]
# slow function B
orig_ref_matrix = getVertMatrix(clst_vert.name())
# --------------------------------------------------------------------
a_mat = guide.getMatrix(worldSpace=True)
mm = ((orig_ref_matrix - a_mat) * -1) + a_mat
pos = mm[3][:3]
mr_vert = meshNavigation.getClosestVertexFromTransform(mesh, pos)
mr_orig_ref_matrix = getVertMatrix(mr_vert.name())
vertexIds.append(mr_vert.name())
node_matrix = guide.getMatrix(worldSpace=True)
relativeGuide_dict[guide.name()] = [vertexIds,
node_matrix.get(),
orig_ref_matrix.get(),
mr_orig_ref_matrix.get()]
mc.select(cl=True)
return relativeGuide_dict
@utils.viewport_off
@utils.one_undo
def yieldGuideRelativeDictionary(mesh, guideOrder, relativeGuide_dict):
"""create a dictionary of guide:[[shape.vtx[int]], relativeMatrix]
Args:
mesh (string): name of the mesh
guideOrder (list): the order to query the guide hierarchy
Returns:
dictionary: create a dictionary of guide:[[edgeIDs], relativeMatrix]
"""
for guide in guideOrder:
guide = pm.PyNode(guide)
# slow function A
clst_vert = meshNavigation.getClosestVertexFromTransform(mesh, guide)
vertexIds = [clst_vert.name()]
# slow function B
orig_ref_matrix = getVertMatrix(clst_vert.name())
# --------------------------------------------------------------------
a_mat = guide.getMatrix(worldSpace=True)
mm = ((orig_ref_matrix - a_mat) * -1) + a_mat
pos = mm[3][:3]
mr_vert = meshNavigation.getClosestVertexFromTransform(mesh, pos)
mr_orig_ref_matrix = getVertMatrix(mr_vert.name())
vertexIds.append(mr_vert.name())
node_matrix = guide.getMatrix(worldSpace=True)
relativeGuide_dict[guide.name()] = [vertexIds,
node_matrix.get(),
orig_ref_matrix.get(),
mr_orig_ref_matrix.get()]
yield relativeGuide_dict
@utils.viewport_off
@utils.one_undo
def getGuideRelativeDictionary(mesh, guideOrder):
"""create a dictionary of guide:[[shape.vtx[int]], relativeMatrix]
Args:
mesh (string): name of the mesh
guideOrder (list): the order to query the guide hierarchy
Returns:
        dict: dictionary of guide:[[vertexIds], matrices]
"""
relativeGuide_dict = {}
mesh = pm.PyNode(mesh)
for result in yieldGuideRelativeDictionary(
mesh, guideOrder, relativeGuide_dict):
pass
return relativeGuide_dict
@utils.viewport_off
@utils.one_undo
def updateGuidePlacementLegacy(guideOrder, guideDictionary):
"""update the guides based on new universal mesh, in the provided order
Args:
guideOrder (list): of the hierarchy to crawl
guideDictionary (dictionary): dict of the guide:edge, matrix position
"""
for guide in guideOrder:
if guide not in guideDictionary or not mc.objExists(guide):
continue
elif guide in SKIP_PLACEMENT_NODES:
continue
(vertexIds,
node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix) = guideDictionary[guide]
guideNode = pm.PyNode(guide)
repoMatrix = getRepositionMatrix(pm.dt.Matrix(node_matrix),
pm.dt.Matrix(orig_ref_matrix),
pm.dt.Matrix(mr_orig_ref_matrix),
vertexIds)
guideNode.setMatrix(repoMatrix, worldSpace=True, preserve=True)
@utils.viewport_off
@utils.one_undo
def yieldUpdateGuidePlacement(guideOrder, guideDictionary):
"""update the guides based on new universal mesh, in the provided order
Args:
guideOrder (list): of the hierarchy to crawl
guideDictionary (dictionary): dict of the guide:edge, matrix position
"""
for guide in guideOrder:
if guide not in guideDictionary or not mc.objExists(guide):
continue
elif guide in SKIP_PLACEMENT_NODES:
continue
(vertexIds,
node_matrix,
orig_ref_matrix,
mr_orig_ref_matrix) = guideDictionary[guide]
repoMatrix = getRepositionMatrix(pm.dt.Matrix(node_matrix),
pm.dt.Matrix(orig_ref_matrix),
pm.dt.Matrix(mr_orig_ref_matrix),
vertexIds)
yield repoMatrix
@utils.viewport_off
@utils.one_undo
def updateGuidePlacement(guideOrder, guideDictionary, reset_scale=False):
"""update the guides based on new universal mesh, in the provided order
Args:
guideOrder (list): of the hierarchy to crawl
guideDictionary (dictionary): dict of the guide:edge, matrix position
"""
updateGen = yieldUpdateGuidePlacement(guideOrder, guideDictionary)
for guide in guideOrder:
if guide not in guideDictionary or not mc.objExists(guide):
continue
elif guide in SKIP_PLACEMENT_NODES:
continue
guideNode = pm.PyNode(guide)
scl = guideNode.getScale()
        repoMatrix = next(updateGen)
guideNode.setMatrix(repoMatrix, worldSpace=True, preserve=True)
if reset_scale:
guideNode.setScale([1, 1, 1])
else:
guideNode.setScale(scl)
yield True
# ==============================================================================
# Data export, still testing
# ==============================================================================
def _importData(filepath):
try:
with open(filepath, 'r') as f:
data = json.load(f)
return data
except Exception as e:
        print(e)
def _exportData(data, filepath):
try:
with open(filepath, 'w') as f:
json.dump(data, f, sort_keys=False, indent=4)
except Exception as e:
        print(e)
def exportGuidePlacement(filepath=None,
reference_mesh=UNIVERSAL_MESH_NAME,
root_node=GUIDE_ROOT,
skip_crawl_nodes=SKIP_CRAWL_NODES,
                         skip_strings=None):
"""Export the position of the supplied root node to a file.
Args:
        filepath (str, optional): path to export to
reference_mesh (str, optional): mesh to query verts
root_node (str, optional): name of node to query against
skip_crawl_nodes (list, optional): of nodes not to crawl
skip_strings (list, optional): strings to check to skip node
Returns:
        tuple: (relativeGuide_dict, ordered_hierarchy, filepath)
"""
if filepath is None:
filepath = pm.fileDialog2(fileMode=0,
startingDirectory="/",
fileFilter="Export position(*.json)")
if filepath:
filepath = filepath[0]
(relativeGuide_dict,
ordered_hierarchy) = recordInitialGuidePlacement(
reference_mesh=reference_mesh,
root_node=root_node,
skip_crawl_nodes=skip_crawl_nodes,
skip_strings=skip_strings)
data = {}
data["relativeGuide_dict"] = relativeGuide_dict
data["ordered_hierarchy"] = ordered_hierarchy
_exportData(data, filepath)
print "Guide position exported: {}".format(filepath)
return relativeGuide_dict, ordered_hierarchy, filepath
@utils.one_undo
def importGuidePlacement(filepath):
"""import the position from the provided file
Args:
filepath (str): file to the json
referenceMesh (str, optional): name of mesh to compare against
"""
data = _importData(filepath)
    # updateGuidePlacement is a generator; iterate it so the guides actually move
    for _ in updateGuidePlacement(data["ordered_hierarchy"],
                                  data["relativeGuide_dict"]):
        pass
return data["relativeGuide_dict"], data["ordered_hierarchy"]
def recordInitialGuidePlacement(reference_mesh=UNIVERSAL_MESH_NAME,
root_node=GUIDE_ROOT,
skip_crawl_nodes=SKIP_CRAWL_NODES,
skip_strings=None):
"""convenience function for retrieving a dict of position
Args:
reference_mesh (str, optional): the mesh to query against
root_node (str, optional): root node to crawl
skip_crawl_nodes (list, optional): of nodes to avoid
skip_strings (list, optional): of strings to check if skip
Returns:
dict, list: dict of positions, list of ordered nodes
"""
ordered_hierarchy = []
relativeGuide_dict = {}
crawlHierarchy(root_node,
ordered_hierarchy,
skip_crawl_nodes,
skip_strings=skip_strings)
relativeGuide_dict = getGuideRelativeDictionary(reference_mesh,
ordered_hierarchy)
return relativeGuide_dict, ordered_hierarchy
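# Hedged end-to-end sketch using only the functions defined above (the file path
# is illustrative): export guide positions relative to the universal mesh, then
# re-apply them after that mesh has changed.
#
#   _, _, path = exportGuidePlacement(filepath="/tmp/guide_placement.json")
#   # ... update or replace the "skin_geo_setup" mesh ...
#   importGuidePlacement(path)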
``` |
{
"source": "jhoonb/jornal",
"score": 2
} |
#### File: jornal/jornal/run.py
```python
import os
import bottle
from app import app
def main():
env = os.getenv("BOTTLE_ENV", "production")
host = os.getenv("HOST", "0.0.0.0")
port = os.getenv("PORT", 8080)
if env == "production":
bottle.debug(False)
app.run(host=host, port=port, reloader=False)
else:
bottle.debug(True)
app.run(host=host, port=port, reloader=True)
if __name__ == "__main__":
main()
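# Example invocation (environment variables as read above; values illustrative):
#   BOTTLE_ENV=development HOST=127.0.0.1 PORT=8000 python run.py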
```
#### File: jornal/tests/test_utils.py
```python
from jornal import utils
def test_json_request_valido():
    assert utils.json_request_valido(
        ("k", "v", "b"), {"k": "", "v": "", "b": ""}
    )
    assert not utils.json_request_valido(
        ("k", "b"), {"k": "", "v": "", "b": ""}
    )
    assert utils.json_request_valido(("k",), {"k": ""})
    assert utils.json_request_valido(
        ("k", "v", "b"), {"b": "", "v": "", "k": ""}
    )
    assert not utils.json_request_valido(
        (), {"b": "", "v": "", "k": ""}
    )
    assert not utils.json_request_valido(("k", "v", "b"), {})
def test__gerar_titulo():
t = utils._gerar_titulo('TITULO DE TESTE')
assert t == 'titulo_de_teste'
t = utils._gerar_titulo('TITULO,DE, TESTE')
assert t == 'titulo_de_teste'
t = utils._gerar_titulo(' TITULO,DE, TESTE ')
assert t == 'titulo_de_teste'
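# The assertions in test_json_request_valido constrain the expected behavior; a
# minimal implementation consistent with them (a sketch, not necessarily the
# actual jornal code) would compare the key sets:
#
#   def json_request_valido(chaves, json_req):
#       return set(chaves) == set(json_req.keys())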
``` |
{
"source": "jhoonb/pygraph",
"score": 4
} |
#### File: pygraph/src/digraph.py
```python
from graph import Graph, _error_  # _error_ message table assumed to live in graph.py
class Digraph(Graph):
def __init__(self, graph_id='Digraph G', weighted=False):
Graph.__init__(self, graph_id, weighted)
self._typegraph = 'digraph'
def is_adjacent(self, node_x, node_y):
'''
@brief check if node_x is adjacent to node_y
@param node_x (str)
@param node_y (str)
@return (bool)
'''
#only str
node_x = str(node_x)
node_y = str(node_y)
        # check whether both nodes exist in the graph
if not self.exist_node([node_x, node_y], 'all'):
raise Exception(_error_[3])
        return node_x in self._node[node_y][1]
def neighbourhood(self, node, neighbourhood_open=True):
'''
@brief neighbourhood of the node in the graph
@param node (str)
@param neighbourhood_open (bool): default (True)
@return (list)
'''
# only str
node = str(node)
        # check whether the node exists in the graph
if not self.exist_node(node):
raise Exception(_error_[3])
output = [i for i in self._node if self.is_adjacent(i, node)]
if neighbourhood_open:
return output
else:
output.insert(0, node)
return output
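# Note on direction: is_adjacent(x, y) is True when x appears in the adjacency
# list stored at self._node[y][1], so neighbourhood(node) collects every i for
# which is_adjacent(i, node) holds. Hedged usage sketch (node/edge creation goes
# through the Graph base-class API, which is not shown here):
#
#   dg = Digraph('example')
#   # ... add nodes and edges via the Graph base class ...
#   dg.is_adjacent('a', 'b')
#   dg.neighbourhood('b', neighbourhood_open=False)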
``` |